/*	$NetBSD: uvm_aobj.c,v 1.126 2017/10/28 00:37:13 pgoyette Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.126 2017/10/28 00:37:13 pgoyette Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
 * keeping the list of resident pages, it may also keep a list of allocated
 * swap blocks.  Depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 *
 * Lock order
 *
 *	uao_list_lock ->
 *		uvm_object::vmobjlock
 */

/*
 * Note: for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
 */

#define	UAO_SWHASH_CLUSTER_SHIFT	4
#define	UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)

/* Get the "tag" for this page index. */
#define	UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
    ((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* Given an ELT and a page index, find the swap slot. */
#define	UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
    ((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])

/* Given an ELT, return its pageidx base. */
#define	UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
    ((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/* The hash function. */
#define	UAO_SWHASH_HASH(aobj, idx) \
    (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
    & (aobj)->u_swhashmask)])

/*
 * The threshold which determines whether we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define	UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
#define	UAO_USES_SWHASH(aobj) \
    ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)

/* The number of buckets in a hash, with an upper bound. */
#define	UAO_SWHASH_MAXBUCKETS		256
#define	UAO_SWHASH_BUCKETS(aobj) \
    (MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
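
/*
 * Worked example (illustrative only): with UAO_SWHASH_CLUSTER_SHIFT == 4,
 * page index 0x37 has tag 0x3 (0x37 >> 4) and in-cluster slot 0x7
 * (0x37 & 0xf), so its swap slot is elt->slots[7] of the element tagged
 * 3, which lives in bucket (0x3 & u_swhashmask).  Since the threshold
 * above is UAO_SWHASH_CLUSTER_SIZE * 4 == 64 pages, objects of 64 pages
 * or fewer skip the hash entirely and use the flat u_swslots array.
 */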

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
 * Note: pages for this pool must not come from a pageable kernel map.
 */
static struct pool	uao_swhash_elt_pool	__cacheline_aligned;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
	int u_freelist;		  /* freelist to allocate pages from */
};
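
/*
 * Sizing example (illustrative only, assuming 4 KB pages): the 64-page
 * UAO_SWHASH_THRESHOLD corresponds to a 256 KB object.  Larger objects
 * track swap slots through u_swhash/u_swhashmask; objects at or below
 * the threshold use the flat u_swslots array of u_pages entries.
 */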

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
#endif /* defined(VMSWAP) */

static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list	__cacheline_aligned;
static kmutex_t		uao_list_lock		__cacheline_aligned;

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return 0;

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
	}

	/*
	 * otherwise, look in the array
	 */

	return aobj->u_swslots[pageidx];
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %#jx pageidx %jd slot %jd",
	    (uintptr_t)aobj, pageidx, slot, 0);

	KASSERT(mutex_owned(uobj->vmobjlock) || uobj->uo_refs == 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
		return 0;
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return oldslot;
}
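
/*
 * Usage sketch (illustrative only): a caller that has just written a
 * page to a newly allocated swap slot records the mapping, and later
 * forgets it by storing zero:
 *
 *	if (uao_set_swslot(uobj, pageidx, newslot) == -1)
 *		goto fail;			(element allocation failed)
 *	...
 *	(void)uao_set_swslot(uobj, pageidx, 0);	(forget the mapping)
 *
 * Note that uao_set_swslot() only records the mapping; actually
 * releasing the slot is the caller's job (see uao_dropswap() below,
 * which pairs the zeroing store with uvm_swap_free()).
 */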

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(mutex_owned(uobj->vmobjlock));
	uao_dropswap_range(uobj, 0, 0);
	mutex_exit(uobj->vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	uvm_obj_destroy(uobj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static kmutex_t kernel_object_lock;
	static int kobj_alloced __diagused = 0;
	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless the kernel object is requested.
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * no freelist by default
	 */

	aobj->u_freelist = VM_NFREELIST;

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking
	 * since we are still booting and should be the only thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, kernswap ? false : true,
			    &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    kernswap ? KM_NOSLEEP : KM_SLEEP);
			if (aobj->u_swslots == NULL)
				panic("uao_create: swslots allocation failed");
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return &aobj->u_obj;
		}
	}

	/*
	 * Initialise UVM object.
	 */

	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* Initialisation only once, for UAO_FLAG_KERNOBJ. */
		mutex_init(&kernel_object_lock, MUTEX_DEFAULT, IPL_NONE);
		uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return &aobj->u_obj;
}

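/*
 * Usage sketch (illustrative only): an ordinary consumer of anonymous
 * memory creates an aobj, maps it, and drops its reference when done:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(16 * PAGE_SIZE, 0);
 *	... map it (e.g. via uvm_map()) and fault pages in ...
 *	uao_detach(uobj);
 *
 * With flags == 0 the object starts with one reference, so the final
 * uao_detach() destroys it together with its pages and swap slots.
 */
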
/*
 * uao_set_pgfl: allocate pages only from the specified freelist.
 *
 * => must be called before any pages are allocated for the object.
 * => reset by setting it to VM_NFREELIST, meaning any freelist.
 */

void
uao_set_pgfl(struct uvm_object *uobj, int freelist)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
	    freelist);

	aobj->u_freelist = freelist;
}

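/*
 * Example (illustrative only; freelist numbering is machine-dependent):
 * a caller that needs pages a limited-addressing device can reach might
 * restrict a fresh aobj before its first page is allocated, e.g.
 *
 *	uao_set_pgfl(uobj, VM_FREELIST_FIRST16);
 *
 * on a port that defines such a freelist; uao_pagealloc() below then
 * allocates with UVM_PGA_STRAT_ONLY from that freelist alone.
 */
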
/*
 * uao_pagealloc: allocate a page for aobj.
 */

static inline struct vm_page *
uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
		return uvm_pagealloc(uobj, offset, NULL, flags);
	else
		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: hold a reference to an anonymous UVM object.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel object is persistent. */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		return;
	}
	atomic_inc_uint(&uobj->uo_refs);
}

/*
 * uao_detach: drop a reference to an anonymous UVM object.
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;

	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * Detaching from the kernel object is a NOP.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was the last one, destroy the object.
	 */

	KASSERT(uobj->uo_refs > 0);
	UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
	    (uintptr_t)uobj, uobj->uo_refs, 0, 0);
	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * Remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when the
	 * page is no longer busy (and thus after any disk I/O that it is
	 * involved in is complete), release any swap resources and free
	 * the page itself.
	 */

	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			mutex_exit(&uvm_pageqlock);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, false,
			    "uao_det", 0);
			mutex_enter(uobj->vmobjlock);
			mutex_enter(&uvm_pageqlock);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	mutex_exit(&uvm_pageqlock);

	/*
	 * Finally, free the anonymous UVM object itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */
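
/*
 * Worked example (illustrative only): flushing a 16-page range of an
 * object with 200 resident pages compares uo_npages (200) against
 * 16 * UVM_PAGE_TREE_PENALTY; if the penalty is 4 that gives 64, so
 * 200 > 64 and we do per-offset lookups rather than walk all of memq.
 */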

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	bool by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	KASSERT(mutex_owned(uobj->vmobjlock));

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = true;		/* always go by the list */
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%jx, stop=0x%jx, by_list=%jd, flags=0x%jx",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		mutex_exit(uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.flags = PG_MARKER;
	endmp.flags = PG_MARKER;

	/*
	 * now do it.  note: we must update nextpg in the body of the loop or
	 * we will get stuck.  we need to use nextpg if we'll traverse the
	 * list because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
		nextpg = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	/* locked: uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq.queue);
			if (pg->flags & PG_MARKER)
				continue;
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
			}
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uao_put", 0);
			mutex_enter(uobj->vmobjlock);
			if (by_list) {
				nextpg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp,
				    listq.queue);
			} else
				curoff -= PAGE_SIZE;
			continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			mutex_enter(&uvm_pageqlock);
			/* skip the page if it's wired */
			if (pg->wire_count == 0) {
				uvm_pagedeactivate(pg);
			}
			mutex_exit(&uvm_pageqlock);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing the swap slot here is not strictly
			 * necessary.  however, leaving it here doesn't save
			 * much because we need to update swap accounting
			 * anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
	}
	mutex_exit(uobj->vmobjlock);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
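
/*
 * Usage sketch (illustrative only): a synchronous single-page fetch,
 * essentially what uao_pagein_page() does below:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1, error;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = uao_get(uobj, pageidx << PAGE_SHIFT, &pg, &npages, 0,
 *	    VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);
 *
 * uao_get() unlocks the object; on success pg comes back busy and the
 * caller must wake waiters, unbusy the page and put it on a queue.
 */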

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
		    (uintptr_t)uobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = true;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(uobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uao_pagealloc(uobj, current_offset,
				    UVM_FLAG_COLORMATCH|UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = false;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%jd)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uao_pagealloc(uobj, current_offset, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					mutex_exit(uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					mutex_enter(uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%jx\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, uobj->vmobjlock,
				    false, "uao_get", 0);
				mutex_enter(uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(uobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %jd",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			mutex_exit(uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			mutex_enter(uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%jd)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(uobj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				mutex_enter(&uvm_pageqlock);
				uvm_pagefree(ptmp);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(uobj->vmobjlock);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		if ((access_type & VM_PROT_WRITE) == 0) {
			ptmp->flags |= PG_CLEAN;
			pmap_clear_modify(ptmp);
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

done:
	mutex_exit(uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj;

	/*
	 * Walk the list of all anonymous UVM objects.  Grab the first.
	 */
	mutex_enter(&uao_list_lock);
	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
		mutex_exit(&uao_list_lock);
		return false;
	}
	uao_reference(&aobj->u_obj);

	do {
		struct uvm_aobj *nextaobj;
		bool rv;

		/*
		 * Prefetch the next object and immediately hold a reference
		 * on it, so neither the current nor the next entry could
		 * disappear while we are iterating.
		 */
		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
			uao_reference(&nextaobj->u_obj);
		}
		mutex_exit(&uao_list_lock);

		/*
		 * Page in all pages in the swap slot range.
		 */
		mutex_enter(aobj->u_obj.vmobjlock);
		rv = uao_pagein(aobj, startslot, endslot);
		mutex_exit(aobj->u_obj.vmobjlock);

		/* Drop the reference of the current object. */
		uao_detach(&aobj->u_obj);
		if (rv) {
			if (nextaobj) {
				uao_detach(&nextaobj->u_obj);
			}
			return rv;
		}

		aobj = nextaobj;
		mutex_enter(&uao_list_lock);
	} while (aobj);

	mutex_exit(&uao_list_lock);
	return false;
}

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page, then start over
					 * on this object since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return false;
}

/*
 * uao_pagein_page: page in a single page from an anonymous UVM object.
 *
 * => Returns true if pagein was aborted due to lack of memory.
 * => Object must be locked and is returned locked.
 */

static bool
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct uvm_object *uobj = &aobj->u_obj;
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;

	KASSERT(mutex_owned(uobj->vmobjlock));
	rv = uao_get(uobj, pageidx << PAGE_SHIFT, &pg, &npages,
	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);

	/*
	 * relock and finish up.
	 */

	mutex_enter(uobj->vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * make sure it's on a page queue.
	 */
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0)
		uvm_pageenqueue(pg);
	mutex_exit(&uvm_pageqlock);

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	return false;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */
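
/*
 * Worked example (illustrative only): dropping pages [0x30, 0x48) with
 * UAO_SWHASH_CLUSTER_SIZE == 16 gives taglo == 3 and taghi == 4.  The
 * tag-3 element is cleared over in-cluster slots [0, 16) and the tag-4
 * element over [0, 8), since end is exclusive.
 */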

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int swpgonlydelta = 0;

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}

#endif /* defined(VMSWAP) */