/*	$NetBSD: uvm_aobj.c,v 1.78 2005/12/24 13:22:13 yamt Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.78 2005/12/24 13:22:13 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

#define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
	((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

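/*
 * worked example of the macros above (illustrative only): with
 * UAO_SWHASH_CLUSTER_SHIFT of 4, page index 0x123 has tag 0x12
 * (0x123 >> 4) and page slot index 3 (0x123 & 0xf) within that
 * cluster's elt; the elt itself hangs off bucket
 * (0x12 & u_swhashmask) of u_swhash.
 */
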
/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))

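/*
 * worked example (illustrative only): with the values above, the
 * threshold is 16 * 4 = 64 pages, so an aobj of more than 64 pages
 * (256KB with 4KB pages) uses the hash table; a 1024-page aobj gets
 * MIN(1024 >> 4, 256) = 64 buckets.
 */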

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 * NOTE: Pages for this pool must not come from a pageable kernel map!
 */
POOL_INIT(uao_swhash_elt_pool, sizeof(struct uao_swhash_elt), 0, 0, 0,
    "uaoeltpl", NULL);

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */
POOL_INIT(uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0, "aobjpl",
    &pool_allocator_nointr);

MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");

/*
 * local functions
 */

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static boolean_t uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, boolean_t);

static boolean_t uao_pagein(struct uvm_aobj *, int, int);
static boolean_t uao_pagein_page(struct uvm_aobj *, int);
static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
#endif /* defined(VMSWAP) */

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_get,		/* get */
	uao_put,		/* flush */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static struct simplelock uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, FALSE);
		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */

	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}

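/*
 * typical caller pattern (sketch; this is essentially what
 * uao_dropswap() below does): clear the slot mapping, then release
 * the swap space it referenced:
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */
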
#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	int swpgonlydelta = 0;

	simple_unlock(&aobj->u_obj.vmobjlock);

#if defined(VMSWAP)
	uao_dropswap_range1(aobj, 0, 0);

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		free(aobj->u_swhash, M_UVMAOBJ);
	} else {

		/*
		 * free the array itself.
		 */

		free(aobj->u_swslots, M_UVMAOBJ);
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	pool_put(&uvm_aobj_pool, aobj);

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static int kobj_alloced = 0;
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
 	 * allocate hash/array if necessary
 	 *
 	 * note: in the KERNSWAP case there is no need to worry about locking,
 	 * since we are still booting and should be the only thread around.
 	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
		}
	}

	/*
 	 * init aobj fields
 	 */

	UVM_OBJ_INIT(&aobj->u_obj, &aobj_pager, refs);

	/*
 	 * now that aobj is ready, add it to the global list
 	 */

	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);
	return(&aobj->u_obj);
}

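/*
 * usage sketch (illustrative, not part of this file; the uvm_map()
 * call shown is an assumption for the example): a typical in-kernel
 * consumer creates an aobj and hands the reference to a mapping, and
 * uvm_unmap() of that range later drops the reference via uao_detach():
 *
 *	uobj = uao_create(size, 0);
 *	error = uvm_map(kernel_map, &va, size, uobj, 0, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0));
 *	if (error)
 *		uao_detach(uobj);
 */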


/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;
	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

void
uao_reference_locked(struct uvm_object *uobj)
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
 	 * kernel_object already has plenty of references, leave it alone.
 	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

void
uao_detach_locked(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
 	 * detaching from kernel_object is a noop.
 	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;
	if (uobj->uo_refs) {
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
 	 * remove the aobj from the global list.
 	 */

	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
 	 * free all the pages left in the aobj.  for each page,
	 * when the page is no longer busy (and thus after any disk i/o that
	 * it's involved in is complete), release any swap resources and
	 * free the page itself.
 	 */

	uvm_lock_pageq();
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			uvm_unlock_pageq();
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, FALSE,
			    "uao_det", 0);
			simple_lock(&uobj->vmobjlock);
			uvm_lock_pageq();
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_unlock_pageq();

	/*
 	 * finally, free the aobj itself.
 	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty (UVM_PAGE_HASH_PENALTY).
 */
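/*
 * worked example of the by_list heuristic (illustrative only, assuming
 * UVM_PAGE_HASH_PENALTY is 4): flushing a 10-page range of an object
 * with 100 resident pages sets by_list to FALSE (100 <= 10 * 4 fails),
 * so we do 10 hash lookups (costing roughly 40 list steps) instead of
 * walking all 100 memq entries.
 */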

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	boolean_t by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		simple_unlock(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;

	/*
	 * now do it.  note: we must update nextpg in the body of loop or we
	 * will get stuck.  we need to use nextpg if we'll traverse the list
	 * because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		nextpg = TAILQ_FIRST(&uobj->memq);
		PHOLD(curlwp);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	uvm_lock_pageq();

	/* locked: both page queues and uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq);
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}
		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pg->loan_count != 0 || pg->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pg);
			uvm_pagedeactivate(pg);
			continue;

		case PGO_FREE:

			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * wait and try again if the page is busy.
			 * otherwise free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);
			if (pg->flags & PG_BUSY) {
				if (by_list) {
					TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				}
				pg->flags |= PG_WANTED;
				uvm_unlock_pageq();
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "uao_put", 0);
				simple_lock(&uobj->vmobjlock);
				uvm_lock_pageq();
				if (by_list) {
					nextpg = TAILQ_NEXT(&curmp, listq);
					TAILQ_REMOVE(&uobj->memq, &curmp,
					    listq);
				} else
					curoff -= PAGE_SIZE;
				continue;
			}

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			continue;
		}
	}
	uvm_unlock_pageq();
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(curlwp);
	}
	simple_unlock(&uobj->vmobjlock);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
#if defined(VMSWAP)
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
#endif /* defined(VMSWAP) */
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    (struct uvm_aobj *)uobj, offset, flags,0);

	/*
 	 * get number of pages
 	 */

	maxpages = *npagesp;

	/*
 	 * step 1: handle the case where fault data structures are locked.
 	 */

	if (flags & PGO_LOCKED) {

		/*
 		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
 		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
 			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
 			 */

			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
 		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
 		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
 	 * step 2: get non-resident or busy pages.
 	 * object is locked.   data structures are unlocked.
 	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
 		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
 		 *
 		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
 		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;
			}

			/*
 			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
 			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
 		 * if we own the valid page at the correct offset, pps[lcv] will
 		 * point to it.   nothing more to do except go to the next page.
 		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
 		 * we have a "fake/busy/clean" page that we just allocated.
 		 * do the needed "i/o", either reading from swap or zeroing.
 		 */

		swslot = uao_find_swslot(&aobj->u_obj, pageidx);

		/*
 		 * just zero the page if there's nothing in swap.
 		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			simple_unlock(&uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		if ((access_type & VM_PROT_WRITE) == 0) {
			ptmp->flags |= PG_CLEAN;
			pmap_clear_modify(ptmp);
		}

		/*
 		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
 		 *
 		 * it is the callers job to:
 		 * => check if the page is released
 		 * => unbusy the page
 		 * => activate the page
 		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
 	 * finally, unlock object and return.
 	 */

done:
	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

boolean_t
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj, *nextaobj;
	boolean_t rv;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);
	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {

		/*
		 * try to get the object lock, start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */

		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */

		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */

		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */

		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */

		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}


/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */

static boolean_t
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, PGO_SYNCIO);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */

	simple_lock(&aobj->u_obj.vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

	default:
		return TRUE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * deactivate the page (to make sure it's on a page queue).
	 */
	uvm_lock_pageq();
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	return FALSE;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	LOCK_ASSERT(simple_lock_held(&uobj->vmobjlock));

	uao_dropswap_range1(aobj, start, end);
}

static void
uao_dropswap_range1(struct uvm_aobj *aobj, voff_t start, voff_t end)
{
	int swpgonlydelta = 0;

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}

#endif /* defined(VMSWAP) */