/*	$NetBSD: uvm_aobj.c,v 1.15 1998/10/18 23:49:59 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
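
/*
 * a worked example of the clustering macros above (illustrative only;
 * the numbers assume the default UAO_SWHASH_CLUSTER_SHIFT of 4 and a
 * u_swhashmask of 0xf, i.e. 16 buckets from hashinit):
 *
 *	pageidx 0x123:
 *	  UAO_SWHASH_ELT_TAG(0x123)	== 0x123 >> 4	== 0x12
 *	  slot index within the cluster	== 0x123 & 0xf	== 0x3
 *	  UAO_SWHASH_HASH(aobj, 0x123)	== &u_swhash[0x12 & 0xf]
 *
 * so pages 0x120..0x12f share one uao_swhash_elt (tag 0x12), and an
 * aobj must be larger than UAO_SWHASH_THRESHOLD (64) pages before a
 * hash table is used at all.
 */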

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	vaddr_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, which allows
 *    (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
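
/*
 * because u_obj is the first member of struct uvm_aobj, the generic
 * uvm_object pointer handed to the pager ops can be converted back to
 * its containing aobj with a simple cast, as the pager functions below
 * do.  a minimal sketch (uao_from_uobj is hypothetical, not part of
 * this file):
 */
#if 0
static struct uvm_aobj *
uao_from_uobj(uobj)
	struct uvm_object *uobj;
{

	/* valid because u_obj is the first member of struct uvm_aobj */
	return ((struct uvm_aobj *) uobj);
}
#endif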

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static void			 uao_init __P((void));
static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *,
						      int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						vaddr_t, vaddr_t,
						int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, vaddr_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	uao_init,		/* init */
	NULL,			/* attach */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* asyncget */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	int page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}
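
/*
 * a sketch of the two calling modes of uao_find_swhash_elt
 * (illustrative only): a pure lookup passes FALSE and must cope with
 * a NULL return, while a store passes TRUE and, since the pool is
 * allocated with PR_WAITOK above, always gets an entry back.
 */
#if 0
	elt = uao_find_swhash_elt(aobj, pageidx, FALSE); /* may be NULL */
	elt = uao_find_swhash_elt(aobj, pageidx, TRUE);	 /* never NULL */
#endif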

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}

	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
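
/*
 * the usual idiom built on uao_set_swslot (illustrative only): since
 * setting a slot to zero returns the old slot, a caller holding the
 * object lock can steal a page's swap mapping and then release the
 * swap space, as uao_detach and uao_releasepg below both do.
 */
#if 0
	slot = uao_set_swslot(&aobj->u_obj, pg->offset >> PAGE_SHIFT, 0);
	if (slot)
		uvm_swap_free(slot, 1);
#endif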

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot)
						uvm_swap_free(slot, 1);
				}

				next = elt->list.le_next;
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot)
				uvm_swap_free(slot, 1);
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;		/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		/*
		 * XXXTHORPEJ: Need to call this now, so the pool gets
		 * initialized!
		 */
		uao_init();

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
		    panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case no need to worry about locking since
	 * we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);		/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 * XXXCHS: uao_init hasn't been called in the KERNOBJ case,
	 * do we really need the kernel object on this list anyway?
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
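
/*
 * a sketch of the typical lifecycle of a normal (non-kernel) aobj
 * (illustrative only; the size is rounded up to whole pages by
 * uao_create):
 */
#if 0
	struct uvm_object *uobj;

	uobj = uao_create(16 * PAGE_SIZE, 0);	/* created with one ref */
	uao_reference(uobj);			/* refs: 1 -> 2 */
	uao_detach(uobj);			/* refs: 2 -> 1 */
	uao_detach(uobj);			/* refs 0: pages, swap freed */
#endif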

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
static void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked (we will lock it)
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);
	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked, we will lock it
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
		int swslot;

		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);

		swslot = uao_set_swslot(&aobj->u_obj,
		    pg->offset >> PAGE_SHIFT, 0);
		if (swslot) {
			uvm_swap_free(swslot, 1);
		}

		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: uh, yea, sure it's flushed.  really!
 */
boolean_t
uao_flush(uobj, start, end, flags)
	struct uvm_object *uobj;
	vaddr_t start, end;
	int flags;
{

	/*
	 * anonymous memory doesn't "flush"
	 */
	/*
	 * XXX
	 * deal with PGO_DEACTIVATE (for madvise(MADV_SEQUENTIAL))
	 * and PGO_FREE (for msync(MS_INVALIDATE))
	 */
	return TRUE;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
	    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0) {
					/* need to do a wait or I/O! */
					done = FALSE;
				}
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!   Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, current_offset >> PAGE_SHIFT);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					/* object lock still held */
					thread_wakeup(ptmp);
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(PMAP_PGARG(ptmp));	/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}
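
/*
 * a sketch of the two-phase protocol a fault-path caller of uao_get is
 * expected to follow (illustrative only; variable names are assumed
 * and error handling is omitted).  on VM_PAGER_UNLOCK the caller drops
 * its fault data structure locks and calls again without PGO_LOCKED,
 * allowing uao_get to sleep and do swap i/o.
 */
#if 0
	npages = 1;
	pps[0] = NULL;
	rv = uobj->pgops->pgo_get(uobj, offset, pps, &npages, 0,
	    VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	if (rv == VM_PAGER_UNLOCK) {
		/* ... unlock fault data structures, relock object ... */
		rv = uobj->pgops->pgo_get(uobj, offset, pps, &npages, 0,
		    VM_PROT_READ, MADV_NORMAL, 0);
	}
#endif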

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
	int slot;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	slot = uao_set_swslot(&aobj->u_obj, pg->offset >> PAGE_SHIFT, 0);
	if (slot)
		uvm_swap_free(slot, 1);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();	/* otherwise stay locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
		panic("uao_releasepg: kill flag set on referenced object!");
#endif

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.memq.tqh_first)
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}
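
/*
 * a sketch of the caller contract for uao_releasepg (illustrative
 * only): the caller owns the PG_BUSY/PG_RELEASED page, handles
 * PG_WANTED itself, and must not touch the object again if FALSE
 * comes back.  the pagedaemon passes a nextpgp so the page queues
 * stay locked across the call; other callers pass NULL.
 */
#if 0
	if (pg->flags & PG_WANTED)
		thread_wakeup(pg);	/* caller handles PG_WANTED */
	if (!uobj->pgops->pgo_releasepg(pg, NULL))
		return;			/* object was killed */
#endif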
1095