/*	$OpenBSD: uvm_pager.c,v 1.14 2001/08/11 10:57:22 art Exp $	*/
/*	$NetBSD: uvm_pager.c,v 1.30 2000/05/20 03:36:06 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE       (4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */

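/*
 * editor's note (not in the original source): with PAGER_MAP_SIZE at
 * 4MB and a typical (but machine-dependent) 4KB PAGE_SIZE, pager_map
 * has room for at most 1024 simultaneously mapped pager pages;
 * uvm_pagermapin() sleeps, or fails if UVMPAGER_MAPIN_WAITOK is not
 * set, when this KVA runs out.
 */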

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}
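
/*
 * editor's note: a minimal sketch (not part of the original source) of
 * the pgo_init hook that the loop above invokes for each entry of
 * uvmpagerops[].  "example_pager_init" is a hypothetical name; a real
 * pager would also fill in the other pgo_* hooks this file uses
 * (pgo_cluster, pgo_mk_pcluster, pgo_put, pgo_releasepg, ...).
 */
#if 0
static void
example_pager_init()
{
	/* one-time setup; runs once at boot from uvm_pager_init() */
}
#endif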

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, flags)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, flags=0x%x)",
	      pps, npages, aiop, flags);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP,
		    (flags & UVMPAGER_MAPIN_WAITOK));
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | prot);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
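
/*
 * editor's note: a hedged usage sketch (not in the original source) of
 * the non-blocking pageout case of uvm_pagermapin(); the function name
 * "example_map_for_pageout" is hypothetical.
 */
#if 0
static vaddr_t
example_map_for_pageout(pps, npages)
	struct vm_page **pps;
	int npages;
{
	/*
	 * flags == 0 means: a read-only mapping is enough (outgoing
	 * I/O, so no UVMPAGER_MAPIN_READ), no aio descriptor is wanted
	 * (aiop == NULL), and return 0 instead of sleeping when
	 * pager_map KVA is exhausted (no UVMPAGER_MAPIN_WAITOK).
	 */
	return (uvm_pagermapin(pps, npages, NULL, 0));
}
#endif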

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our pmap mappings by hand and then remove the map entry
 * (waking up anyone waiting for space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
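
/*
 * editor's note: a hedged sketch (not in the original source) of the
 * mapin/mapout pairing for a synchronous transfer; "example_sync_io"
 * is a hypothetical name.
 */
#if 0
static void
example_sync_io(pps, npages)
	struct vm_page **pps;
	int npages;
{
	vaddr_t kva;

	/* sync I/O: no aio descriptor, ok to sleep for pager_map KVA */
	kva = uvm_pagermapin(pps, npages, NULL, UVMPAGER_MAPIN_WAITOK);

	/* ... perform the device I/O on the buffer at "kva" ... */

	/* drop the mapping; this also wakes anyone waiting for KVA */
	uvm_pagermapout(kva, npages);
}
#endif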

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;  /* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) {  /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward  = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		      curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;			/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* don't want mapped pages at all */
					break;

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					   == PG_CLEAN &&
					   pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is page available for cleaning and does it need it */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */

			/* yes!   enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?   see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
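
/*
 * editor's note: a hedged sketch (not in the original source) of the
 * caller-side protocol listed above uvm_mk_pcluster(); the names
 * "example_build_cluster" and "EXAMPLE_CLUSTER" are hypothetical.
 */
#if 0
#define EXAMPLE_CLUSTER	16

static int
example_build_cluster(uobj, center)
	struct uvm_object *uobj;
	struct vm_page *center;
{
	struct vm_page *pps[EXAMPLE_CLUSTER], **ppsp;
	int npages = EXAMPLE_CLUSTER;

	/* caller duties: uobj and the page queues are already locked,
	 * and the center page is made busy and write-protected */
	center->flags |= PG_BUSY;
	pmap_page_protect(center, VM_PROT_READ);

	ppsp = uvm_mk_pcluster(uobj, pps, &npages, center, PGO_ALLPAGES,
	    (voff_t)0, (voff_t)0);

	/* every page in ppsp[0..npages-1] is now PG_BUSY; the caller
	 * must eventually un-busy all of them */
	return (npages);
}
#endif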

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null  if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.  for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
					       pg->offset >> PAGE_SHIFT,
					       nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).    give up!   the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
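
/*
 * editor's note: a hedged sketch (not in the original source) of the
 * swap-backed case described in the big comment above;
 * "example_pageout_swap" is a hypothetical name.
 */
#if 0
static int
example_pageout_swap(pg, pps, npages, swblk)
	struct vm_page *pg, **pps;
	int npages;
	daddr_t swblk;
{
	struct vm_page **ppsp = pps;

	/*
	 * swap-backed: uobj == NULL, the cluster in pps[] was built by
	 * the caller, and "start" carries the starting swap block.
	 * the page queues must be locked on entry; uvm_pager_put()
	 * returns with them unlocked.
	 */
	return (uvm_pager_put(NULL, pg, &ppsp, &npages,
	    PGO_PDFREECLUST | PGO_SYNCIO, (voff_t)swblk, (voff_t)0));
}
#endif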

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)		/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					  /* zap swap block */
					  ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			/* still holding obj lock */
			wakeup(ppsp[lcv]);

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive...  */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}
719