/*	$OpenBSD: uvm_pager.c,v 1.34 2003/03/29 01:13:57 mickey Exp $	*/
/*	$NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#define UVM_PAGER
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
#ifdef UBC
extern struct uvm_pagerops ubc_pager;
#endif

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
#ifdef UBC
	&ubc_pager,
#endif
};

/*
 * the pager map: provides KVA for I/O
 */

vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
	emergva = uvm_km_valloc(kernel_map, MAXBSIZE);
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, flags)
	struct vm_page **pps;
	int npages;
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			KASSERT(npages <= MAXBSIZE >> PAGE_SHIFT);
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | prot);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
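
/*
 * Example (illustrative sketch, not taken from a real pager): a pager
 * that needs a kernel mapping around an I/O brackets the transfer with
 * uvm_pagermapin()/uvm_pagermapout().  "bp", "pps" and "npages" are
 * assumed to be set up by the caller; a pageout passes no
 * UVMPAGER_MAPIN_READ since the pages are only read, and no
 * UVMPAGER_MAPIN_WAITOK so a failed mapin can be retried later:
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, 0);
 *	if (kva == 0)
 *		return (VM_PAGER_AGAIN);
 *	bp->b_data = (caddr_t)kva;
 *	bp->b_bcount = bp->b_bufsize = npages << PAGE_SHIFT;
 *	(... start the I/O; for sync I/O, wait for it to finish ...)
 *	uvm_pagermapout(kva, npages);
 */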

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry
 * (waking up anyone waiting for space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		entries = NULL;
		goto remove;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
remove:
	pmap_remove(pmap_kernel(), kva, kva + size);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *              the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;  /* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose the page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		      curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					   == PG_CLEAN &&
					   pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}

			/* is the page available for cleaning, and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!   enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* XXX: protect wired page?   see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
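
/*
 * Example (illustrative sketch, not from a real caller): honoring the
 * contract above, a caller holding the object lock busies and
 * write-protects the center page "pg", locks the page queues, and then
 * clusters; "uobj" and the sizing of the "pps" array are assumptions:
 *
 *	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
 *	int npages = MAXBSIZE >> PAGE_SHIFT;
 *
 *	pg->flags |= PG_BUSY;
 *	pmap_page_protect(pg, VM_PROT_READ);
 *	uvm_lock_pageq();
 *	ppsp = uvm_mk_pcluster(uobj, pps, &npages, pg, PGO_ALLPAGES, 0, 0);
 *	uvm_unlock_pageq();
 *	(ppsp[0 .. npages-1] is now the cluster, all pages PG_BUSY)
 */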

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN.
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;
	UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(pdhist);

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		UVMHIST_LOG(pdhist, "put -> %d", result, 0,0,0);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.  drop the cluster, if there was one;
	 * after this the caller only has the one page ("pg") to worry
	 * about.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
					       pg->offset >> PAGE_SHIFT,
					       nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * on error, relock the object if the caller expects it to be
	 * locked (see return-state item #3 above), then hand the error
	 * back.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
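
/*
 * Example (illustrative sketch, not from the real pagedaemon): a
 * swap-backed pageout in pagedaemon style.  "swslot" is a previously
 * allocated swap block and "swpps"/"swnpages" a caller-built cluster;
 * per the return-state notes above, on failure only "pg" is still
 * busy:
 *
 *	struct vm_page **ppsp = swpps;
 *
 *	uvm_lock_pageq();
 *	result = uvm_pager_put(NULL, pg, &ppsp, &swnpages,
 *	    PGO_PDFREECLUST, swslot, 0);
 *	(page queues are now unlocked)
 *	if (result == VM_PAGER_OK)
 *		(cluster pages were already un-busied for us)
 *	else if (result == VM_PAGER_PEND)
 *		(async: the aiodone handler finishes up)
 *	else
 *		(error: un-busy "pg"; the cluster was already dropped)
 */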

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			KASSERT(!uobj || obj_is_alive);

			/* only unlock the object if it is still alive...  */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if swap-backed cluster, unlock the anon/object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}
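
/*
 * Example (illustrative sketch): the error path in uvm_pager_put()
 * above is the typical caller.  it relocks the object, then drops
 * everything except the page of interest:
 *
 *	if (uobj)
 *		simple_lock(&uobj->vmobjlock);
 *	uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);
 *	(only "pg" remains busy; the other pages' swap slots were zapped)
 */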

#ifdef UBC
/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

	splassert(IPL_BIO);

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
#endif
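
/*
 * Example (illustrative sketch; the field setup, "doneoff" and
 * "chunklen" are assumptions, not a recipe from this file): a caller
 * splitting an i/o carves a child buf out of the master buf "mbp" and
 * points b_private back at it so uvm_aio_biodone1() can account the
 * child's completion against the master:
 *
 *	bp = pool_get(&bufpool, PR_WAITOK);
 *	bp->b_data = (char *)mbp->b_data + doneoff;
 *	bp->b_bcount = bp->b_bufsize = chunklen;
 *	bp->b_flags = B_BUSY | B_CALL | (mbp->b_flags & B_READ);
 *	bp->b_iodone = uvm_aio_biodone1;
 *	bp->b_private = mbp;
 *	(... point bp at the right vnode/block, then start the I/O ...)
 */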

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	splassert(IPL_BIO);

	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}
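
/*
 * Example (illustrative sketch of the consumer side; the real aiodone
 * daemon lives elsewhere): a thread-context loop drains uvm.aio_done
 * and runs each buf's iodone handler (uvm_aio_aiodone() for top-level
 * bufs) at splbio, roughly:
 *
 *	for (;;) {
 *		simple_lock(&uvm.aiodoned_lock);
 *		if ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL) {
 *			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
 *			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
 *			continue;
 *		}
 *		TAILQ_REMOVE(&uvm.aio_done, bp, b_freelist);
 *		simple_unlock(&uvm.aiodoned_lock);
 *		s = splbio();
 *		(*bp->b_iodone)(bp);
 *		splx(s);
 *	}
 */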

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	int i, error;
	boolean_t write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "bp %p", bp, 0,0,0);

	splassert(IPL_BIO);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
#ifdef UBC
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}
#endif

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(pdhist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
#ifdef UVM_SWAP_ENCRYPT
	/*
	 * XXX - assumes that we only get ASYNC writes. used to be above.
	 */
	if (pgs[0]->pqflags & PQ_ENCRYPT) {
		uvm_swap_freepages(pgs, npages);
		goto freed;
	}
#endif /* UVM_SWAP_ENCRYPT */
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			swap = (pg->pqflags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
		KASSERT(swap || pg->uobject == uobj);
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and we got an error, mark the pages
		 * PG_RELEASED so that uvm_page_unbusy() will free them.
		 */

		if (!write && error) {
			pg->flags |= PG_RELEASED;
			continue;
		}
		KASSERT(!write || (pgs[i]->flags & PG_FAKE) == 0);

		/*
		 * if this is a read and the page is PG_FAKE,
		 * or this was a write that did not fail with ENOMEM,
		 * mark the page PG_CLEAN and not PG_FAKE.
		 */

		if ((pgs[i]->flags & PG_FAKE) || (write && error != ENOMEM)) {
			pmap_clear_reference(pgs[i]);
			pmap_clear_modify(pgs[i]);
			pgs[i]->flags |= PG_CLEAN;
			pgs[i]->flags &= ~PG_FAKE;
		}
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

#ifdef UVM_SWAP_ENCRYPT
freed:
#endif
	if (write && (bp->b_flags & B_AGE) != 0 && bp->b_vp != NULL) {
		vwakeup(bp->b_vp);
	}
	pool_put(&bufpool, bp);
}

/*
 * translate unix errno values to VM_PAGER_*.
 */

int
uvm_errno2vmerror(errno)
	int errno;
{
	switch (errno) {
	case 0:
		return VM_PAGER_OK;
	case EINVAL:
		return VM_PAGER_BAD;
	case EINPROGRESS:
		return VM_PAGER_PEND;
	case EIO:
		return VM_PAGER_ERROR;
	case EAGAIN:
		return VM_PAGER_AGAIN;
	case EBUSY:
		return VM_PAGER_UNLOCK;
	default:
		return VM_PAGER_ERROR;
	}
}
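
/*
 * Example (illustrative sketch): pagers typically funnel buf error
 * codes through this translation before returning, e.g. after a
 * synchronous i/o:
 *
 *	error = biowait(bp);
 *	return (uvm_errno2vmerror(error));
 */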
917