/*	$OpenBSD: uvm_pager.c,v 1.54 2009/07/22 21:05:37 oga Exp $	*/
/*	$NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 *
 * Each uvm_pseg has room for MAX_PAGER_SEGS pager I/O segments of
 * MAXBSIZE bytes each.
 *
 * The number of uvm_pseg instances is dynamic; they are kept in the
 * array psegs.  At most PSEG_NUMSEGS instances can exist.
 *
 * psegs[0] always exists (so that the pager can always map in pages).
 * psegs[0] element 0 is always reserved for the pagedaemon.
 *
 * Any other pseg is automatically created when no space is available
 * and automatically destroyed when it is no longer in use.
 */
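/*
 * Illustrative layout (a sketch, assuming the usual 64KB MAXBSIZE):
 * each pseg covers MAX_PAGER_SEGS * MAXBSIZE = 16 * 64KB = 1MB of
 * contiguous KVA, and segment "id" of a pseg starts at
 *
 *	kva = pseg->start + id * MAXBSIZE;
 *
 * The "use" field is a bitmap of handed-out segments, e.g. use == 0x3
 * means segments 0 and 1 are busy.
 */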
#define MAX_PAGER_SEGS	16
#define PSEG_NUMSEGS	(PAGER_MAP_SIZE / MAX_PAGER_SEGS / MAXBSIZE)
struct uvm_pseg {
	/* Start of virtual space; 0 if not inited. */
	vaddr_t	start;
	/* Bitmap of the segments in use in this pseg. */
	int	use;
};
struct	mutex uvm_pseg_lck;
struct	uvm_pseg psegs[PSEG_NUMSEGS];

#define UVM_PSEG_FULL(pseg)	((pseg)->use == (1 << MAX_PAGER_SEGS) - 1)
#define UVM_PSEG_EMPTY(pseg)	((pseg)->use == 0)
#define UVM_PSEG_INUSE(pseg,id)	(((pseg)->use & (1 << (id))) != 0)

void		uvm_pseg_init(struct uvm_pseg *);
void		uvm_pseg_destroy(struct uvm_pseg *);
vaddr_t		uvm_pseg_get(int);
void		uvm_pseg_release(vaddr_t);

struct vm_page	*uvm_pageratop(vaddr_t);

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	int lcv;

	/*
	 * init pager map
	 */

	uvm_pseg_init(&psegs[0]);
	mtx_init(&uvm_pseg_lck, IPL_VM);

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * Initialize a uvm_pseg.
 *
 * May fail, in which case pseg->start == 0.
 *
 * Caller locks uvm_pseg_lck.
 */
void
uvm_pseg_init(struct uvm_pseg *pseg)
{
	KASSERT(pseg->start == 0);
	KASSERT(pseg->use == 0);
	pseg->start = uvm_km_valloc(kernel_map, MAX_PAGER_SEGS * MAXBSIZE);
}

/*
 * Destroy a uvm_pseg.
 *
 * Never fails.
 *
 * Requires that pseg != &psegs[0].
 *
 * Caller locks uvm_pseg_lck.
 */
void
uvm_pseg_destroy(struct uvm_pseg *pseg)
{
	KASSERT(pseg != &psegs[0]);
	KASSERT(pseg->start != 0);
	KASSERT(pseg->use == 0);
	uvm_km_free(kernel_map, pseg->start, MAX_PAGER_SEGS * MAXBSIZE);
	pseg->start = 0;
}

/*
 * Acquire a pager map segment.
 *
 * Returns a vaddr for paging. 0 on failure.
 *
 * Caller does not lock.
 */
vaddr_t
uvm_pseg_get(int flags)
{
	int i;
	struct uvm_pseg *pseg;

	mtx_enter(&uvm_pseg_lck);

pager_seg_restart:
	/* Find first pseg that has room. */
	for (pseg = &psegs[0]; pseg != &psegs[PSEG_NUMSEGS]; pseg++) {
		if (UVM_PSEG_FULL(pseg))
			continue;

		if (pseg->start == 0) {
			/* Need initialization. */
			uvm_pseg_init(pseg);
			if (pseg->start == 0)
				goto pager_seg_fail;
		}

		/* Keep index 0 reserved for pagedaemon. */
		if (pseg == &psegs[0] && curproc != uvm.pagedaemon_proc)
			i = 1;
		else
			i = 0;

		for (; i < MAX_PAGER_SEGS; i++) {
			if (!UVM_PSEG_INUSE(pseg, i)) {
				pseg->use |= 1 << i;
				mtx_leave(&uvm_pseg_lck);
				return pseg->start + i * MAXBSIZE;
			}
		}
	}

pager_seg_fail:
	if ((flags & UVMPAGER_MAPIN_WAITOK) != 0) {
		msleep(&psegs, &uvm_pseg_lck, PVM, "pagerseg", 0);
		goto pager_seg_restart;
	}

	mtx_leave(&uvm_pseg_lck);
	return 0;
}
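
/*
 * Illustrative use of the pseg interface (a sketch; uvm_pagermapin()
 * and uvm_pagermapout() below are the real callers):
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pseg_get(UVMPAGER_MAPIN_WAITOK);
 *	if (kva == 0)
 *		return (0);	(only possible without ..._WAITOK)
 *	... pmap_enter() up to MAXBSIZE bytes of pages at kva ...
 *	uvm_pseg_release(kva);
 */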

/*
 * Release a pager map segment.
 *
 * Caller does not lock.
 *
 * Deallocates pseg if it is no longer in use.
 */
void
uvm_pseg_release(vaddr_t segaddr)
{
	int id;
	struct uvm_pseg *pseg;

	for (pseg = &psegs[0]; pseg != &psegs[PSEG_NUMSEGS]; pseg++) {
		if (pseg->start <= segaddr &&
		    segaddr < pseg->start + MAX_PAGER_SEGS * MAXBSIZE)
			break;
	}
	KASSERT(pseg != &psegs[PSEG_NUMSEGS]);

	id = (segaddr - pseg->start) / MAXBSIZE;
	KASSERT(id >= 0 && id < MAX_PAGER_SEGS);

	/* test for no remainder */
	KDASSERT(segaddr == pseg->start + id * MAXBSIZE);

	mtx_enter(&uvm_pseg_lck);

	KASSERT(UVM_PSEG_INUSE(pseg, id));

	pseg->use &= ~(1 << id);
	wakeup(&psegs);

	if (pseg != &psegs[0] && UVM_PSEG_EMPTY(pseg))
		uvm_pseg_destroy(pseg);

	mtx_leave(&uvm_pseg_lck);
}

/*
 * uvm_pagermapin: map pages into KVA for I/O that needs mappings
 *
 * We grab a free pager-map segment with uvm_pseg_get() to reserve the
 * KVA and then use pmap_enter() to put the mappings in by hand.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vaddr_t kva, cva;
	vm_prot_t prot;
	vsize_t size;
	struct vm_page *pp;

	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=%p, npages=%ld, flags=%d)",
	    pps, npages, flags,0);
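
	/*
	 * A "read" mapin (UVMPAGER_MAPIN_READ) means the device will be
	 * writing into the pages (a pagein), so the kernel mapping must
	 * be writable as well.
	 */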
	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;
	size = ptoa(npages);

	KASSERT(size <= MAXBSIZE);

	kva = uvm_pseg_get(flags);
	if (kva == 0) {
		UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
		return 0;
	}

	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->pg_flags & PG_BUSY);
		/* Allow pmap_enter to fail. */
		if (pmap_enter(pmap_kernel(), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
			pmap_remove(pmap_kernel(), kva, cva);
			pmap_update(pmap_kernel());
			uvm_pseg_release(kva);
			UVMHIST_LOG(maphist,"<- pmap_enter failed", 0,0,0,0);
			return 0;
		}
	}
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist, "<- done (KVA=0x%lx)", kva,0,0,0);
	return kva;
}
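
/*
 * Illustrative pairing (a sketch of how the swap and vnode pagers
 * drive their I/O through the pager map):
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, UVMPAGER_MAPIN_WAITOK);
 *	... build a buf with b_data pointing at kva, start the I/O ...
 *	... once the I/O has completed: ...
 *	uvm_pagermapout(kva, npages);
 */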

/*
 * uvm_pagermapout: remove KVA mapping
 *
 * We remove the mappings by hand and then release the pager-map
 * segment, waking anyone waiting for KVA.
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%lx, npages=%ld)", kva, npages,0,0);

	pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));
	pmap_update(pmap_kernel());
	uvm_pseg_release(kva);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *	PGO_FREE: set the PG_RELEASED bits on the cluster so they'll be freed
 *		in async io (caller must clean on error).
 *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */
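
/*
 * For example (an illustrative sketch of option [2] above): a pager
 * opts in to generic clustering by plugging this function into its
 * pager ops,
 *
 *	struct uvm_pagerops some_pagerops = {
 *		...
 *		uvm_mk_pcluster,	(the pgo_mk_pcluster hook)
 *		...
 *	};
 *
 * while a pager that never clusters leaves the hook NULL.
 */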

struct vm_page **
uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
    struct vm_page *center, int flags, voff_t mlo, voff_t mhi)
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;
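
	/*
	 * Worked example (illustrative, assuming 4KB pages): with
	 * lo = 0x4000, hi = 0xc000 and center->offset = 0x8000,
	 * center_idx = (0x8000 - 0x4000) >> PAGE_SHIFT = 4, so the
	 * center page lands in pps[4].  Backward clustering walks ppsp
	 * down into pps[3], pps[2], ..., while forward clustering
	 * appends at ppsp[*npages], keeping the final cluster
	 * contiguous around the center page.
	 */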

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		      curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pg_flags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->pg_flags & PG_CLEANCHK) == 0) {
					if ((pclust->pg_flags & (PG_CLEAN|PG_BUSY))
					   == PG_CLEAN &&
					   pmap_is_modified(pclust))
						atomic_clearbits_int(
						    &pclust->pg_flags,
						    PG_CLEAN);
					/* now checked */
					atomic_setbits_int(&pclust->pg_flags,
					    PG_CLEANCHK);
				}
			}

			/* is the page available for cleaning, and does it need it? */
			if ((pclust->pg_flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!   enroll the page in our array */
			atomic_setbits_int(&pclust->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/*
			 * If we want to free after io is done, and we're
			 * async, set the released flag
			 */
			if ((flags & (PGO_FREE|PGO_SYNCIO)) == PGO_FREE)
				atomic_setbits_int(&pclust->pg_flags,
				    PG_RELEASED);

			/* XXX: protect wired page?   see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 *	PGO_FREE: tell the aio daemon to free pages in the async case.
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr64_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */
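
/*
 * Illustrative call (a sketch of the pagedaemon's object-backed case;
 * npages tells us how big the caller's array is, and the names below
 * are the caller's, not ours):
 *
 *	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp = pps;
 *	int npages = MAXBSIZE >> PAGE_SHIFT;
 *
 *	(page queues locked, pg busy and !PG_CLEAN)
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_ALLPAGES|PGO_PDFREECLUST, 0, 0);
 *	(page queues now unlocked; on VM_PAGER_OK the cluster has
 *	 already been un-busied for us because of PGO_PDFREECLUST)
 */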

int
uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
    struct vm_page ***ppsp_ptr, int *npages, int flags,
    voff_t start, voff_t stop)
{
	int result;
	daddr64_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;
	UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(pdhist);

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr64_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		UVMHIST_LOG(pdhist, "put -> %ld", result, 0,0,0);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		/* XXX daddr64_t -> int */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.  drop the cluster, if there was one:
	 * after that the caller only has the main page ("pg") to worry
	 * about.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			/* XXX daddr64_t -> int */
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pg_flags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
					       pg->offset >> PAGE_SHIFT,
					       nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					/* XXX daddr64_t -> int */
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					/* XXX daddr64_t -> int */
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */

			/* XXX daddr64_t -> int */
			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * before returning the error, relock the object if the caller
	 * expects it locked (PGO_PDFREECLUST set and result is not
	 * VM_PAGER_PEND), as per return-state item #3 above.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(struct uvm_object *uobj, struct vm_page *pg,
    struct vm_page **ppsp, int *npages, int flags)
{
	int lcv;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pg_flags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					  /* zap swap block */
					  ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->pg_flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->pg_flags & PG_RELEASED &&
		    ppsp[lcv]->pg_flags & PQ_ANON) {
				/* so that anfree will free */
				atomic_clearbits_int(&ppsp[lcv]->pg_flags,
				    PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
		} else {
			/*
			 * if we were planning on async io then we would
			 * have PG_RELEASED set, clear that with the others.
			 */
			atomic_clearbits_int(&ppsp[lcv]->pg_flags,
			    PG_BUSY|PG_WANTED|PG_FAKE|PG_RELEASED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			atomic_setbits_int(&ppsp[lcv]->pg_flags, PG_CLEAN);
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pg_flags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(struct buf *bp)
{
	splassert(IPL_BIO);

	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	mtx_enter(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	mtx_leave(&uvm.aiodoned_lock);
}
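
/*
 * The consumer side of the queue above (a sketch; the aiodoned daemon
 * in uvm_pdaemon.c is the real thing): a kernel thread sleeps on
 * &uvm.aiodoned, pulls each buf off uvm.aio_done under
 * uvm.aiodoned_lock, and calls its b_iodone hook, which for pager
 * i/o is uvm_aio_aiodone() below.  Roughly:
 *
 *	mtx_enter(&uvm.aiodoned_lock);
 *	while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL)
 *		msleep(&uvm.aiodoned, &uvm.aiodoned_lock, PVM, "aiodoned", 0);
 *	TAILQ_REMOVE(&uvm.aio_done, bp, b_freelist);
 *	mtx_leave(&uvm.aiodoned_lock);
 *	(*bp->b_iodone)(bp);
 *
 * (the real daemon runs the hook at splbio(), as uvm_aio_aiodone()
 * asserts below.)
 */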

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[MAXPHYS >> PAGE_SHIFT];
	struct uvm_object *uobj;
	int i, error;
	boolean_t write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "bp %p", bp, 0,0,0);

	KASSERT(npages <= MAXPHYS >> PAGE_SHIFT);
	splassert(IPL_BIO);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
#ifdef UBC
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}
#endif

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(pdhist, "pgs[%ld] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
#ifdef UVM_SWAP_ENCRYPT
	/*
	 * XXX - assumes that we only get ASYNC writes. used to be above.
	 */
	if (pgs[0]->pg_flags & PQ_ENCRYPT) {
		uvm_swap_freepages(pgs, npages);
		goto freed;
	}
#endif /* UVM_SWAP_ENCRYPT */
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			swap = (pg->pg_flags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
		KASSERT(swap || pg->uobject == uobj);
		if (swap) {
			if (pg->pg_flags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and we got an error, mark the pages
		 * PG_RELEASED so that uvm_page_unbusy() will free them.
		 */
		if (!write && error) {
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			continue;
		}
		KASSERT(!write || (pgs[i]->pg_flags & PG_FAKE) == 0);

		/*
		 * if this is a read and the page is PG_FAKE,
		 * or this was a successful write,
		 * mark the page PG_CLEAN and not PG_FAKE.
		 */

		if ((pgs[i]->pg_flags & PG_FAKE) || (write && error != ENOMEM)) {
			pmap_clear_reference(pgs[i]);
			pmap_clear_modify(pgs[i]);
			atomic_setbits_int(&pgs[i]->pg_flags, PG_CLEAN);
			atomic_clearbits_int(&pgs[i]->pg_flags, PG_FAKE);
		}
		if (swap) {
			if (pg->pg_flags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

#ifdef UVM_SWAP_ENCRYPT
freed:
#endif
	if (write && (bp->b_flags & B_AGE) != 0 && bp->b_vp != NULL) {
		vwakeup(bp->b_vp);
	}
	pool_put(&bufpool, bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */
struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	boolean_t rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}