xref: /dflybsd-src/sys/kern/vfs_bio.c (revision dae741e33c840b92a8a53bf9f01157ede145e256)
1 /*
2  * Copyright (c) 1994,1997 John S. Dyson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice immediately at the beginning of the file, without modification,
10  *    this list of conditions, and the following disclaimer.
11  * 2. Absolutely no warranty of function or purpose is made by the author
12  *		John S. Dyson.
13  *
14  * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $
15  */
16 
17 /*
18  * this file contains a new buffer I/O scheme implementing a coherent
19  * VM object and buffer cache scheme.  Pains have been taken to make
20  * sure that the performance degradation associated with schemes such
21  * as this is not realized.
22  *
23  * Author:  John S. Dyson
24  * Significant help during the development and debugging phases
25  * had been provided by David Greenman, also of the FreeBSD core team.
26  *
27  * see man buf(9) for more info.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/buf.h>
33 #include <sys/conf.h>
34 #include <sys/devicestat.h>
35 #include <sys/eventhandler.h>
36 #include <sys/lock.h>
37 #include <sys/malloc.h>
38 #include <sys/mount.h>
39 #include <sys/kernel.h>
40 #include <sys/kthread.h>
41 #include <sys/proc.h>
42 #include <sys/reboot.h>
43 #include <sys/resourcevar.h>
44 #include <sys/sysctl.h>
45 #include <sys/vmmeter.h>
46 #include <sys/vnode.h>
47 #include <sys/dsched.h>
48 #include <vm/vm.h>
49 #include <vm/vm_param.h>
50 #include <vm/vm_kern.h>
51 #include <vm/vm_pageout.h>
52 #include <vm/vm_page.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_pager.h>
57 #include <vm/swap_pager.h>
58 
59 #include <sys/buf2.h>
60 #include <sys/thread2.h>
61 #include <sys/spinlock2.h>
62 #include <sys/mplock2.h>
63 #include <vm/vm_page2.h>
64 
65 #include "opt_ddb.h"
66 #ifdef DDB
67 #include <ddb/ddb.h>
68 #endif
69 
70 /*
71  * Buffer queues.
72  */
73 enum bufq_type {
74 	BQUEUE_NONE,    	/* not on any queue */
75 	BQUEUE_LOCKED,  	/* locked buffers */
76 	BQUEUE_CLEAN,   	/* non-B_DELWRI buffers */
77 	BQUEUE_DIRTY,   	/* B_DELWRI buffers */
78 	BQUEUE_DIRTY_HW,   	/* B_DELWRI buffers - heavy weight */
79 	BQUEUE_EMPTYKVA, 	/* empty buffer headers with KVA assignment */
80 	BQUEUE_EMPTY,    	/* empty buffer headers */
81 
82 	BUFFER_QUEUES		/* number of buffer queues */
83 };
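
/*
 * Queue lifecycle summary (derived from brelse()/bqrelse() below): a
 * buffer typically starts on BQUEUE_EMPTY or BQUEUE_EMPTYKVA, moves to
 * BQUEUE_NONE while locked and in use, and is returned by brelse() or
 * bqrelse() to BQUEUE_CLEAN, BQUEUE_DIRTY, BQUEUE_DIRTY_HW, or
 * BQUEUE_LOCKED depending on B_DELWRI, B_HEAVY, and B_LOCKED.
 */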
84 
85 typedef enum bufq_type bufq_type_t;
86 
87 #define BD_WAKE_SIZE	16384
88 #define BD_WAKE_MASK	(BD_WAKE_SIZE - 1)
89 
90 TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
91 static struct spinlock bufqspin = SPINLOCK_INITIALIZER(&bufqspin);
92 static struct spinlock bufcspin = SPINLOCK_INITIALIZER(&bufcspin);
93 
94 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
95 
96 struct buf *buf;		/* buffer header pool */
97 
98 static void vfs_clean_pages(struct buf *bp);
99 static void vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m);
100 static void vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m);
101 static void vfs_vmio_release(struct buf *bp);
102 static int flushbufqueues(bufq_type_t q);
103 static vm_page_t bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit);
104 
105 static void bd_signal(int totalspace);
106 static void buf_daemon(void);
107 static void buf_daemon_hw(void);
108 
109 /*
110  * bogus page -- for I/O to/from partially complete buffers
111  * this is a temporary solution to the problem, but it is not
112  * really that bad.  it would be better to split the buffer
113  * for input in the case of buffers partially already in memory,
114  * but the code is intricate enough already.
115  */
116 vm_page_t bogus_page;
117 
118 /*
119  * These would all be static, but we make the ones we export global
120  * so we do not need to use compiler magic.
121  */
122 int bufspace;			/* locked by buffer_map */
123 int maxbufspace;
124 static int bufmallocspace;	/* atomic ops */
125 int maxbufmallocspace, lobufspace, hibufspace;
126 static int bufreusecnt, bufdefragcnt, buffreekvacnt;
127 static int lorunningspace;
128 static int hirunningspace;
129 static int runningbufreq;		/* locked by bufcspin */
130 static int dirtybufspace;		/* locked by bufcspin */
131 static int dirtybufcount;		/* locked by bufcspin */
132 static int dirtybufspacehw;		/* locked by bufcspin */
133 static int dirtybufcounthw;		/* locked by bufcspin */
134 static int runningbufspace;		/* locked by bufcspin */
135 static int runningbufcount;		/* locked by bufcspin */
136 int lodirtybufspace;
137 int hidirtybufspace;
138 static int getnewbufcalls;
139 static int getnewbufrestarts;
140 static int recoverbufcalls;
141 static int needsbuffer;		/* locked by bufcspin */
142 static int bd_request;		/* locked by bufcspin */
143 static int bd_request_hw;	/* locked by bufcspin */
144 static u_int bd_wake_ary[BD_WAKE_SIZE];
145 static u_int bd_wake_index;
146 static u_int vm_cycle_point = 40; /* 23-36 will migrate more act->inact */
147 static int debug_commit;
148 
149 static struct thread *bufdaemon_td;
150 static struct thread *bufdaemonhw_td;
151 static u_int lowmempgallocs;
152 static u_int lowmempgfails;
153 
154 /*
155  * Sysctls for operational control of the buffer cache.
156  */
157 SYSCTL_INT(_vfs, OID_AUTO, lodirtybufspace, CTLFLAG_RW, &lodirtybufspace, 0,
158 	"Number of dirty buffers to flush before bufdaemon becomes inactive");
159 SYSCTL_INT(_vfs, OID_AUTO, hidirtybufspace, CTLFLAG_RW, &hidirtybufspace, 0,
160 	"High watermark used to trigger explicit flushing of dirty buffers");
161 SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
162 	"Minimum amount of buffer space required for active I/O");
163 SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
164 	"Maximum amount of buffer space to usable for active I/O");
165 SYSCTL_UINT(_vfs, OID_AUTO, lowmempgallocs, CTLFLAG_RW, &lowmempgallocs, 0,
166 	"Page allocations done during periods of very low free memory");
167 SYSCTL_UINT(_vfs, OID_AUTO, lowmempgfails, CTLFLAG_RW, &lowmempgfails, 0,
168 	"Page allocations which failed during periods of very low free memory");
169 SYSCTL_UINT(_vfs, OID_AUTO, vm_cycle_point, CTLFLAG_RW, &vm_cycle_point, 0,
170 	"Recycle pages to active or inactive queue transition pt 0-64");
171 /*
172  * Sysctls determining current state of the buffer cache.
173  */
174 SYSCTL_INT(_vfs, OID_AUTO, nbuf, CTLFLAG_RD, &nbuf, 0,
175 	"Total number of buffers in buffer cache");
176 SYSCTL_INT(_vfs, OID_AUTO, dirtybufspace, CTLFLAG_RD, &dirtybufspace, 0,
177 	"Pending bytes of dirty buffers (all)");
178 SYSCTL_INT(_vfs, OID_AUTO, dirtybufspacehw, CTLFLAG_RD, &dirtybufspacehw, 0,
179 	"Pending bytes of dirty buffers (heavy weight)");
180 SYSCTL_INT(_vfs, OID_AUTO, dirtybufcount, CTLFLAG_RD, &dirtybufcount, 0,
181 	"Pending number of dirty buffers");
182 SYSCTL_INT(_vfs, OID_AUTO, dirtybufcounthw, CTLFLAG_RD, &dirtybufcounthw, 0,
183 	"Pending number of dirty buffers (heavy weight)");
184 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
185 	"I/O bytes currently in progress due to asynchronous writes");
186 SYSCTL_INT(_vfs, OID_AUTO, runningbufcount, CTLFLAG_RD, &runningbufcount, 0,
187 	"I/O buffers currently in progress due to asynchronous writes");
188 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
189 	"Hard limit on maximum amount of memory usable for buffer space");
190 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
191 	"Soft limit on maximum amount of memory usable for buffer space");
192 SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
193 	"Minimum amount of memory to reserve for system buffer space");
194 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
195 	"Amount of memory available for buffers");
196 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RD, &maxbufmallocspace,
197 	0, "Maximum amount of memory reserved for buffers using malloc");
198 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
199 	"Amount of memory left for buffers using malloc-scheme");
200 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD, &getnewbufcalls, 0,
201 	"New buffer header acquisition requests");
202 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD, &getnewbufrestarts,
203 	0, "New buffer header acquisition restarts");
204 SYSCTL_INT(_vfs, OID_AUTO, recoverbufcalls, CTLFLAG_RD, &recoverbufcalls, 0,
205 	"Recover VM space in an emergency");
206 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RD, &bufdefragcnt, 0,
207 	"Buffer acquisition restarts due to fragmented buffer map");
208 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RD, &buffreekvacnt, 0,
209 	"Amount of time KVA space was deallocated in an arbitrary buffer");
210 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RD, &bufreusecnt, 0,
211 	"Amount of time buffer re-use operations were successful");
212 SYSCTL_INT(_vfs, OID_AUTO, debug_commit, CTLFLAG_RW, &debug_commit, 0, "");
213 SYSCTL_INT(_debug_sizeof, OID_AUTO, buf, CTLFLAG_RD, 0, sizeof(struct buf),
214 	"sizeof(struct buf)");
215 
216 char *buf_wmesg = BUF_WMESG;
217 
218 #define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
219 #define VFS_BIO_NEED_UNUSED02	0x02
220 #define VFS_BIO_NEED_UNUSED04	0x04
221 #define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
222 
223 /*
224  * bufspacewakeup:
225  *
226  *	Called when buffer space is potentially available for recovery.
227  *	getnewbuf() will block on this flag when it is unable to free
228  *	sufficient buffer space.  Buffer space becomes recoverable when
229  *	bp's get placed back in the queues.
230  */
231 static __inline void
232 bufspacewakeup(void)
233 {
234 	/*
235 	 * If someone is waiting for BUF space, wake them up.  Even
236 	 * though we haven't freed the kva space yet, the waiting
237 	 * process will be able to now.
238 	 */
239 	spin_lock(&bufcspin);
240 	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
241 		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
242 		spin_unlock(&bufcspin);
243 		wakeup(&needsbuffer);
244 	} else {
245 		spin_unlock(&bufcspin);
246 	}
247 }
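
/*
 * A minimal sketch of the waiter side that pairs with bufspacewakeup()
 * and bufcountwakeup().  The real waiter lives in getnewbuf(), outside
 * this section; the disabled fragment below only illustrates the
 * interlock pattern against bufcspin and the needsbuffer flags.
 */
#if 0
	spin_lock(&bufcspin);
	needsbuffer |= VFS_BIO_NEED_BUFSPACE;
	ssleep(&needsbuffer, &bufcspin, 0, "newbuf", hz);
	spin_unlock(&bufcspin);
#endif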
248 
249 /*
250  * runningbufwakeup:
251  *
252  *	Accounting for I/O in progress.
253  *
254  */
255 static __inline void
256 runningbufwakeup(struct buf *bp)
257 {
258 	int totalspace;
259 	int limit;
260 
261 	if ((totalspace = bp->b_runningbufspace) != 0) {
262 		spin_lock(&bufcspin);
263 		runningbufspace -= totalspace;
264 		--runningbufcount;
265 		bp->b_runningbufspace = 0;
266 
267 		/*
268 		 * see waitrunningbufspace() for limit test.
269 		 */
270 		limit = hirunningspace * 4 / 6;
271 		if (runningbufreq && runningbufspace <= limit) {
272 			runningbufreq = 0;
273 			spin_unlock(&bufcspin);
274 			wakeup(&runningbufreq);
275 		} else {
276 			spin_unlock(&bufcspin);
277 		}
278 		bd_signal(totalspace);
279 	}
280 }
281 
282 /*
283  * bufcountwakeup:
284  *
285  *	Called when a buffer has been added to one of the free queues to
286  *	account for the buffer and to wakeup anyone waiting for free buffers.
287  *	This typically occurs when large amounts of metadata are being handled
288  *	by the buffer cache ( else buffer space runs out first, usually ).
289  *
290  * MPSAFE
291  */
292 static __inline void
293 bufcountwakeup(void)
294 {
295 	spin_lock(&bufcspin);
296 	if (needsbuffer) {
297 		needsbuffer &= ~VFS_BIO_NEED_ANY;
298 		spin_unlock(&bufcspin);
299 		wakeup(&needsbuffer);
300 	} else {
301 		spin_unlock(&bufcspin);
302 	}
303 }
304 
305 /*
306  * waitrunningbufspace()
307  *
308  * Wait for the amount of running I/O to drop to hirunningspace * 4 / 6.
309  * This is the point where write bursting stops so we don't want to wait
310  * for the running amount to drop below it (at least if we still want bioq
311  * to burst writes).
312  *
313  * The caller may be using this function to block in a tight loop; we
314  * must block while runningbufspace is greater than or equal to
315  * hirunningspace * 4 / 6.
316  *
317  * And even with that it may not be enough, due to the presence of
318  * B_LOCKED dirty buffers, so also wait for at least one running buffer
319  * to complete.
320  */
321 void
322 waitrunningbufspace(void)
323 {
324 	int limit = hirunningspace * 4 / 6;
325 	int dummy;
326 
327 	spin_lock(&bufcspin);
328 	if (runningbufspace > limit) {
329 		while (runningbufspace > limit) {
330 			++runningbufreq;
331 			ssleep(&runningbufreq, &bufcspin, 0, "wdrn1", 0);
332 		}
333 		spin_unlock(&bufcspin);
334 	} else if (runningbufspace > limit / 2) {
335 		++runningbufreq;
336 		spin_unlock(&bufcspin);
337 		tsleep(&dummy, 0, "wdrn2", 1);
338 	} else {
339 		spin_unlock(&bufcspin);
340 	}
341 }
342 
343 /*
344  * buf_dirty_count_severe:
345  *
346  *	Return true if we have too many dirty buffers.
347  */
348 int
349 buf_dirty_count_severe(void)
350 {
351 	return (runningbufspace + dirtybufspace >= hidirtybufspace ||
352 	        dirtybufcount >= nbuf / 2);
353 }
354 
355 /*
356  * Return true if the amount of running I/O is severe and BIOQ should
357  * start bursting.
358  */
359 int
360 buf_runningbufspace_severe(void)
361 {
362 	return (runningbufspace >= hirunningspace * 4 / 6);
363 }
364 
365 /*
366  * vfs_buf_test_cache:
367  *
368  * Called when a buffer is extended.  This function clears the B_CACHE
369  * bit if the newly extended portion of the buffer does not contain
370  * valid data.
371  *
372  * NOTE! Dirty VM pages are not processed into dirty (B_DELWRI) buffer
373  * cache buffers.  The VM pages remain dirty, as someone had mmap()'d
374  * them while a clean buffer was present.
375  */
376 static __inline__
377 void
378 vfs_buf_test_cache(struct buf *bp,
379 		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
380 		  vm_page_t m)
381 {
382 	if (bp->b_flags & B_CACHE) {
383 		int base = (foff + off) & PAGE_MASK;
384 		if (vm_page_is_valid(m, base, size) == 0)
385 			bp->b_flags &= ~B_CACHE;
386 	}
387 }
388 
389 /*
390  * bd_speedup()
391  *
392  * Spank the buf_daemon[_hw] if the total dirty buffer space exceeds the
393  * low water mark.
394  *
395  * MPSAFE
396  */
397 static __inline__
398 void
399 bd_speedup(void)
400 {
401 	if (dirtybufspace < lodirtybufspace && dirtybufcount < nbuf / 2)
402 		return;
403 
404 	if (bd_request == 0 &&
405 	    (dirtybufspace - dirtybufspacehw > lodirtybufspace / 2 ||
406 	     dirtybufcount - dirtybufcounthw >= nbuf / 2)) {
407 		spin_lock(&bufcspin);
408 		bd_request = 1;
409 		spin_unlock(&bufcspin);
410 		wakeup(&bd_request);
411 	}
412 	if (bd_request_hw == 0 &&
413 	    (dirtybufspacehw > lodirtybufspace / 2 ||
414 	     dirtybufcounthw >= nbuf / 2)) {
415 		spin_lock(&bufcspin);
416 		bd_request_hw = 1;
417 		spin_unlock(&bufcspin);
418 		wakeup(&bd_request_hw);
419 	}
420 }
421 
422 /*
423  * bd_heatup()
424  *
425  *	Get the buf_daemon heated up when the number of running and dirty
426  *	buffers exceeds the mid-point.
427  *
428  *	Return the total number of dirty bytes past the second mid point
429  *	as a measure of how much excess dirty data there is in the system.
430  *
431  * MPSAFE
432  */
433 int
434 bd_heatup(void)
435 {
436 	int mid1;
437 	int mid2;
438 	int totalspace;
439 
440 	mid1 = lodirtybufspace + (hidirtybufspace - lodirtybufspace) / 2;
441 
442 	totalspace = runningbufspace + dirtybufspace;
443 	if (totalspace >= mid1 || dirtybufcount >= nbuf / 2) {
444 		bd_speedup();
445 		mid2 = mid1 + (hidirtybufspace - mid1) / 2;
446 		if (totalspace >= mid2)
447 			return(totalspace - mid2);
448 	}
449 	return(0);
450 }
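
/*
 * Worked example for the watermarks above (illustrative numbers only):
 * with lodirtybufspace = 100MB and hidirtybufspace = 200MB,
 * mid1 = 100MB + (200MB - 100MB) / 2 = 150MB and
 * mid2 = 150MB + (200MB - 150MB) / 2 = 175MB, so a totalspace of
 * 190MB heats up the daemons and returns 190MB - 175MB = 15MB of
 * excess dirty data.
 */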
451 
452 /*
453  * bd_wait()
454  *
455  *	Wait for the buffer cache to flush (totalspace) bytes worth of
456  *	buffers, then return.
457  *
458  *	Regardless, this function blocks while the number of dirty buffers
459  *	exceeds hidirtybufspace.
460  *
461  * MPSAFE
462  */
463 void
464 bd_wait(int totalspace)
465 {
466 	u_int i;
467 	int count;
468 
469 	if (curthread == bufdaemonhw_td || curthread == bufdaemon_td)
470 		return;
471 
472 	while (totalspace > 0) {
473 		bd_heatup();
474 		if (totalspace > runningbufspace + dirtybufspace)
475 			totalspace = runningbufspace + dirtybufspace;
476 		count = totalspace / BKVASIZE;
477 		if (count >= BD_WAKE_SIZE)
478 			count = BD_WAKE_SIZE - 1;
479 
480 		spin_lock(&bufcspin);
481 		i = (bd_wake_index + count) & BD_WAKE_MASK;
482 		++bd_wake_ary[i];
483 
484 		/*
485 		 * This is not a strict interlock, so we play a bit loose
486 		 * with locking access to dirtybufspace*
487 		 */
488 		tsleep_interlock(&bd_wake_ary[i], 0);
489 		spin_unlock(&bufcspin);
490 		tsleep(&bd_wake_ary[i], PINTERLOCKED, "flstik", hz);
491 
492 		totalspace = runningbufspace + dirtybufspace - hidirtybufspace;
493 	}
494 }
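
/*
 * Minimal usage sketch (hypothetical caller, not from this file): a
 * write path can heat up the flusher daemons and then block on the
 * excess reported by bd_heatup(), waking as bd_signal() retires that
 * many bytes of run+dirty space.
 */
#if 0
	int totalspace;

	if ((totalspace = bd_heatup()) > 0)
		bd_wait(totalspace);
#endif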
495 
496 /*
497  * bd_signal()
498  *
499  *	This function is called whenever runningbufspace or dirtybufspace
500  *	is reduced.  Track threads waiting for run+dirty buffer I/O
501  *	to complete.
502  *
503  * MPSAFE
504  */
505 static void
506 bd_signal(int totalspace)
507 {
508 	u_int i;
509 
510 	if (totalspace > 0) {
511 		if (totalspace > BKVASIZE * BD_WAKE_SIZE)
512 			totalspace = BKVASIZE * BD_WAKE_SIZE;
513 		spin_lock(&bufcspin);
514 		while (totalspace > 0) {
515 			i = bd_wake_index++;
516 			i &= BD_WAKE_MASK;
517 			if (bd_wake_ary[i]) {
518 				bd_wake_ary[i] = 0;
519 				spin_unlock(&bufcspin);
520 				wakeup(&bd_wake_ary[i]);
521 				spin_lock(&bufcspin);
522 			}
523 			totalspace -= BKVASIZE;
524 		}
525 		spin_unlock(&bufcspin);
526 	}
527 }
528 
529 /*
530  * BIO tracking support routines.
531  *
532  * Release a ref on a bio_track.  Wakeup requests are atomically released
533  * along with the last reference so bk_active will never wind up set to
534  * only 0x80000000.
535  *
536  * MPSAFE
537  */
538 static
539 void
540 bio_track_rel(struct bio_track *track)
541 {
542 	int	active;
543 	int	desired;
544 
545 	/*
546 	 * Shortcut
547 	 */
548 	active = track->bk_active;
549 	if (active == 1 && atomic_cmpset_int(&track->bk_active, 1, 0))
550 		return;
551 
552 	/*
553 	 * Full-on.  Note that the wait flag is only atomically released on
554 	 * the 1->0 count transition.
555 	 *
556 	 * We check for a negative count transition using bit 30 since bit 31
557 	 * has a different meaning.
558 	 */
559 	for (;;) {
560 		desired = (active & 0x7FFFFFFF) - 1;
561 		if (desired)
562 			desired |= active & 0x80000000;
563 		if (atomic_cmpset_int(&track->bk_active, active, desired)) {
564 			if (desired & 0x40000000)
565 				panic("bio_track_rel: bad count: %p\n", track);
566 			if (active & 0x80000000)
567 				wakeup(track);
568 			break;
569 		}
570 		active = track->bk_active;
571 	}
572 }
573 
574 /*
575  * Wait for the tracking count to reach 0.
576  *
577  * Use atomic ops such that the wait flag is only set atomically when
578  * bk_active is non-zero.
579  *
580  * MPSAFE
581  */
582 int
583 bio_track_wait(struct bio_track *track, int slp_flags, int slp_timo)
584 {
585 	int	active;
586 	int	desired;
587 	int	error;
588 
589 	/*
590 	 * Shortcut
591 	 */
592 	if (track->bk_active == 0)
593 		return(0);
594 
595 	/*
596 	 * Full-on.  Note that the wait flag may only be atomically set if
597 	 * the active count is non-zero.
598 	 *
599 	 * NOTE: We cannot optimize active == desired since a wakeup could
600 	 *	 clear active prior to our tsleep_interlock().
601 	 */
602 	error = 0;
603 	while ((active = track->bk_active) != 0) {
604 		cpu_ccfence();
605 		desired = active | 0x80000000;
606 		tsleep_interlock(track, slp_flags);
607 		if (atomic_cmpset_int(&track->bk_active, active, desired)) {
608 			error = tsleep(track, slp_flags | PINTERLOCKED,
609 				       "trwait", slp_timo);
610 			if (error)
611 				break;
612 		}
613 	}
614 	return (error);
615 }
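
/*
 * For reference, bk_active packs the reference count into bits 0-30
 * and the wait flag into bit 31 (0x80000000).  A sketch of a full
 * tracking cycle, assuming the bio_track_ref() increment helper from
 * sys/buf2.h, looks like:
 */
#if 0
	bio_track_ref(&vp->v_track_write);	  /* I/O initiated */
	/* ... I/O in flight ... */
	bio_track_rel(&vp->v_track_write);	  /* done, may wakeup */
	bio_track_wait(&vp->v_track_write, 0, 0); /* drain count to 0 */
#endif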
616 
617 /*
618  * bufinit:
619  *
620  *	Load-time initialization of the buffer cache, called from machine-
621  *	dependent initialization code.
622  */
623 void
624 bufinit(void)
625 {
626 	struct buf *bp;
627 	vm_offset_t bogus_offset;
628 	int i;
629 
630 	/* first, make a null set of free lists */
631 	for (i = 0; i < BUFFER_QUEUES; i++)
632 		TAILQ_INIT(&bufqueues[i]);
633 
634 	/* finally, initialize each buffer header and stick on empty q */
635 	for (i = 0; i < nbuf; i++) {
636 		bp = &buf[i];
637 		bzero(bp, sizeof *bp);
638 		bp->b_flags = B_INVAL;	/* we're just an empty header */
639 		bp->b_cmd = BUF_CMD_DONE;
640 		bp->b_qindex = BQUEUE_EMPTY;
641 		initbufbio(bp);
642 		xio_init(&bp->b_xio);
643 		buf_dep_init(bp);
644 		TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_EMPTY], bp, b_freelist);
645 	}
646 
647 	/*
648 	 * maxbufspace is the absolute maximum amount of buffer space we are
649 	 * allowed to reserve in KVM and in real terms.  The absolute maximum
650 	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
651 	 * used by most other processes.  The differential is required to
652 	 * ensure that buf_daemon is able to run when other processes might
653 	 * be blocked waiting for buffer space.
654 	 *
655 	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
656 	 * this may result in KVM fragmentation which is not handled optimally
657 	 * by the system.
658 	 */
659 	maxbufspace = nbuf * BKVASIZE;
660 	hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
661 	lobufspace = hibufspace - MAXBSIZE;
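	/*
	 * Worked example (illustrative values only): with nbuf = 8192
	 * and BKVASIZE = 16KB this gives maxbufspace = 128MB; hibufspace
	 * becomes the larger of 96MB and 128MB - 10 * MAXBSIZE, keeping
	 * roughly a ten-big-buffer cushion for buf_daemon, and lobufspace
	 * sits one MAXBSIZE below hibufspace.
	 */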
662 
663 	lorunningspace = 512 * 1024;
664 	/* hirunningspace -- see below */
665 
666 	/*
667 	 * Limit the amount of malloc memory since it is wired permanently
668 	 * into the kernel space.  Even though this is accounted for in
669 	 * the buffer allocation, we don't want the malloced region to grow
670 	 * uncontrolled.  The malloc scheme improves memory utilization
671 	 * significantly on average (small) directories.
672 	 */
673 	maxbufmallocspace = hibufspace / 20;
674 
675 	/*
676 	 * Reduce the chance of a deadlock occurring by limiting the number
677 	 * of delayed-write dirty buffers we allow to stack up.
678 	 *
679 	 * We don't want too much actually queued to the device at once
680 	 * (XXX this needs to be per-mount!), because the buffers will
681 	 * wind up locked for a very long period of time while the I/O
682 	 * drains.
683 	 */
684 	hidirtybufspace = hibufspace / 2;	/* dirty + running */
685 	hirunningspace = hibufspace / 16;	/* locked & queued to device */
686 	if (hirunningspace < 1024 * 1024)
687 		hirunningspace = 1024 * 1024;
688 
689 	dirtybufspace = 0;
690 	dirtybufspacehw = 0;
691 
692 	lodirtybufspace = hidirtybufspace / 2;
693 
694 	/*
695 	 * Maximum number of async ops initiated per buf_daemon loop.  This is
696 	 * somewhat of a hack at the moment, we really need to limit ourselves
697 	 * based on the number of bytes of I/O in-transit that were initiated
698 	 * from buf_daemon.
699 	 */
700 
701 	bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
702 	vm_object_hold(&kernel_object);
703 	bogus_page = vm_page_alloc(&kernel_object,
704 				   (bogus_offset >> PAGE_SHIFT),
705 				   VM_ALLOC_NORMAL);
706 	vm_object_drop(&kernel_object);
707 	vmstats.v_wire_count++;
708 
709 }
710 
711 /*
712  * Initialize the embedded bio structures, typically used by
713  * deprecated code which tries to allocate its own struct bufs.
714  */
715 void
716 initbufbio(struct buf *bp)
717 {
718 	bp->b_bio1.bio_buf = bp;
719 	bp->b_bio1.bio_prev = NULL;
720 	bp->b_bio1.bio_offset = NOOFFSET;
721 	bp->b_bio1.bio_next = &bp->b_bio2;
722 	bp->b_bio1.bio_done = NULL;
723 	bp->b_bio1.bio_flags = 0;
724 
725 	bp->b_bio2.bio_buf = bp;
726 	bp->b_bio2.bio_prev = &bp->b_bio1;
727 	bp->b_bio2.bio_offset = NOOFFSET;
728 	bp->b_bio2.bio_next = NULL;
729 	bp->b_bio2.bio_done = NULL;
730 	bp->b_bio2.bio_flags = 0;
731 
732 	BUF_LOCKINIT(bp);
733 }
734 
735 /*
736  * Reinitialize the embedded bio structures as well as any additional
737  * translation cache layers.
738  */
739 void
740 reinitbufbio(struct buf *bp)
741 {
742 	struct bio *bio;
743 
744 	for (bio = &bp->b_bio1; bio; bio = bio->bio_next) {
745 		bio->bio_done = NULL;
746 		bio->bio_offset = NOOFFSET;
747 	}
748 }
749 
750 /*
751  * Undo the effects of an initbufbio().
752  */
753 void
754 uninitbufbio(struct buf *bp)
755 {
756 	dsched_exit_buf(bp);
757 	BUF_LOCKFREE(bp);
758 }
759 
760 /*
761  * Push another BIO layer onto an existing BIO and return it.  The new
762  * BIO layer may already exist, holding cached translation data.
763  */
764 struct bio *
765 push_bio(struct bio *bio)
766 {
767 	struct bio *nbio;
768 
769 	if ((nbio = bio->bio_next) == NULL) {
770 		int index = bio - &bio->bio_buf->b_bio_array[0];
771 		if (index >= NBUF_BIO - 1) {
772 			panic("push_bio: too many layers bp %p\n",
773 				bio->bio_buf);
774 		}
775 		nbio = &bio->bio_buf->b_bio_array[index + 1];
776 		bio->bio_next = nbio;
777 		nbio->bio_prev = bio;
778 		nbio->bio_buf = bio->bio_buf;
779 		nbio->bio_offset = NOOFFSET;
780 		nbio->bio_done = NULL;
781 		nbio->bio_next = NULL;
782 	}
783 	KKASSERT(nbio->bio_done == NULL);
784 	return(nbio);
785 }
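
/*
 * A minimal sketch of how a translation layer might use push_bio()
 * (hypothetical fragment, not from this file): push a new BIO, fill
 * in the translated offset and completion hook, then hand it down.
 */
#if 0
	struct bio *nbio;

	nbio = push_bio(bio);
	nbio->bio_offset = translated_offset;	/* hypothetical value */
	nbio->bio_done = my_layer_done;		/* hypothetical callback */
	vn_strategy(devvp, nbio);
#endif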
786 
787 /*
788  * Pop a BIO translation layer, returning the previous layer.  The
789  * BIO must have been previously pushed.
790  */
791 struct bio *
792 pop_bio(struct bio *bio)
793 {
794 	return(bio->bio_prev);
795 }
796 
797 void
798 clearbiocache(struct bio *bio)
799 {
800 	while (bio) {
801 		bio->bio_offset = NOOFFSET;
802 		bio = bio->bio_next;
803 	}
804 }
805 
806 /*
807  * bfreekva:
808  *
809  *	Free the KVA allocation for buffer 'bp'.
810  *
811  *	Must be called from a critical section as this is the only locking for
812  *	buffer_map.
813  *
814  *	Since this call frees up buffer space, we call bufspacewakeup().
815  *
816  * MPALMOSTSAFE
817  */
818 static void
819 bfreekva(struct buf *bp)
820 {
821 	int count;
822 
823 	if (bp->b_kvasize) {
824 		++buffreekvacnt;
825 		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
826 		vm_map_lock(&buffer_map);
827 		bufspace -= bp->b_kvasize;
828 		vm_map_delete(&buffer_map,
829 		    (vm_offset_t) bp->b_kvabase,
830 		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize,
831 		    &count
832 		);
833 		vm_map_unlock(&buffer_map);
834 		vm_map_entry_release(count);
835 		bp->b_kvasize = 0;
836 		bp->b_kvabase = NULL;
837 		bufspacewakeup();
838 	}
839 }
840 
841 /*
842  * bremfree:
843  *
844  *	Remove the buffer from the appropriate free list.
845  */
846 static __inline void
847 _bremfree(struct buf *bp)
848 {
849 	if (bp->b_qindex != BQUEUE_NONE) {
850 		KASSERT(BUF_REFCNTNB(bp) == 1,
851 				("bremfree: bp %p not locked",bp));
852 		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
853 		bp->b_qindex = BQUEUE_NONE;
854 	} else {
855 		if (BUF_REFCNTNB(bp) <= 1)
856 			panic("bremfree: removing a buffer not on a queue");
857 	}
858 }
859 
860 void
861 bremfree(struct buf *bp)
862 {
863 	spin_lock(&bufqspin);
864 	_bremfree(bp);
865 	spin_unlock(&bufqspin);
866 }
867 
868 static void
869 bremfree_locked(struct buf *bp)
870 {
871 	_bremfree(bp);
872 }
873 
874 /*
875  * bread:
876  *
877  *	Get a buffer with the specified data.  Look in the cache first.  We
878  *	must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
879  *	is set, the buffer is valid and we do not have to do anything ( see
880  *	getblk() ).
881  *
882  */
883 int
884 bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
885 {
886 	return (breadn(vp, loffset, size, NULL, NULL, 0, bpp));
887 }
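
/*
 * Typical bread() usage as a minimal sketch (hypothetical caller):
 * the buffer is returned locked and, on success, valid; it must be
 * released with bqrelse() or brelse() when the caller is done.
 */
#if 0
	struct buf *bp;
	int error;

	error = bread(vp, loffset, blksize, &bp);
	if (error == 0) {
		/* ... consume bp->b_data ... */
		bqrelse(bp);		/* expect reuse soon */
	} else {
		brelse(bp);		/* error path, toss the buffer */
	}
#endif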
888 
889 /*
890  * This version of bread issues any required I/O asynchronously and
891  * makes a callback on completion.
892  *
893  * The callback must check whether BIO_DONE is set in the bio and issue
894  * bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
895  * BIO_DONE and disposing of the I/O (bqrelse()ing it).
896  */
897 void
898 breadcb(struct vnode *vp, off_t loffset, int size,
899 	void (*func)(struct bio *), void *arg)
900 {
901 	struct buf *bp;
902 
903 	bp = getblk(vp, loffset, size, 0, 0);
904 
905 	/* if not found in cache, do some I/O */
906 	if ((bp->b_flags & B_CACHE) == 0) {
907 		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
908 		bp->b_cmd = BUF_CMD_READ;
909 		bp->b_bio1.bio_done = func;
910 		bp->b_bio1.bio_caller_info1.ptr = arg;
911 		vfs_busy_pages(vp, bp);
912 		BUF_KERNPROC(bp);
913 		vn_strategy(vp, &bp->b_bio1);
914 	} else if (func) {
915 		/*
916 		 * Since we are issuing the callback synchronously it cannot
917 		 * race the BIO_DONE, so no need for atomic ops here.
918 		 */
919 		/*bp->b_bio1.bio_done = func;*/
920 		bp->b_bio1.bio_caller_info1.ptr = arg;
921 		bp->b_bio1.bio_flags |= BIO_DONE;
922 		func(&bp->b_bio1);
923 	} else {
924 		bqrelse(bp);
925 	}
926 }
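
/*
 * A callback sketch following the contract described above
 * (hypothetical function, not from this file): issue bpdone() if the
 * BIO has not completed yet, clear BIO_DONE, then dispose of the
 * buffer.
 */
#if 0
static void
my_read_done(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(bp, 0);
	bio->bio_flags &= ~BIO_DONE;
	/* ... check B_ERROR and consume bp->b_data ... */
	bqrelse(bp);
}
#endif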
927 
928 /*
929  * breadn:
930  *
931  *	Operates like bread, but also starts asynchronous I/O on
932  *	read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
933  *	to initiating I/O.  If B_CACHE is set, the buffer is valid
934  *	and we do not have to do anything.
935  *
936  */
937 int
938 breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset,
939 	int *rabsize, int cnt, struct buf **bpp)
940 {
941 	struct buf *bp, *rabp;
942 	int i;
943 	int rv = 0, readwait = 0;
944 
945 	*bpp = bp = getblk(vp, loffset, size, 0, 0);
946 
947 	/* if not found in cache, do some I/O */
948 	if ((bp->b_flags & B_CACHE) == 0) {
949 		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
950 		bp->b_cmd = BUF_CMD_READ;
951 		bp->b_bio1.bio_done = biodone_sync;
952 		bp->b_bio1.bio_flags |= BIO_SYNC;
953 		vfs_busy_pages(vp, bp);
954 		vn_strategy(vp, &bp->b_bio1);
955 		++readwait;
956 	}
957 
958 	for (i = 0; i < cnt; i++, raoffset++, rabsize++) {
959 		if (inmem(vp, *raoffset))
960 			continue;
961 		rabp = getblk(vp, *raoffset, *rabsize, 0, 0);
962 
963 		if ((rabp->b_flags & B_CACHE) == 0) {
964 			rabp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
965 			rabp->b_cmd = BUF_CMD_READ;
966 			vfs_busy_pages(vp, rabp);
967 			BUF_KERNPROC(rabp);
968 			vn_strategy(vp, &rabp->b_bio1);
969 		} else {
970 			brelse(rabp);
971 		}
972 	}
973 	if (readwait)
974 		rv = biowait(&bp->b_bio1, "biord");
975 	return (rv);
976 }
977 
978 /*
979  * bwrite:
980  *
981  *	Synchronous write, waits for completion.
982  *
983  *	Write, release buffer on completion.  (Done by iodone
984  *	if async).  Do not bother writing anything if the buffer
985  *	is invalid.
986  *
987  *	Note that we set B_CACHE here, indicating that buffer is
988  *	fully valid and thus cacheable.  This is true even of NFS
989  *	now so we set it generally.  This could be set either here
990  *	or in biodone() since the I/O is synchronous.  We put it
991  *	here.
992  */
993 int
994 bwrite(struct buf *bp)
995 {
996 	int error;
997 
998 	if (bp->b_flags & B_INVAL) {
999 		brelse(bp);
1000 		return (0);
1001 	}
1002 	if (BUF_REFCNTNB(bp) == 0)
1003 		panic("bwrite: buffer is not busy???");
1004 
1005 	/* Mark the buffer clean */
1006 	bundirty(bp);
1007 
1008 	bp->b_flags &= ~(B_ERROR | B_EINTR);
1009 	bp->b_flags |= B_CACHE;
1010 	bp->b_cmd = BUF_CMD_WRITE;
1011 	bp->b_bio1.bio_done = biodone_sync;
1012 	bp->b_bio1.bio_flags |= BIO_SYNC;
1013 	vfs_busy_pages(bp->b_vp, bp);
1014 
1015 	/*
1016 	 * Normal bwrites pipeline writes.  NOTE: b_bufsize is only
1017 	 * valid for vnode-backed buffers.
1018 	 */
1019 	bsetrunningbufspace(bp, bp->b_bufsize);
1020 	vn_strategy(bp->b_vp, &bp->b_bio1);
1021 	error = biowait(&bp->b_bio1, "biows");
1022 	brelse(bp);
1023 
1024 	return (error);
1025 }
1026 
1027 /*
1028  * bawrite:
1029  *
1030  *	Asynchronous write.  Start output on a buffer, but do not wait for
1031  *	it to complete.  The buffer is released when the output completes.
1032  *
1033  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
1034  *	B_INVAL buffers.  Not us.
1035  */
1036 void
1037 bawrite(struct buf *bp)
1038 {
1039 	if (bp->b_flags & B_INVAL) {
1040 		brelse(bp);
1041 		return;
1042 	}
1043 	if (BUF_REFCNTNB(bp) == 0)
1044 		panic("bwrite: buffer is not busy???");
1045 
1046 	/* Mark the buffer clean */
1047 	bundirty(bp);
1048 
1049 	bp->b_flags &= ~(B_ERROR | B_EINTR);
1050 	bp->b_flags |= B_CACHE;
1051 	bp->b_cmd = BUF_CMD_WRITE;
1052 	KKASSERT(bp->b_bio1.bio_done == NULL);
1053 	vfs_busy_pages(bp->b_vp, bp);
1054 
1055 	/*
1056 	 * Normal bwrites pipeline writes.  NOTE: b_bufsize is only
1057 	 * valid for vnode-backed buffers.
1058 	 */
1059 	bsetrunningbufspace(bp, bp->b_bufsize);
1060 	BUF_KERNPROC(bp);
1061 	vn_strategy(bp->b_vp, &bp->b_bio1);
1062 }
1063 
1064 /*
1065  * bowrite:
1066  *
1067  *	Ordered write.  Start output on a buffer, and flag it so that the
1068  *	device will write it in the order it was queued.  The buffer is
1069  *	released when the output completes.  bwrite() ( or the VOP routine
1070  *	anyway ) is responsible for handling B_INVAL buffers.
1071  */
1072 int
1073 bowrite(struct buf *bp)
1074 {
1075 	bp->b_flags |= B_ORDERED;
1076 	bawrite(bp);
1077 	return (0);
1078 }
1079 
1080 /*
1081  * bdwrite:
1082  *
1083  *	Delayed write. (Buffer is marked dirty).  Do not bother writing
1084  *	anything if the buffer is marked invalid.
1085  *
1086  *	Note that since the buffer must be completely valid, we can safely
1087  *	set B_CACHE.  In fact, we have to set B_CACHE here rather than in
1088  *	biodone() in order to prevent getblk from writing the buffer
1089  *	out synchronously.
1090  */
1091 void
1092 bdwrite(struct buf *bp)
1093 {
1094 	if (BUF_REFCNTNB(bp) == 0)
1095 		panic("bdwrite: buffer is not busy");
1096 
1097 	if (bp->b_flags & B_INVAL) {
1098 		brelse(bp);
1099 		return;
1100 	}
1101 	bdirty(bp);
1102 
1103 	if (dsched_is_clear_buf_priv(bp))
1104 		dsched_new_buf(bp);
1105 
1106 	/*
1107 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
1108 	 * true even of NFS now.
1109 	 */
1110 	bp->b_flags |= B_CACHE;
1111 
1112 	/*
1113 	 * This bmap keeps the system from needing to do the bmap later,
1114 	 * perhaps when the system is attempting to do a sync.  Since it
1115 	 * is likely that the indirect block -- or whatever other data
1116 	 * structure the filesystem needs -- is still in memory now, it is
1117 	 * a good thing to do this.  Note also that if the pageout daemon is
1118 	 * requesting a sync -- there might not be enough memory to do
1119 	 * the bmap then...  So, this is important to do.
1120 	 */
1121 	if (bp->b_bio2.bio_offset == NOOFFSET) {
1122 		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1123 			 NULL, NULL, BUF_CMD_WRITE);
1124 	}
1125 
1126 	/*
1127 	 * Because the underlying pages may still be mapped and
1128 	 * writable, trying to set the dirty buffer (b_dirtyoff/end)
1129 	 * range here would be inaccurate.
1130 	 *
1131 	 * However, we must still clean the pages to satisfy the
1132 	 * vnode_pager and pageout daemon, so they think the pages
1133 	 * have been "cleaned".  What has really occurred is that
1134 	 * they've been earmarked for later writing by the buffer
1135 	 * cache.
1136 	 *
1137 	 * So we get the b_dirtyoff/end update but will not actually
1138 	 * depend on it (NFS that is) until the pages are busied for
1139 	 * writing later on.
1140 	 */
1141 	vfs_clean_pages(bp);
1142 	bqrelse(bp);
1143 
1144 	/*
1145 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
1146 	 * due to the softdep code.
1147 	 */
1148 }
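
/*
 * Choosing a write flavor, as a minimal sketch (hypothetical caller):
 * bwrite() blocks until the write completes, bawrite() starts the
 * write and returns immediately, and bdwrite() merely marks the
 * buffer B_DELWRI for the buf_daemon to flush later.
 */
#if 0
	bp = getblk(vp, loffset, blksize, 0, 0);
	/* ... modify bp->b_data ... */
	if (must_be_on_disk_now)		/* hypothetical condition */
		error = bwrite(bp);		/* synchronous */
	else
		bdwrite(bp);			/* delayed write */
#endif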
1149 
1150 /*
1151  * Fake write - return pages to VM system as dirty, leave the buffer clean.
1152  * This is used by tmpfs.
1153  *
1154  * It is important for any VFS using this routine to NOT use it for
1155  * IO_SYNC or IO_ASYNC operations which occur when the system really
1156  * wants to flush VM pages to backing store.
1157  */
1158 void
1159 buwrite(struct buf *bp)
1160 {
1161 	vm_page_t m;
1162 	int i;
1163 
1164 	/*
1165 	 * Only works for VMIO buffers.  If the buffer is already
1166 	 * marked for delayed-write we can't avoid the bdwrite().
1167 	 */
1168 	if ((bp->b_flags & B_VMIO) == 0 || (bp->b_flags & B_DELWRI)) {
1169 		bdwrite(bp);
1170 		return;
1171 	}
1172 
1173 	/*
1174 	 * Set valid & dirty.
1175 	 */
1176 	for (i = 0; i < bp->b_xio.xio_npages; i++) {
1177 		m = bp->b_xio.xio_pages[i];
1178 		vfs_dirty_one_page(bp, i, m);
1179 	}
1180 	bqrelse(bp);
1181 }
1182 
1183 /*
1184  * bdirty:
1185  *
1186  *	Turn buffer into delayed write request by marking it B_DELWRI.
1187  *	B_RELBUF and B_NOCACHE must be cleared.
1188  *
1189  *	We reassign the buffer to itself to properly update it in the
1190  *	dirty/clean lists.
1191  *
1192  *	Must be called from a critical section.
1193  *	The buffer must be on BQUEUE_NONE.
1194  */
1195 void
1196 bdirty(struct buf *bp)
1197 {
1198 	KASSERT(bp->b_qindex == BQUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
1199 	if (bp->b_flags & B_NOCACHE) {
1200 		kprintf("bdirty: clearing B_NOCACHE on buf %p\n", bp);
1201 		bp->b_flags &= ~B_NOCACHE;
1202 	}
1203 	if (bp->b_flags & B_INVAL) {
1204 		kprintf("bdirty: warning, dirtying invalid buffer %p\n", bp);
1205 	}
1206 	bp->b_flags &= ~B_RELBUF;
1207 
1208 	if ((bp->b_flags & B_DELWRI) == 0) {
1209 		lwkt_gettoken(&bp->b_vp->v_token);
1210 		bp->b_flags |= B_DELWRI;
1211 		reassignbuf(bp);
1212 		lwkt_reltoken(&bp->b_vp->v_token);
1213 
1214 		spin_lock(&bufcspin);
1215 		++dirtybufcount;
1216 		dirtybufspace += bp->b_bufsize;
1217 		if (bp->b_flags & B_HEAVY) {
1218 			++dirtybufcounthw;
1219 			dirtybufspacehw += bp->b_bufsize;
1220 		}
1221 		spin_unlock(&bufcspin);
1222 
1223 		bd_heatup();
1224 	}
1225 }
1226 
1227 /*
1228  * Set B_HEAVY, indicating that this is a heavy-weight buffer that
1229  * needs to be flushed with a different buf_daemon thread to avoid
1230  * deadlocks.  B_HEAVY also imposes restrictions in getnewbuf().
1231  */
1232 void
1233 bheavy(struct buf *bp)
1234 {
1235 	if ((bp->b_flags & B_HEAVY) == 0) {
1236 		bp->b_flags |= B_HEAVY;
1237 		if (bp->b_flags & B_DELWRI) {
1238 			spin_lock(&bufcspin);
1239 			++dirtybufcounthw;
1240 			dirtybufspacehw += bp->b_bufsize;
1241 			spin_unlock(&bufcspin);
1242 		}
1243 	}
1244 }
1245 
1246 /*
1247  * bundirty:
1248  *
1249  *	Clear B_DELWRI for buffer.
1250  *
1251  *	Must be called from a critical section.
1252  *
1253  *	The buffer is typically on BQUEUE_NONE but there is one case in
1254  *	brelse() that calls this function after placing the buffer on
1255  *	a different queue.
1256  *
1257  * MPSAFE
1258  */
1259 void
1260 bundirty(struct buf *bp)
1261 {
1262 	if (bp->b_flags & B_DELWRI) {
1263 		lwkt_gettoken(&bp->b_vp->v_token);
1264 		bp->b_flags &= ~B_DELWRI;
1265 		reassignbuf(bp);
1266 		lwkt_reltoken(&bp->b_vp->v_token);
1267 
1268 		spin_lock(&bufcspin);
1269 		--dirtybufcount;
1270 		dirtybufspace -= bp->b_bufsize;
1271 		if (bp->b_flags & B_HEAVY) {
1272 			--dirtybufcounthw;
1273 			dirtybufspacehw -= bp->b_bufsize;
1274 		}
1275 		spin_unlock(&bufcspin);
1276 
1277 		bd_signal(bp->b_bufsize);
1278 	}
1279 	/*
1280 	 * Since it is now being written, we can clear its deferred write flag.
1281 	 */
1282 	bp->b_flags &= ~B_DEFERRED;
1283 }
1284 
1285 /*
1286  * Set the b_runningbufspace field, used to track how much I/O is
1287  * in progress at any given moment.
1288  */
1289 void
1290 bsetrunningbufspace(struct buf *bp, int bytes)
1291 {
1292 	bp->b_runningbufspace = bytes;
1293 	if (bytes) {
1294 		spin_lock(&bufcspin);
1295 		runningbufspace += bytes;
1296 		++runningbufcount;
1297 		spin_unlock(&bufcspin);
1298 	}
1299 }
1300 
1301 /*
1302  * brelse:
1303  *
1304  *	Release a busy buffer and, if requested, free its resources.  The
1305  *	buffer will be stashed in the appropriate bufqueue[] allowing it
1306  *	to be accessed later as a cache entity or reused for other purposes.
1307  *
1308  * MPALMOSTSAFE
1309  */
1310 void
1311 brelse(struct buf *bp)
1312 {
1313 #ifdef INVARIANTS
1314 	int saved_flags = bp->b_flags;
1315 #endif
1316 
1317 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1318 
1319 	/*
1320 	 * If B_NOCACHE is set we are being asked to destroy the buffer and
1321 	 * its backing store.  Clear B_DELWRI.
1322 	 *
1323 	 * B_NOCACHE is set in two cases: (1) when the caller really wants
1324 	 * to destroy the buffer and backing store and (2) when the caller
1325 	 * wants to destroy the buffer and backing store after a write
1326 	 * completes.
1327 	 */
1328 	if ((bp->b_flags & (B_NOCACHE|B_DELWRI)) == (B_NOCACHE|B_DELWRI)) {
1329 		bundirty(bp);
1330 	}
1331 
1332 	if ((bp->b_flags & (B_INVAL | B_DELWRI)) == B_DELWRI) {
1333 		/*
1334 		 * A re-dirtied buffer is only subject to destruction
1335 		 * by B_INVAL.  B_ERROR and B_NOCACHE are ignored.
1336 		 */
1337 		/* leave buffer intact */
1338 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
1339 		   (bp->b_bufsize <= 0)) {
1340 		/*
1341 		 * Either a failed read or we were asked to free or not
1342 		 * cache the buffer.  This path is reached with B_DELWRI
1343 		 * set only if B_INVAL is already set.  B_NOCACHE governs
1344 		 * backing store destruction.
1345 		 *
1346 		 * NOTE: HAMMER will set B_LOCKED in buf_deallocate if the
1347 		 * buffer cannot be immediately freed.
1348 		 */
1349 		bp->b_flags |= B_INVAL;
1350 		if (LIST_FIRST(&bp->b_dep) != NULL)
1351 			buf_deallocate(bp);
1352 		if (bp->b_flags & B_DELWRI) {
1353 			spin_lock(&bufcspin);
1354 			--dirtybufcount;
1355 			dirtybufspace -= bp->b_bufsize;
1356 			if (bp->b_flags & B_HEAVY) {
1357 				--dirtybufcounthw;
1358 				dirtybufspacehw -= bp->b_bufsize;
1359 			}
1360 			spin_unlock(&bufcspin);
1361 
1362 			bd_signal(bp->b_bufsize);
1363 		}
1364 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
1365 	}
1366 
1367 	/*
1368 	 * We must clear B_RELBUF if B_DELWRI or B_LOCKED is set,
1369 	 * or if b_refs is non-zero.
1370 	 *
1371 	 * If vfs_vmio_release() is called with either bit set, the
1372 	 * underlying pages may wind up getting freed causing a previous
1373 	 * write (bdwrite()) to get 'lost' because pages associated with
1374 	 * a B_DELWRI bp are marked clean.  Pages associated with a
1375 	 * B_LOCKED buffer may be mapped by the filesystem.
1376 	 *
1377 	 * If we want to release the buffer ourselves (rather than the
1378 	 * originator asking us to release it), give the originator a
1379 	 * chance to countermand the release by setting B_LOCKED.
1380 	 *
1381 	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
1382 	 * if B_DELWRI is set.
1383 	 *
1384 	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
1385 	 * on pages to return pages to the VM page queues.
1386 	 */
1387 	if ((bp->b_flags & (B_DELWRI | B_LOCKED)) || bp->b_refs) {
1388 		bp->b_flags &= ~B_RELBUF;
1389 	} else if (vm_page_count_severe()) {
1390 		if (LIST_FIRST(&bp->b_dep) != NULL)
1391 			buf_deallocate(bp);		/* can set B_LOCKED */
1392 		if (bp->b_flags & (B_DELWRI | B_LOCKED))
1393 			bp->b_flags &= ~B_RELBUF;
1394 		else
1395 			bp->b_flags |= B_RELBUF;
1396 	}
1397 
1398 	/*
1399 	 * Make sure b_cmd is clear.  It may have already been cleared by
1400 	 * biodone().
1401 	 *
1402 	 * At this point destroying the buffer is governed by the B_INVAL
1403 	 * or B_RELBUF flags.
1404 	 */
1405 	bp->b_cmd = BUF_CMD_DONE;
1406 	dsched_exit_buf(bp);
1407 
1408 	/*
1409 	 * VMIO buffer rundown.  Make sure the VM page array is restored
1410 	 * after an I/O may have replaced some of the pages with bogus pages
1411 	 * in order to not destroy dirty pages in a fill-in read.
1412 	 *
1413 	 * Note that due to the code above, if a buffer is marked B_DELWRI
1414 	 * then the B_RELBUF and B_NOCACHE bits will always be clear.
1415 	 * B_INVAL may still be set, however.
1416 	 *
1417 	 * For clean buffers, B_INVAL or B_RELBUF will destroy the buffer
1418 	 * but not the backing store.   B_NOCACHE will destroy the backing
1419 	 * store.
1420 	 *
1421 	 * Note that dirty NFS buffers contain byte-granular write ranges
1422 	 * and should not be destroyed w/ B_INVAL even if the backing store
1423 	 * is left intact.
1424 	 */
1425 	if (bp->b_flags & B_VMIO) {
1426 		/*
1427 		 * Rundown for VMIO buffers which are not dirty NFS buffers.
1428 		 */
1429 		int i, j, resid;
1430 		vm_page_t m;
1431 		off_t foff;
1432 		vm_pindex_t poff;
1433 		vm_object_t obj;
1434 		struct vnode *vp;
1435 
1436 		vp = bp->b_vp;
1437 
1438 		/*
1439 		 * Get the base offset and length of the buffer.  Note that
1440 		 * in the VMIO case if the buffer block size is not
1441 		 * page-aligned then the b_data pointer may not be page-aligned.
1442 		 * But our b_xio.xio_pages array *IS* page aligned.
1443 		 *
1444 		 * block sizes less than DEV_BSIZE (usually 512) are not
1445 		 * supported due to the page granularity bits (m->valid,
1446 		 * m->dirty, etc...).
1447 		 *
1448 		 * See man buf(9) for more information
1449 		 */
1450 
1451 		resid = bp->b_bufsize;
1452 		foff = bp->b_loffset;
1453 
1454 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
1455 			m = bp->b_xio.xio_pages[i];
1456 			vm_page_flag_clear(m, PG_ZERO);
1457 			/*
1458 			 * If we hit a bogus page, fixup *all* of them
1459 			 * now.  Note that we left these pages wired
1460 			 * when we removed them so they had better exist,
1461 			 * and they cannot be ripped out from under us so
1462 			 * no critical section protection is necessary.
1463 			 */
1464 			if (m == bogus_page) {
1465 				obj = vp->v_object;
1466 				poff = OFF_TO_IDX(bp->b_loffset);
1467 
1468 				vm_object_hold(obj);
1469 				for (j = i; j < bp->b_xio.xio_npages; j++) {
1470 					vm_page_t mtmp;
1471 
1472 					mtmp = bp->b_xio.xio_pages[j];
1473 					if (mtmp == bogus_page) {
1474 						mtmp = vm_page_lookup(obj, poff + j);
1475 						if (!mtmp) {
1476 							panic("brelse: page missing");
1477 						}
1478 						bp->b_xio.xio_pages[j] = mtmp;
1479 					}
1480 				}
1481 				bp->b_flags &= ~B_HASBOGUS;
1482 				vm_object_drop(obj);
1483 
1484 				if ((bp->b_flags & B_INVAL) == 0) {
1485 					pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
1486 						bp->b_xio.xio_pages, bp->b_xio.xio_npages);
1487 				}
1488 				m = bp->b_xio.xio_pages[i];
1489 			}
1490 
1491 			/*
1492 			 * Invalidate the backing store if B_NOCACHE is set
1493 			 * (e.g. used with vinvalbuf()).  If this is NFS
1494 			 * we impose a requirement that the block size be
1495 			 * a multiple of PAGE_SIZE and create a temporary
1496 			 * hack to basically invalidate the whole page.  The
1497 			 * problem is that NFS uses really odd buffer sizes
1498 			 * especially when tracking piecemeal writes and
1499 			 * it also vinvalbuf()'s a lot, which would result
1500 			 * in only partial page validation and invalidation
1501 			 * here.  If the file page is mmap()'d, however,
1502 			 * all the valid bits get set so after we invalidate
1503 			 * here we would end up with weird m->valid values
1504 			 * like 0xfc.  nfs_getpages() can't handle this so
1505 			 * we clear all the valid bits for the NFS case
1506 			 * instead of just some of them.
1507 			 *
1508 			 * The real bug is the VM system having to set m->valid
1509 			 * to VM_PAGE_BITS_ALL for faulted-in pages, which
1510 			 * itself is an artifact of the whole 512-byte
1511 			 * granular mess that exists to support odd block
1512 			 * sizes and UFS meta-data block sizes (e.g. 6144).
1513 			 * A complete rewrite is required.
1514 			 *
1515 			 * XXX
1516 			 */
1517 			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
1518 				int poffset = foff & PAGE_MASK;
1519 				int presid;
1520 
1521 				presid = PAGE_SIZE - poffset;
1522 				if (bp->b_vp->v_tag == VT_NFS &&
1523 				    bp->b_vp->v_type == VREG) {
1524 					; /* entire page */
1525 				} else if (presid > resid) {
1526 					presid = resid;
1527 				}
1528 				KASSERT(presid >= 0, ("brelse: extra page"));
1529 				vm_page_set_invalid(m, poffset, presid);
1530 
1531 				/*
1532 				 * Also make sure any swap cache is removed
1533 				 * as it is now stale (HAMMER in particular
1534 				 * uses B_NOCACHE to deal with buffer
1535 				 * aliasing).
1536 				 */
1537 				swap_pager_unswapped(m);
1538 			}
1539 			resid -= PAGE_SIZE - (foff & PAGE_MASK);
1540 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
1541 		}
1542 		if (bp->b_flags & (B_INVAL | B_RELBUF))
1543 			vfs_vmio_release(bp);
1544 	} else {
1545 		/*
1546 		 * Rundown for non-VMIO buffers.
1547 		 */
1548 		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
1549 			if (bp->b_bufsize)
1550 				allocbuf(bp, 0);
1551 			KKASSERT (LIST_FIRST(&bp->b_dep) == NULL);
1552 			if (bp->b_vp)
1553 				brelvp(bp);
1554 		}
1555 	}
1556 
1557 	if (bp->b_qindex != BQUEUE_NONE)
1558 		panic("brelse: free buffer onto another queue???");
1559 	if (BUF_REFCNTNB(bp) > 1) {
1560 		/* Temporary panic to verify exclusive locking */
1561 		/* This panic goes away when we allow shared refs */
1562 		panic("brelse: multiple refs");
1563 		/* NOT REACHED */
1564 		return;
1565 	}
1566 
1567 	/*
1568 	 * Figure out the correct queue to place the cleaned up buffer on.
1569 	 * Buffers placed on the EMPTY or EMPTYKVA queues had better
1570 	 * already be disassociated from their vnode.
1571 	 */
1572 	spin_lock(&bufqspin);
1573 	if (bp->b_flags & B_LOCKED) {
1574 		/*
1575 		 * Buffers that are locked are placed in the locked queue
1576 		 * immediately, regardless of their state.
1577 		 */
1578 		bp->b_qindex = BQUEUE_LOCKED;
1579 		TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist);
1580 	} else if (bp->b_bufsize == 0) {
1581 		/*
1582 		 * Buffers with no memory.  Due to conditionals near the top
1583 		 * of brelse() such buffers should probably already be
1584 		 * marked B_INVAL and disassociated from their vnode.
1585 		 */
1586 		bp->b_flags |= B_INVAL;
1587 		KASSERT(bp->b_vp == NULL, ("bp1 %p flags %08x/%08x vnode %p unexpectedly still associated!", bp, saved_flags, bp->b_flags, bp->b_vp));
1588 		KKASSERT((bp->b_flags & B_HASHED) == 0);
1589 		if (bp->b_kvasize) {
1590 			bp->b_qindex = BQUEUE_EMPTYKVA;
1591 		} else {
1592 			bp->b_qindex = BQUEUE_EMPTY;
1593 		}
1594 		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1595 	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) {
1596 		/*
1597 		 * Buffers with junk contents.   Again these buffers had better
1598 		 * already be disassociated from their vnode.
1599 		 */
1600 		KASSERT(bp->b_vp == NULL, ("bp2 %p flags %08x/%08x vnode %p unexpectedly still associated!", bp, saved_flags, bp->b_flags, bp->b_vp));
1601 		KKASSERT((bp->b_flags & B_HASHED) == 0);
1602 		bp->b_flags |= B_INVAL;
1603 		bp->b_qindex = BQUEUE_CLEAN;
1604 		TAILQ_INSERT_HEAD(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
1605 	} else {
1606 		/*
1607 		 * Remaining buffers.  These buffers are still associated with
1608 		 * their vnode.
1609 		 */
1610 		switch(bp->b_flags & (B_DELWRI|B_HEAVY)) {
1611 		case B_DELWRI:
1612 		    bp->b_qindex = BQUEUE_DIRTY;
1613 		    TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY], bp, b_freelist);
1614 		    break;
1615 		case B_DELWRI | B_HEAVY:
1616 		    bp->b_qindex = BQUEUE_DIRTY_HW;
1617 		    TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY_HW], bp,
1618 				      b_freelist);
1619 		    break;
1620 		default:
1621 		    /*
1622 		     * NOTE: Buffers are always placed at the end of the
1623 		     * queue.  If B_AGE is not set the buffer will cycle
1624 		     * through the queue twice.
1625 		     */
1626 		    bp->b_qindex = BQUEUE_CLEAN;
1627 		    TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
1628 		    break;
1629 		}
1630 	}
1631 	spin_unlock(&bufqspin);
1632 
1633 	/*
1634 	 * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
1635 	 * on the correct queue.
1636 	 */
1637 	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI))
1638 		bundirty(bp);
1639 
1640 	/*
1641 	 * The bp is on an appropriate queue unless locked.  If it is not
1642 	 * locked or dirty we can wakeup threads waiting for buffer space.
1643 	 *
1644 	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
1645 	 * if B_INVAL is set ).
1646 	 */
1647 	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0)
1648 		bufcountwakeup();
1649 
1650 	/*
1651 	 * Something we can maybe free or reuse
1652 	 */
1653 	if (bp->b_bufsize || bp->b_kvasize)
1654 		bufspacewakeup();
1655 
1656 	/*
1657 	 * Clean up temporary flags and unlock the buffer.
1658 	 */
1659 	bp->b_flags &= ~(B_ORDERED | B_NOCACHE | B_RELBUF | B_DIRECT);
1660 	BUF_UNLOCK(bp);
1661 }
1662 
1663 /*
1664  * bqrelse:
1665  *
1666  *	Release a buffer back to the appropriate queue but do not try to free
1667  *	it.  The buffer is expected to be used again soon.
1668  *
1669  *	bqrelse() is used by bdwrite() to requeue a delayed write, and used by
1670  *	biodone() to requeue an async I/O on completion.  It is also used when
1671  *	known good buffers need to be requeued but we think we may need the data
1672  *	again soon.
1673  *
1674  *	XXX we should be able to leave the B_RELBUF hint set on completion.
1675  *
1676  * MPSAFE
1677  */
1678 void
1679 bqrelse(struct buf *bp)
1680 {
1681 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1682 
1683 	if (bp->b_qindex != BQUEUE_NONE)
1684 		panic("bqrelse: free buffer onto another queue???");
1685 	if (BUF_REFCNTNB(bp) > 1) {
1686 		/* do not release to free list */
1687 		panic("bqrelse: multiple refs");
1688 		return;
1689 	}
1690 
1691 	buf_act_advance(bp);
1692 
1693 	spin_lock(&bufqspin);
1694 	if (bp->b_flags & B_LOCKED) {
1695 		/*
1696 		 * Locked buffers are released to the locked queue.  However,
1697 		 * if the buffer is dirty it will first go into the dirty
1698 		 * queue and later on after the I/O completes successfully it
1699 		 * will be released to the locked queue.
1700 		 */
1701 		bp->b_qindex = BQUEUE_LOCKED;
1702 		TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist);
1703 	} else if (bp->b_flags & B_DELWRI) {
1704 		bp->b_qindex = (bp->b_flags & B_HEAVY) ?
1705 			       BQUEUE_DIRTY_HW : BQUEUE_DIRTY;
1706 		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1707 	} else if (vm_page_count_severe()) {
1708 		/*
1709 		 * We are too low on memory, we have to try to free the
1710 		 * buffer (most importantly: the wired pages making up its
1711 		 * backing store) *now*.
1712 		 */
1713 		spin_unlock(&bufqspin);
1714 		brelse(bp);
1715 		return;
1716 	} else {
1717 		bp->b_qindex = BQUEUE_CLEAN;
1718 		TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
1719 	}
1720 	spin_unlock(&bufqspin);
1721 
1722 	if ((bp->b_flags & B_LOCKED) == 0 &&
1723 	    ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)) {
1724 		bufcountwakeup();
1725 	}
1726 
1727 	/*
1728 	 * Something we can maybe free or reuse.
1729 	 */
1730 	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
1731 		bufspacewakeup();
1732 
1733 	/*
1734 	 * Final cleanup and unlock.  Clear bits that are only used while a
1735 	 * buffer is actively locked.
1736 	 */
1737 	bp->b_flags &= ~(B_ORDERED | B_NOCACHE | B_RELBUF);
1738 	dsched_exit_buf(bp);
1739 	BUF_UNLOCK(bp);
1740 }
1741 
1742 /*
1743  * Hold a buffer, preventing it from being reused.  This will prevent
1744  * normal B_RELBUF operations on the buffer but will not prevent B_INVAL
1745  * operations.  If a B_INVAL operation occurs the buffer will remain held
1746  * but the underlying pages may get ripped out.
1747  *
1748  * These functions are typically used in VOP_READ/VOP_WRITE functions
1749  * to hold a buffer during a copyin or copyout, preventing deadlocks
1750  * or recursive lock panics when read()/write() is used over mmap()'d
1751  * space.
1752  *
1753  * NOTE: bqhold() requires that the buffer be locked at the time of the
1754  *	 hold.  bqdrop() has no requirements other than the buffer having
1755  *	 previously been held.
1756  */
1757 void
1758 bqhold(struct buf *bp)
1759 {
1760 	atomic_add_int(&bp->b_refs, 1);
1761 }
1762 
1763 void
1764 bqdrop(struct buf *bp)
1765 {
1766 	KKASSERT(bp->b_refs > 0);
1767 	atomic_add_int(&bp->b_refs, -1);
1768 }
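
/*
 * A minimal sketch of the copyin/copyout pattern described above
 * (hypothetical VOP_READ fragment): hold the buffer across uiomove()
 * so that a fault on mmap()'d space cannot trigger a reuse of bp.
 */
#if 0
	bqhold(bp);
	error = uiomove(bp->b_data + offset, n, uio);
	bqdrop(bp);
#endif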
1769 
1770 /*
1771  * vfs_vmio_release:
1772  *
1773  *	Return backing pages held by the buffer 'bp' back to the VM system
1774  *	if possible.  The pages are freed if they are no longer valid,
1775  *	an attempt is made to free them if the buffer was used for
1776  *	direct I/O, and otherwise they are sent to the page cache.
1777  *
1778  *	Pages that were marked busy are left alone and skipped.
1779  *
1780  *	The KVA mapping (b_data) for the underlying pages is removed by
1781  *	this function.
1782  */
1783 static void
1784 vfs_vmio_release(struct buf *bp)
1785 {
1786 	int i;
1787 	vm_page_t m;
1788 
1789 	for (i = 0; i < bp->b_xio.xio_npages; i++) {
1790 		m = bp->b_xio.xio_pages[i];
1791 		bp->b_xio.xio_pages[i] = NULL;
1792 
1793 		vm_page_busy_wait(m, FALSE, "vmiopg");
1794 
1795 		/*
1796 		 * The VFS is telling us this is not a meta-data buffer
1797 		 * even if it is backed by a block device.
1798 		 */
1799 		if (bp->b_flags & B_NOTMETA)
1800 			vm_page_flag_set(m, PG_NOTMETA);
1801 
1802 		/*
1803 		 * This is a very important bit of code.  We try to track
1804 		 * VM page use whether the pages are wired into the buffer
1805 		 * cache or not.  While wired into the buffer cache the
1806 		 * bp tracks the act_count.
1807 		 *
1808 		 * We can choose to place unwired pages on the inactive
1809 		 * queue (0) or active queue (1).  If we place too many
1810 		 * on the active queue the queue will cycle the act_count
1811 		 * on pages we'd like to keep, just from single-use pages
1812 		 * (such as when doing a tar-up or file scan).
1813 		 */
1814 		if (bp->b_act_count < vm_cycle_point)
1815 			vm_page_unwire(m, 0);
1816 		else
1817 			vm_page_unwire(m, 1);
1818 
1819 		/*
1820 		 * We don't mess with busy pages, it is the responsibility
1821 		 * of the process that busied the pages to deal with them.
1822 		 *
1823 		 * However, the caller may have marked the page invalid and
1824 		 * we must still make sure the page is no longer mapped.
1825 		 */
1826 		if ((m->flags & PG_BUSY) || (m->busy != 0)) {
1827 			vm_page_protect(m, VM_PROT_NONE);
1828 			vm_page_wakeup(m);
1829 			continue;
1830 		}
1831 
1832 		if (m->wire_count == 0) {
1833 			vm_page_flag_clear(m, PG_ZERO);
1834 			/*
1835 			 * Might as well free the page if we can and it has
1836 			 * no valid data.  We also free the page if the
1837 			 * buffer was used for direct I/O.
1838 			 */
1839 #if 0
1840 			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
1841 					m->hold_count == 0) {
1842 				vm_page_protect(m, VM_PROT_NONE);
1843 				vm_page_free(m);
1844 			} else
1845 #endif
1846 			/*
1847 			 * Cache the page if we are really low on free
1848 			 * pages.
1849 			 *
1850 			 * Also bypass the active and inactive queues
1851 			 * if B_NOTMETA is set.  This flag is set by HAMMER
1852 			 * on a regular file buffer when double buffering
1853 			 * is enabled or on a block device buffer representing
1854 			 * file data when double buffering is not enabled.
1855 			 * The flag prevents two copies of the same data from
1856 			 * being cached for long periods of time.
1857 			 */
1858 			if (bp->b_flags & B_DIRECT) {
1859 				vm_page_wakeup(m);
1860 				vm_page_try_to_free(m);
1861 			} else if ((bp->b_flags & B_NOTMETA) ||
1862 				   vm_page_count_severe()) {
1863 				m->act_count = bp->b_act_count;
1864 				vm_page_wakeup(m);
1865 				vm_page_try_to_cache(m);
1866 			} else {
1867 				m->act_count = bp->b_act_count;
1868 				vm_page_wakeup(m);
1869 			}
1870 		} else {
1871 			vm_page_wakeup(m);
1872 		}
1873 	}
1874 
1875 	pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
1876 		     bp->b_xio.xio_npages);
1877 	if (bp->b_bufsize) {
1878 		bufspacewakeup();
1879 		bp->b_bufsize = 0;
1880 	}
1881 	bp->b_xio.xio_npages = 0;
1882 	bp->b_flags &= ~B_VMIO;
1883 	KKASSERT (LIST_FIRST(&bp->b_dep) == NULL);
1884 	if (bp->b_vp)
1885 		brelvp(bp);
1886 }
1887 
1888 /*
1889  * vfs_bio_awrite:
1890  *
1891  *	Implement clustered async writes for clearing out B_DELWRI buffers.
1892  *	This is much better than the old way of writing only one buffer at
1893  *	a time.  Note that we may not be presented with the buffers in the
1894  *	correct order, so we search for the cluster in both directions.
1895  *
1896  *	The buffer is locked on call.
1897  */
1898 int
1899 vfs_bio_awrite(struct buf *bp)
1900 {
1901 	int i;
1902 	int j;
1903 	off_t loffset = bp->b_loffset;
1904 	struct vnode *vp = bp->b_vp;
1905 	int nbytes;
1906 	struct buf *bpa;
1907 	int nwritten;
1908 	int size;
1909 
1910 	/*
1911 	 * Right now we support clustered writing only to regular files.  If
1912 	 * we find a clusterable block we could be in the middle of a cluster
1913 	 * rather than at the beginning.
1914 	 *
1915 	 * NOTE: b_bio1 contains the logical loffset and is aliased
1916 	 * to b_loffset.  b_bio2 contains the translated block number.
1917 	 */
1918 	if ((vp->v_type == VREG) &&
1919 	    (vp->v_mount != NULL) && /* Only on nodes that have the size info */
1920 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1921 
1922 		size = vp->v_mount->mnt_stat.f_iosize;
1923 
1924 		for (i = size; i < MAXPHYS; i += size) {
1925 			if ((bpa = findblk(vp, loffset + i, FINDBLK_TEST)) &&
1926 			    BUF_REFCNT(bpa) == 0 &&
1927 			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
1928 			    (B_DELWRI | B_CLUSTEROK)) &&
1929 			    (bpa->b_bufsize == size)) {
1930 				if ((bpa->b_bio2.bio_offset == NOOFFSET) ||
1931 				    (bpa->b_bio2.bio_offset !=
1932 				     bp->b_bio2.bio_offset + i))
1933 					break;
1934 			} else {
1935 				break;
1936 			}
1937 		}
1938 		for (j = size; i + j <= MAXPHYS && j <= loffset; j += size) {
1939 			if ((bpa = findblk(vp, loffset - j, FINDBLK_TEST)) &&
1940 			    BUF_REFCNT(bpa) == 0 &&
1941 			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
1942 			    (B_DELWRI | B_CLUSTEROK)) &&
1943 			    (bpa->b_bufsize == size)) {
1944 				if ((bpa->b_bio2.bio_offset == NOOFFSET) ||
1945 				    (bpa->b_bio2.bio_offset !=
1946 				     bp->b_bio2.bio_offset - j))
1947 					break;
1948 			} else {
1949 				break;
1950 			}
1951 		}
1952 		j -= size;
1953 		nbytes = (i + j);
1954 
1955 		/*
1956 		 * this is a possible cluster write
1957 		 */
1958 		if (nbytes != size) {
1959 			BUF_UNLOCK(bp);
1960 			nwritten = cluster_wbuild(vp, size,
1961 						  loffset - j, nbytes);
1962 			return nwritten;
1963 		}
1964 	}
1965 
1966 	/*
1967 	 * default (old) behavior, writing out only one block
1968 	 *
1969 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
1970 	 */
1971 	nwritten = bp->b_bufsize;
1972 	bremfree(bp);
1973 	bawrite(bp);
1974 
1975 	return nwritten;
1976 }
1977 
1978 /*
1979  * getnewbuf:
1980  *
1981  *	Find and initialize a new buffer header, freeing up existing buffers
1982  *	in the bufqueues as necessary.  The new buffer is returned locked.
1983  *
1984  *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1985  *	buffer away, the caller must set B_INVAL prior to calling brelse().
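 *
 *	Sketch of that throw-away idiom (the same pattern this function
 *	uses internally when recovering KVA):
 *
 *		bp->b_flags |= B_INVAL;
 *		brelse(bp);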
1986  *
1987  *	We block if:
1988  *		We have insufficient buffer headers
1989  *		We have insufficient buffer space
1990  *		buffer_map is too fragmented ( space reservation fails )
1991  *		If we have to flush dirty buffers ( but we try to avoid this )
1992  *
1993  *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
1994  *	Instead we ask the buf daemon to do it for us.  We attempt to
1995  *	avoid piecemeal wakeups of the pageout daemon.
1996  *
1997  * MPALMOSTSAFE
1998  */
1999 struct buf *
2000 getnewbuf(int blkflags, int slptimeo, int size, int maxsize)
2001 {
2002 	struct buf *bp;
2003 	struct buf *nbp;
2004 	int defrag = 0;
2005 	int nqindex;
2006 	int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0;
2007 	static int flushingbufs;
2008 
2009 	/*
2010 	 * We can't afford to block since we might be holding a vnode lock,
2011 	 * which may prevent system daemons from running.  We deal with
2012 	 * low-memory situations by proactively returning memory and running
2013 	 * async I/O rather than sync I/O.
2014 	 */
2015 
2016 	++getnewbufcalls;
2017 	--getnewbufrestarts;
2018 restart:
2019 	++getnewbufrestarts;
2020 
2021 	/*
2022 	 * Setup for scan.  If we do not have enough free buffers,
2023 	 * we setup a degenerate case that immediately fails.  Note
2024 	 * that if we are a specially marked process, we are allowed to
2025 	 * dip into our reserves.
2026 	 *
2027 	 * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
2028 	 *
2029 	 * We start with EMPTYKVA.  If the list is empty we back up to EMPTY.
2030 	 * However, there are a number of cases (defragging, reusing, ...)
2031 	 * where we cannot back up.
2032 	 */
2033 	nqindex = BQUEUE_EMPTYKVA;
2034 	spin_lock(&bufqspin);
2035 	nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]);
2036 
2037 	if (nbp == NULL) {
2038 		/*
2039 		 * If no EMPTYKVA buffers and we are either
2040 		 * defragging or reusing, locate a CLEAN buffer
2041 		 * to free or reuse.  If bufspace usage is low
2042 		 * skip this step so we can allocate a new buffer.
2043 		 */
2044 		if (defrag || bufspace >= lobufspace) {
2045 			nqindex = BQUEUE_CLEAN;
2046 			nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]);
2047 		}
2048 
2049 		/*
2050 		 * If we could not find or were not allowed to reuse a
2051 		 * CLEAN buffer, check to see if it is ok to use an EMPTY
2052 		 * buffer.  We can only use an EMPTY buffer if allocating
2053 		 * its KVA would not otherwise run us out of buffer space.
2054 		 */
2055 		if (nbp == NULL && defrag == 0 &&
2056 		    bufspace + maxsize < hibufspace) {
2057 			nqindex = BQUEUE_EMPTY;
2058 			nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTY]);
2059 		}
2060 	}
2061 
2062 	/*
2063 	 * Run the scan, possibly freeing data and/or kva mappings on the
2064 	 * fly, depending on the queue being scanned.
2065 	 *
2066 	 * WARNING!  bufqspin is held!
2067 	 */
2068 	while ((bp = nbp) != NULL) {
2069 		int qindex = nqindex;
2070 
2071 		nbp = TAILQ_NEXT(bp, b_freelist);
2072 
2073 		/*
2074 		 * BQUEUE_CLEAN - B_AGE special case.  If not set the bp
2075 		 * cycles through the queue twice before being selected.
2076 		 */
2077 		if (qindex == BQUEUE_CLEAN &&
2078 		    (bp->b_flags & B_AGE) == 0 && nbp) {
2079 			bp->b_flags |= B_AGE;
2080 			TAILQ_REMOVE(&bufqueues[qindex], bp, b_freelist);
2081 			TAILQ_INSERT_TAIL(&bufqueues[qindex], bp, b_freelist);
2082 			continue;
2083 		}
2084 
2085 		/*
2086 		 * Calculate next bp ( we can only use it if we do not block
2087 		 * or do other fancy things ).
2088 		 */
2089 		if (nbp == NULL) {
2090 			switch(qindex) {
2091 			case BQUEUE_EMPTY:
2092 				nqindex = BQUEUE_EMPTYKVA;
2093 				if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA])))
2094 					break;
2095 				/* fall through */
2096 			case BQUEUE_EMPTYKVA:
2097 				nqindex = BQUEUE_CLEAN;
2098 				if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN])))
2099 					break;
2100 				/* fall through */
2101 			case BQUEUE_CLEAN:
2102 				/*
2103 				 * nbp is NULL.
2104 				 */
2105 				break;
2106 			}
2107 		}
2108 
2109 		/*
2110 		 * Sanity Checks
2111 		 */
2112 		KASSERT(bp->b_qindex == qindex,
2113 			("getnewbuf: inconsistent queue %d bp %p", qindex, bp));
2114 
2115 		/*
2116 		 * Note: we no longer distinguish between VMIO and non-VMIO
2117 		 * buffers.
2118 		 */
2119 		KASSERT((bp->b_flags & B_DELWRI) == 0,
2120 			("delwri buffer %p found in queue %d", bp, qindex));
2121 
2122 		/*
2123 		 * Do not try to reuse a buffer with a non-zero b_refs.
2124 		 * This is an unsynchronized test.  A synchronized test
2125 		 * is also performed after we lock the buffer.
2126 		 */
2127 		if (bp->b_refs)
2128 			continue;
2129 
2130 		/*
2131 		 * If we are defragging then we need a buffer with
2132 		 * b_kvasize != 0.  XXX this situation should no longer
2133 		 * occur, if defrag is non-zero the buffer's b_kvasize
2134 		 * should also be non-zero at this point.  XXX
2135 		 */
2136 		if (defrag && bp->b_kvasize == 0) {
2137 			kprintf("Warning: defrag empty buffer %p\n", bp);
2138 			continue;
2139 		}
2140 
2141 		/*
2142 		 * Start freeing the bp.  This is somewhat involved.  nbp
2143 		 * remains valid only for BQUEUE_EMPTY[KVA] bp's.  Buffers
2144 		 * on the clean list must be disassociated from their
2145 		 * current vnode.  Buffers on the empty[kva] lists have
2146 		 * already been disassociated.
2147 		 *
2148 		 * b_refs is checked after locking along with queue changes.
2149 		 * We must check here to deal with zero->nonzero transitions
2150 		 * made by the owner of the buffer lock, which is used by
2151 		 * VFSs to hold the buffer while issuing unlocked
2152 		 * uiomove()s.  We cannot invalidate the buffer's pages
2153 		 * for this case.  Once we successfully lock a buffer the
2154 		 * only 0->1 transitions of b_refs will occur via findblk().
2155 		 *
2156 		 * We must also check for queue changes after successful
2157 		 * locking as the current lock holder may dispose of the
2158 		 * buffer and change its queue.
2159 		 */
2160 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
2161 			spin_unlock(&bufqspin);
2162 			tsleep(&bd_request, 0, "gnbxxx", (hz + 99) / 100);
2163 			goto restart;
2164 		}
2165 		if (bp->b_qindex != qindex || bp->b_refs) {
2166 			spin_unlock(&bufqspin);
2167 			BUF_UNLOCK(bp);
2168 			goto restart;
2169 		}
2170 		bremfree_locked(bp);
2171 		spin_unlock(&bufqspin);
2172 
2173 		/*
2174 		 * Dependencies must be handled before we disassociate the
2175 		 * vnode.
2176 		 *
2177 		 * NOTE: HAMMER will set B_LOCKED if the buffer cannot
2178 		 * be immediately disassociated.  HAMMER then becomes
2179 		 * responsible for releasing the buffer.
2180 		 *
2181 		 * NOTE: bufqspin is UNLOCKED now.
2182 		 */
2183 		if (LIST_FIRST(&bp->b_dep) != NULL) {
2184 			buf_deallocate(bp);
2185 			if (bp->b_flags & B_LOCKED) {
2186 				bqrelse(bp);
2187 				goto restart;
2188 			}
2189 			KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
2190 		}
2191 
2192 		if (qindex == BQUEUE_CLEAN) {
2193 			if (bp->b_flags & B_VMIO)
2194 				vfs_vmio_release(bp);
2195 			if (bp->b_vp)
2196 				brelvp(bp);
2197 		}
2198 
2199 		/*
2200 		 * NOTE:  nbp is now entirely invalid.  We can only restart
2201 		 * the scan from this point on.
2202 		 *
2203 		 * Get the rest of the buffer freed up.  b_kva* is still
2204 		 * valid after this operation.
2205 		 */
2206 		KASSERT(bp->b_vp == NULL,
2207 			("bp3 %p flags %08x vnode %p qindex %d "
2208 			 "unexpectedly still associated!",
2209 			 bp, bp->b_flags, bp->b_vp, qindex));
2210 		KKASSERT((bp->b_flags & B_HASHED) == 0);
2211 
2212 		/*
2213 		 * critical section protection is not required when
2214 		 * scrapping a buffer's contents because it is already
2215 		 * wired.
2216 		 */
2217 		if (bp->b_bufsize)
2218 			allocbuf(bp, 0);
2219 
2220 		bp->b_flags = B_BNOCLIP;
2221 		bp->b_cmd = BUF_CMD_DONE;
2222 		bp->b_vp = NULL;
2223 		bp->b_error = 0;
2224 		bp->b_resid = 0;
2225 		bp->b_bcount = 0;
2226 		bp->b_xio.xio_npages = 0;
2227 		bp->b_dirtyoff = bp->b_dirtyend = 0;
2228 		bp->b_act_count = ACT_INIT;
2229 		reinitbufbio(bp);
2230 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
2231 		buf_dep_init(bp);
2232 		if (blkflags & GETBLK_BHEAVY)
2233 			bp->b_flags |= B_HEAVY;
2234 
2235 		/*
2236 		 * If we are defragging then free the buffer.
2237 		 */
2238 		if (defrag) {
2239 			bp->b_flags |= B_INVAL;
2240 			bfreekva(bp);
2241 			brelse(bp);
2242 			defrag = 0;
2243 			goto restart;
2244 		}
2245 
2246 		/*
2247 		 * If we are overcommitted then recover the buffer and its
2248 		 * KVM space.  This occurs in rare situations when multiple
2249 		 * processes are blocked in getnewbuf() or allocbuf().
2250 		 */
2251 		if (bufspace >= hibufspace)
2252 			flushingbufs = 1;
2253 		if (flushingbufs && bp->b_kvasize != 0) {
2254 			bp->b_flags |= B_INVAL;
2255 			bfreekva(bp);
2256 			brelse(bp);
2257 			goto restart;
2258 		}
2259 		if (bufspace < lobufspace)
2260 			flushingbufs = 0;
2261 
2262 		/*
2263 		 * b_refs can transition to a non-zero value while we hold
2264 		 * the buffer locked due to a findblk().  Our brelvp() above
2265 		 * interlocked any future possible transitions due to
2266 		 * findblk()s.
2267 		 *
2268 		 * If we find b_refs to be non-zero we can destroy the
2269 		 * buffer's contents but we cannot yet reuse the buffer.
2270 		 */
2271 		if (bp->b_refs) {
2272 			bp->b_flags |= B_INVAL;
2273 			bfreekva(bp);
2274 			brelse(bp);
2275 			goto restart;
2276 		}
2277 		break;
2278 		/* NOT REACHED, bufqspin not held */
2279 	}
2280 
2281 	/*
2282 	 * If we exhausted our list, sleep as appropriate.  We may have to
2283 	 * wake up various daemons and write out some dirty buffers.
2284 	 *
2285 	 * Generally we are sleeping due to insufficient buffer space.
2286 	 *
2287 	 * NOTE: bufqspin is held if bp is NULL, else it is not held.
2288 	 */
2289 	if (bp == NULL) {
2290 		int flags;
2291 		char *waitmsg;
2292 
2293 		spin_unlock(&bufqspin);
2294 		if (defrag) {
2295 			flags = VFS_BIO_NEED_BUFSPACE;
2296 			waitmsg = "nbufkv";
2297 		} else if (bufspace >= hibufspace) {
2298 			waitmsg = "nbufbs";
2299 			flags = VFS_BIO_NEED_BUFSPACE;
2300 		} else {
2301 			waitmsg = "newbuf";
2302 			flags = VFS_BIO_NEED_ANY;
2303 		}
2304 
2305 		bd_speedup();	/* heeeelp */
2306 		spin_lock(&bufcspin);
2307 		needsbuffer |= flags;
2308 		while (needsbuffer & flags) {
2309 			if (ssleep(&needsbuffer, &bufcspin,
2310 				   slpflags, waitmsg, slptimeo)) {
2311 				spin_unlock(&bufcspin);
2312 				return (NULL);
2313 			}
2314 		}
2315 		spin_unlock(&bufcspin);
2316 	} else {
2317 		/*
2318 		 * We finally have a valid bp.  We aren't quite out of the
2319 		 * woods, we still have to reserve kva space.  In order
2320 		 * woods; we still have to reserve kva space.  In order
2321 		 * BKVASIZE chunks.
2322 		 *
2323 		 * (bufqspin is not held)
2324 		 */
2325 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2326 
2327 		if (maxsize != bp->b_kvasize) {
2328 			vm_offset_t addr = 0;
2329 			int count;
2330 
2331 			bfreekva(bp);
2332 
2333 			count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2334 			vm_map_lock(&buffer_map);
2335 
2336 			if (vm_map_findspace(&buffer_map,
2337 				    vm_map_min(&buffer_map), maxsize,
2338 				    maxsize, 0, &addr)) {
2339 				/*
2340 				 * Uh oh.  Buffer map is too fragmented.  We
2341 				 * must defragment the map.
2342 				 */
2343 				vm_map_unlock(&buffer_map);
2344 				vm_map_entry_release(count);
2345 				++bufdefragcnt;
2346 				defrag = 1;
2347 				bp->b_flags |= B_INVAL;
2348 				brelse(bp);
2349 				goto restart;
2350 			}
2351 			if (addr) {
2352 				vm_map_insert(&buffer_map, &count,
2353 					NULL, 0,
2354 					addr, addr + maxsize,
2355 					VM_MAPTYPE_NORMAL,
2356 					VM_PROT_ALL, VM_PROT_ALL,
2357 					MAP_NOFAULT);
2358 
2359 				bp->b_kvabase = (caddr_t) addr;
2360 				bp->b_kvasize = maxsize;
2361 				bufspace += bp->b_kvasize;
2362 				++bufreusecnt;
2363 			}
2364 			vm_map_unlock(&buffer_map);
2365 			vm_map_entry_release(count);
2366 		}
2367 		bp->b_data = bp->b_kvabase;
2368 	}
2369 	return(bp);
2370 }
2371 
2372 /*
2373  * This routine is called in an emergency to recover VM pages from the
2374  * buffer cache by cashing in clean buffers.  The idea is to recover
2375  * enough pages to be able to satisfy a stuck bio_page_alloc().
2376  *
2377  * MPSAFE
2378  */
2379 static int
2380 recoverbufpages(void)
2381 {
2382 	struct buf *bp;
2383 	int bytes = 0;
2384 
2385 	++recoverbufcalls;
2386 
2387 	spin_lock(&bufqspin);
2388 	while (bytes < MAXBSIZE) {
2389 		bp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]);
2390 		if (bp == NULL)
2391 			break;
2392 
2393 		/*
2394 		 * BQUEUE_CLEAN - B_AGE special case.  If not set the bp
2395 		 * cycles through the queue twice before being selected.
2396 		 */
2397 		if ((bp->b_flags & B_AGE) == 0 && TAILQ_NEXT(bp, b_freelist)) {
2398 			bp->b_flags |= B_AGE;
2399 			TAILQ_REMOVE(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
2400 			TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN],
2401 					  bp, b_freelist);
2402 			continue;
2403 		}
2404 
2405 		/*
2406 		 * Sanity Checks
2407 		 */
2408 		KKASSERT(bp->b_qindex == BQUEUE_CLEAN);
2409 		KKASSERT((bp->b_flags & B_DELWRI) == 0);
2410 
2411 		/*
2412 		 * Start freeing the bp.  This is somewhat involved.
2413 		 *
2414 		 * Buffers on the clean list must be disassociated from
2415 		 * their current vnode
2416 		 * their current vnode.
2417 
2418 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
2419 			kprintf("recoverbufpages: warning, locked buf %p, "
2420 				"race corrected\n",
2421 				bp);
2422 			ssleep(&bd_request, &bufqspin, 0, "gnbxxx", hz / 100);
2423 			continue;
2424 		}
2425 		if (bp->b_qindex != BQUEUE_CLEAN) {
2426 			kprintf("recoverbufpages: warning, BUF_LOCK blocked "
2427 				"unexpectedly on buf %p index %d, race "
2428 				"corrected\n",
2429 				bp, bp->b_qindex);
2430 			BUF_UNLOCK(bp);
2431 			continue;
2432 		}
2433 		bremfree_locked(bp);
2434 		spin_unlock(&bufqspin);
2435 
2436 		/*
2437 		 * Dependencies must be handled before we disassociate the
2438 		 * vnode.
2439 		 *
2440 		 * NOTE: HAMMER will set B_LOCKED if the buffer cannot
2441 		 * be immediately disassociated.  HAMMER then becomes
2442 		 * responsible for releasing the buffer.
2443 		 */
2444 		if (LIST_FIRST(&bp->b_dep) != NULL) {
2445 			buf_deallocate(bp);
2446 			if (bp->b_flags & B_LOCKED) {
2447 				bqrelse(bp);
2448 				spin_lock(&bufqspin);
2449 				continue;
2450 			}
2451 			KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
2452 		}
2453 
2454 		bytes += bp->b_bufsize;
2455 
2456 		if (bp->b_flags & B_VMIO) {
2457 			bp->b_flags |= B_DIRECT;    /* try to free pages */
2458 			vfs_vmio_release(bp);
2459 		}
2460 		if (bp->b_vp)
2461 			brelvp(bp);
2462 
2463 		KKASSERT(bp->b_vp == NULL);
2464 		KKASSERT((bp->b_flags & B_HASHED) == 0);
2465 
2466 		/*
2467 		 * critical section protection is not required when
2468 		 * scrapping a buffer's contents because it is already
2469 		 * wired.
2470 		 */
2471 		if (bp->b_bufsize)
2472 			allocbuf(bp, 0);
2473 
2474 		bp->b_flags = B_BNOCLIP;
2475 		bp->b_cmd = BUF_CMD_DONE;
2476 		bp->b_vp = NULL;
2477 		bp->b_error = 0;
2478 		bp->b_resid = 0;
2479 		bp->b_bcount = 0;
2480 		bp->b_xio.xio_npages = 0;
2481 		bp->b_dirtyoff = bp->b_dirtyend = 0;
2482 		reinitbufbio(bp);
2483 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
2484 		buf_dep_init(bp);
2485 		bp->b_flags |= B_INVAL;
2486 		/* bfreekva(bp); */
2487 		brelse(bp);
2488 		spin_lock(&bufqspin);
2489 	}
2490 	spin_unlock(&bufqspin);
2491 	return(bytes);
2492 }
2493 
2494 /*
2495  * buf_daemon:
2496  *
2497  *	Buffer flushing daemon.  Buffers are normally flushed by the
2498  *	update daemon but if it cannot keep up this process starts to
2499  *	take the load in an attempt to prevent getnewbuf() from blocking.
2500  *
2501  *	Once a flush is initiated it does not stop until the number
2502  *	of buffers falls below lodirtybuffers, but we will wake up anyone
2503  *	waiting at the mid-point.
2504  */
2505 
2506 static struct kproc_desc buf_kp = {
2507 	"bufdaemon",
2508 	buf_daemon,
2509 	&bufdaemon_td
2510 };
2511 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST,
2512 	kproc_start, &buf_kp)
2513 
2514 static struct kproc_desc bufhw_kp = {
2515 	"bufdaemon_hw",
2516 	buf_daemon_hw,
2517 	&bufdaemonhw_td
2518 };
2519 SYSINIT(bufdaemon_hw, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST,
2520 	kproc_start, &bufhw_kp)
2521 
2522 /*
2523  * MPSAFE thread
2524  */
2525 static void
2526 buf_daemon(void)
2527 {
2528 	int limit;
2529 
2530 	/*
2531 	 * This process needs to be suspended prior to shutdown sync.
2532 	 */
2533 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
2534 			      bufdaemon_td, SHUTDOWN_PRI_LAST);
2535 	curthread->td_flags |= TDF_SYSTHREAD;
2536 
2537 	/*
2538 	 * This process is allowed to take the buffer cache to the limit
2539 	 */
2540 	for (;;) {
2541 		kproc_suspend_loop();
2542 
2543 		/*
2544 		 * Do the flush as long as the number of dirty buffers
2545 		 * (including those running) exceeds lodirtybufspace.
2546 		 *
2547 		 * When flushing, limit the amount of in-transit I/O we
2548 		 * allow to build up to hirunningspace, otherwise we would
2549 		 * completely saturate the I/O system.  Wake up any waiting
2550 		 * processes before we normally would so they can run in
2551 		 * parallel with our drain.
2552 		 *
2553 		 * Our aggregate normal+HW lo water mark is lodirtybufspace,
2554 		 * but because we split the operation into two threads we
2555 		 * have to cut it in half for each thread.
2556 		 */
2557 		waitrunningbufspace();
2558 		limit = lodirtybufspace / 2;
2559 		while (runningbufspace + dirtybufspace > limit ||
2560 		       dirtybufcount - dirtybufcounthw >= nbuf / 2) {
2561 			if (flushbufqueues(BQUEUE_DIRTY) == 0)
2562 				break;
2563 			if (runningbufspace < hirunningspace)
2564 				continue;
2565 			waitrunningbufspace();
2566 		}
2567 
2568 		/*
2569 		 * We reached our low water mark, reset the
2570 		 * request and sleep until we are needed again.
2571 		 * The sleep is just so the suspend code works.
2572 		 */
2573 		spin_lock(&bufcspin);
2574 		if (bd_request == 0)
2575 			ssleep(&bd_request, &bufcspin, 0, "psleep", hz);
2576 		bd_request = 0;
2577 		spin_unlock(&bufcspin);
2578 	}
2579 }
2580 
2581 /*
2582  * MPSAFE thread
2583  */
2584 static void
2585 buf_daemon_hw(void)
2586 {
2587 	int limit;
2588 
2589 	/*
2590 	 * This process needs to be suspended prior to shutdown sync.
2591 	 */
2592 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
2593 			      bufdaemonhw_td, SHUTDOWN_PRI_LAST);
2594 	curthread->td_flags |= TDF_SYSTHREAD;
2595 
2596 	/*
2597 	 * This process is allowed to take the buffer cache to the limit
2598 	 */
2599 	for (;;) {
2600 		kproc_suspend_loop();
2601 
2602 		/*
2603 		 * Do the flush.  Limit the amount of in-transit I/O we
2604 		 * allow to build up, otherwise we would completely saturate
2605 		 * the I/O system.  Wake up any waiting processes before we
2606 		 * normally would so they can run in parallel with our drain.
2607 		 *
2608 		 * Once we decide to flush push the queued I/O up to
2609 		 * hirunningspace in order to trigger bursting by the bioq
2610 		 * subsystem.
2611 		 *
2612 		 * Our aggregate normal+HW lo water mark is lodirtybufspace,
2613 		 * but because we split the operation into two threads we
2614 		 * have to cut it in half for each thread.
2615 		 */
2616 		waitrunningbufspace();
2617 		limit = lodirtybufspace / 2;
2618 		while (runningbufspace + dirtybufspacehw > limit ||
2619 		       dirtybufcounthw >= nbuf / 2) {
2620 			if (flushbufqueues(BQUEUE_DIRTY_HW) == 0)
2621 				break;
2622 			if (runningbufspace < hirunningspace)
2623 				continue;
2624 			waitrunningbufspace();
2625 		}
2626 
2627 		/*
2628 		 * We reached our low water mark, reset the
2629 		 * request and sleep until we are needed again.
2630 		 * The sleep is just so the suspend code works.
2631 		 */
2632 		spin_lock(&bufcspin);
2633 		if (bd_request_hw == 0)
2634 			ssleep(&bd_request_hw, &bufcspin, 0, "psleep", hz);
2635 		bd_request_hw = 0;
2636 		spin_unlock(&bufcspin);
2637 	}
2638 }
2639 
2640 /*
2641  * flushbufqueues:
2642  *
2643  *	Try to flush a buffer in the dirty queue.  We must be careful to
2644  *	free up B_INVAL buffers instead of write them, which NFS is
2645  *	free up B_INVAL buffers instead of writing them, which NFS is
2646  *
2647  *	B_RELBUF may only be set by VFSs.  We do set B_AGE to indicate
2648  *	that we really want to try to get the buffer out and reuse it
2649  *	due to the write load on the machine.
2650  *
2651  *	We must lock the buffer in order to check its validity before we
2652  *	can mess with its contents.  bufqspin isn't enough.
2653  */
2654 static int
2655 flushbufqueues(bufq_type_t q)
2656 {
2657 	struct buf *bp;
2658 	int r = 0;
2659 	int spun;
2660 
2661 	spin_lock(&bufqspin);
2662 	spun = 1;
2663 
2664 	bp = TAILQ_FIRST(&bufqueues[q]);
2665 	while (bp) {
2666 		if ((bp->b_flags & B_DELWRI) == 0) {
2667 			kprintf("Unexpected clean buffer %p\n", bp);
2668 			bp = TAILQ_NEXT(bp, b_freelist);
2669 			continue;
2670 		}
2671 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2672 			bp = TAILQ_NEXT(bp, b_freelist);
2673 			continue;
2674 		}
2675 		KKASSERT(bp->b_qindex == q);
2676 
2677 		/*
2678 		 * Must recheck B_DELWRI after successfully locking
2679 		 * the buffer.
2680 		 */
2681 		if ((bp->b_flags & B_DELWRI) == 0) {
2682 			BUF_UNLOCK(bp);
2683 			bp = TAILQ_NEXT(bp, b_freelist);
2684 			continue;
2685 		}
2686 
2687 		if (bp->b_flags & B_INVAL) {
2688 			_bremfree(bp);
2689 			spin_unlock(&bufqspin);
2690 			spun = 0;
2691 			brelse(bp);
2692 			++r;
2693 			break;
2694 		}
2695 
2696 		spin_unlock(&bufqspin);
2697 		lwkt_yield();
2698 		spun = 0;
2699 
2700 		if (LIST_FIRST(&bp->b_dep) != NULL &&
2701 		    (bp->b_flags & B_DEFERRED) == 0 &&
2702 		    buf_countdeps(bp, 0)) {
2703 			spin_lock(&bufqspin);
2704 			spun = 1;
2705 			TAILQ_REMOVE(&bufqueues[q], bp, b_freelist);
2706 			TAILQ_INSERT_TAIL(&bufqueues[q], bp, b_freelist);
2707 			bp->b_flags |= B_DEFERRED;
2708 			BUF_UNLOCK(bp);
2709 			bp = TAILQ_FIRST(&bufqueues[q]);
2710 			continue;
2711 		}
2712 
2713 		/*
2714 		 * If the buffer has a dependency, buf_checkwrite() must
2715 		 * also return 0 for us to be able to initiate the write.
2716 		 *
2717 		 * If the buffer is flagged B_ERROR it may be requeued
2718 		 * over and over again; we try to avoid a live lock.
2719 		 *
2720 		 * NOTE: buf_checkwrite is MPSAFE.
2721 		 */
2722 		if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
2723 			bremfree(bp);
2724 			brelse(bp);
2725 		} else if (bp->b_flags & B_ERROR) {
2726 			tsleep(bp, 0, "bioer", 1);
2727 			bp->b_flags &= ~B_AGE;
2728 			vfs_bio_awrite(bp);
2729 		} else {
2730 			bp->b_flags |= B_AGE;
2731 			vfs_bio_awrite(bp);
2732 		}
2733 		++r;
2734 		break;
2735 	}
2736 	if (spun)
2737 		spin_unlock(&bufqspin);
2738 	return (r);
2739 }
2740 
2741 /*
2742  * inmem:
2743  *
2744  *	Returns true if no I/O is needed to access the associated VM object.
2745  *	This is like findblk except it also hunts around in the VM system for
2746  *	the data.
2747  *
2748  *	Note that we ignore vm_page_free() races from interrupts against our
2749  *	lookup, since if the caller is not protected our return value will not
2750  *	be any more valid than otherwise once we exit the critical section.
2751  */
2752 int
2753 inmem(struct vnode *vp, off_t loffset)
2754 {
2755 	vm_object_t obj;
2756 	vm_offset_t toff, tinc, size;
2757 	vm_page_t m;
2758 	int res = 1;
2759 
2760 	if (findblk(vp, loffset, FINDBLK_TEST))
2761 		return 1;
2762 	if (vp->v_mount == NULL)
2763 		return 0;
2764 	if ((obj = vp->v_object) == NULL)
2765 		return 0;
2766 
2767 	size = PAGE_SIZE;
2768 	if (size > vp->v_mount->mnt_stat.f_iosize)
2769 		size = vp->v_mount->mnt_stat.f_iosize;
2770 
2771 	vm_object_hold(obj);
2772 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2773 		m = vm_page_lookup(obj, OFF_TO_IDX(loffset + toff));
2774 		if (m == NULL) {
2775 			res = 0;
2776 			break;
2777 		}
2778 		tinc = size;
2779 		if (tinc > PAGE_SIZE - ((toff + loffset) & PAGE_MASK))
2780 			tinc = PAGE_SIZE - ((toff + loffset) & PAGE_MASK);
2781 		if (vm_page_is_valid(m,
2782 		    (vm_offset_t) ((toff + loffset) & PAGE_MASK), tinc) == 0) {
2783 			res = 0;
2784 			break;
2785 		}
2786 	}
2787 	vm_object_drop(obj);
2788 	return (res);
2789 }
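
/*
 * Hypothetical sketch (illustrative only): read-ahead logic might use
 * inmem() to avoid scheduling I/O for data that is already resident:
 *
 *	if (!inmem(vp, loffset)) {
 *		... queue an asynchronous read for loffset ...
 *	}
 */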
2790 
2791 /*
2792  * findblk:
2793  *
2794  *	Locate and return the specified buffer.  Unless flagged otherwise,
2795  *	a locked buffer will be returned if it exists or NULL if it does not.
2796  *
2797  *	findblk()'d buffers are still on the bufqueues and if you intend
2798  *	to use your (locked NON-TEST) buffer you need to bremfree(bp)
2799  *	and possibly do other stuff to it.
2800  *
2801  *	FINDBLK_TEST	- Do not lock the buffer.  The caller is responsible
2802  *			  for locking the buffer and ensuring that it remains
2803  *			  the desired buffer after locking.
2804  *
2805  *	FINDBLK_NBLOCK	- Lock the buffer non-blocking.  If we are unable
2806  *			  to acquire the lock we return NULL, even if the
2807  *			  buffer exists.
2808  *
2809  *	FINDBLK_REF	- Returns the buffer ref'd, which prevents normal
2810  *			  reuse by getnewbuf() but does not prevent
2811  *			  disassociation (B_INVAL).  Used to avoid deadlocks
2812  *			  against random (vp,loffset)s due to reassignment.
2813  *
2814  *	(0)		- Lock the buffer blocking.
2815  *
2816  * MPSAFE
2817  */
2818 struct buf *
2819 findblk(struct vnode *vp, off_t loffset, int flags)
2820 {
2821 	struct buf *bp;
2822 	int lkflags;
2823 
2824 	lkflags = LK_EXCLUSIVE;
2825 	if (flags & FINDBLK_NBLOCK)
2826 		lkflags |= LK_NOWAIT;
2827 
2828 	for (;;) {
2829 		/*
2830 		 * Lookup.  Ref the buf while holding v_token to prevent
2831 		 * reuse (but does not prevent disassociation).
2832 		 */
2833 		lwkt_gettoken(&vp->v_token);
2834 		bp = buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset);
2835 		if (bp == NULL) {
2836 			lwkt_reltoken(&vp->v_token);
2837 			return(NULL);
2838 		}
2839 		bqhold(bp);
2840 		lwkt_reltoken(&vp->v_token);
2841 
2842 		/*
2843 		 * If testing only, break and return bp; do not lock.
2844 		 */
2845 		if (flags & FINDBLK_TEST)
2846 			break;
2847 
2848 		/*
2849 		 * Lock the buffer, return an error if the lock fails.
2850 		 * (only FINDBLK_NBLOCK can cause the lock to fail).
2851 		 */
2852 		if (BUF_LOCK(bp, lkflags)) {
2853 			atomic_subtract_int(&bp->b_refs, 1);
2854 			/* bp = NULL; not needed */
2855 			return(NULL);
2856 		}
2857 
2858 		/*
2859 		 * Revalidate the locked buf before allowing it to be
2860 		 * returned.
2861 		 */
2862 		if (bp->b_vp == vp && bp->b_loffset == loffset)
2863 			break;
2864 		atomic_subtract_int(&bp->b_refs, 1);
2865 		BUF_UNLOCK(bp);
2866 	}
2867 
2868 	/*
2869 	 * Success
2870 	 */
2871 	if ((flags & FINDBLK_REF) == 0)
2872 		atomic_subtract_int(&bp->b_refs, 1);
2873 	return(bp);
2874 }
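
/*
 * Hypothetical findblk() usage sketch (not code from this file):
 * probe for an in-core buffer without risking a blocking lock and
 * fall back to getblk() when nothing is found:
 *
 *	bp = findblk(vp, loffset, FINDBLK_NBLOCK);
 *	if (bp == NULL)
 *		bp = getblk(vp, loffset, blksize, 0, 0);
 */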
2875 
2876 /*
2877  * getcacheblk:
2878  *
2879  *	Similar to getblk() except only returns the buffer if it is
2880  *	B_CACHE and requires no other manipulation.  Otherwise NULL
2881  *	is returned.
2882  *
2883  *	If B_RAM is set the buffer might be just fine, but we return
2884  *	NULL anyway because we want the code to fall through to the
2885  *	cluster read.  Otherwise read-ahead breaks.
2886  *
2887  *	If blksize is 0 the buffer cache buffer must already be fully
2888  *	cached.
2889  *
2890  *	If blksize is non-zero getblk() will be used, allowing a buffer
2891  *	to be reinstantiated from its VM backing store.  The buffer must
2892  *	still be fully cached after reinstantiation to be returned.
2893  */
2894 struct buf *
2895 getcacheblk(struct vnode *vp, off_t loffset, int blksize)
2896 {
2897 	struct buf *bp;
2898 
2899 	if (blksize) {
2900 		bp = getblk(vp, loffset, blksize, 0, 0);
2901 		if (bp) {
2902 			if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) ==
2903 			    B_CACHE) {
2904 				bp->b_flags &= ~B_AGE;
2905 			} else {
2906 				brelse(bp);
2907 				bp = NULL;
2908 			}
2909 		}
2910 	} else {
2911 		bp = findblk(vp, loffset, 0);
2912 		if (bp) {
2913 			if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) ==
2914 			    B_CACHE) {
2915 				bp->b_flags &= ~B_AGE;
2916 				bremfree(bp);
2917 			} else {
2918 				BUF_UNLOCK(bp);
2919 				bp = NULL;
2920 			}
2921 		}
2922 	}
2923 	return (bp);
2924 }
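
/*
 * Illustrative caller sketch (hypothetical read path): try the cheap
 * fully-cached case first and fall back to a normal or cluster read
 * when NULL is returned:
 *
 *	bp = getcacheblk(vp, loffset, blksize);
 *	if (bp == NULL) {
 *		... issue bread()/cluster_read() instead ...
 *	}
 */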
2925 
2926 /*
2927  * getblk:
2928  *
2929  *	Get a block given a specified block and offset into a file/device.
2930  * 	B_INVAL may or may not be set on return.  The caller should clear
2931  *	B_INVAL prior to initiating a READ.
2932  *
2933  *	IT IS IMPORTANT TO UNDERSTAND THAT IF YOU CALL GETBLK() AND B_CACHE
2934  *	IS NOT SET, YOU MUST INITIALIZE THE RETURNED BUFFER, ISSUE A READ,
2935  *	OR SET B_INVAL BEFORE RETIRING IT.  If you retire a getblk'd buffer
2936  *	without doing any of those things the system will likely believe
2937  *	the buffer to be valid (especially if it is not B_VMIO), and the
2938  *	next getblk() will return the buffer with B_CACHE set.
2939  *
2940  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2941  *	an existing buffer.
2942  *
2943  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
2944  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2945  *	and then cleared based on the backing VM.  If the previous buffer is
2946  *	non-0-sized but invalid, B_CACHE will be cleared.
2947  *
2948  *	If getblk() must create a new buffer, the new buffer is returned with
2949  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2950  *	case it is returned with B_INVAL clear and B_CACHE set based on the
2951  *	backing VM.
2952  *
2953  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
2954  *	B_CACHE bit is clear.
2955  *
2956  *	What this means, basically, is that the caller should use B_CACHE to
2957  *	determine whether the buffer is fully valid or not and should clear
2958  *	B_INVAL prior to issuing a read.  If the caller intends to validate
2959  *	the buffer by loading its data area with something, the caller needs
2960  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
2961  *	the caller should set B_CACHE ( as an optimization ), else the caller
2962  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
2963  *	a write attempt or if it was a successfull read.  If the caller
2964  *	a write attempt or if it was a successful read.  If the caller
2965  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
2966  *
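 *
 *	A sketch of that canonical read pattern (simplified and purely
 *	illustrative; a real bread()-style caller adds more handling):
 *
 *		bp = getblk(vp, loffset, size, 0, 0);
 *		if ((bp->b_flags & B_CACHE) == 0) {
 *			bp->b_flags &= ~(B_INVAL | B_ERROR);
 *			bp->b_cmd = BUF_CMD_READ;
 *			bp->b_bio1.bio_done = biodone_sync;
 *			bp->b_bio1.bio_flags |= BIO_SYNC;
 *			vn_strategy(vp, &bp->b_bio1);
 *			error = biowait(&bp->b_bio1, "biord");
 *		}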
2967  *	getblk flags:
2968  *
2969  *	GETBLK_PCATCH - catch signal if blocked, can cause NULL return
2970  *	GETBLK_BHEAVY - heavy-weight buffer cache buffer
2971  *
2972  * MPALMOSTSAFE
2973  */
2974 struct buf *
2975 getblk(struct vnode *vp, off_t loffset, int size, int blkflags, int slptimeo)
2976 {
2977 	struct buf *bp;
2978 	int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0;
2979 	int error;
2980 	int lkflags;
2981 
2982 	if (size > MAXBSIZE)
2983 		panic("getblk: size(%d) > MAXBSIZE(%d)", size, MAXBSIZE);
2984 	if (vp->v_object == NULL)
2985 		panic("getblk: vnode %p has no object!", vp);
2986 
2987 loop:
2988 	if ((bp = findblk(vp, loffset, FINDBLK_REF | FINDBLK_TEST)) != NULL) {
2989 		/*
2990 		 * The buffer was found in the cache, but we need to lock it.
2991 		 * We must acquire a ref on the bp to prevent reuse, but
2992 		 * this will not prevent disassociation (brelvp()) so we
2993 		 * must recheck (vp,loffset) after acquiring the lock.
2994 		 *
2995 		 * Without the ref the buffer could potentially be reused
2996 		 * before we acquire the lock and create a deadlock
2997 		 * situation between the thread trying to reuse the buffer
2998 		 * and us due to the fact that we would wind up blocking
2999 		 * on a random (vp,loffset).
3000 		 */
3001 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
3002 			if (blkflags & GETBLK_NOWAIT) {
3003 				bqdrop(bp);
3004 				return(NULL);
3005 			}
3006 			lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
3007 			if (blkflags & GETBLK_PCATCH)
3008 				lkflags |= LK_PCATCH;
3009 			error = BUF_TIMELOCK(bp, lkflags, "getblk", slptimeo);
3010 			if (error) {
3011 				bqdrop(bp);
3012 				if (error == ENOLCK)
3013 					goto loop;
3014 				return (NULL);
3015 			}
3016 			/* buffer may have changed on us */
3017 		}
3018 		bqdrop(bp);
3019 
3020 		/*
3021 		 * Once the buffer has been locked, make sure we didn't race
3022 		 * a buffer recyclement.  Buffers that are no longer hashed
3023 		 * will have b_vp == NULL, so this takes care of that check
3024 		 * as well.
3025 		 */
3026 		if (bp->b_vp != vp || bp->b_loffset != loffset) {
3027 			kprintf("Warning: buffer %p (vp %p loffset %lld) "
3028 				"was recycled\n",
3029 				bp, vp, (long long)loffset);
3030 			BUF_UNLOCK(bp);
3031 			goto loop;
3032 		}
3033 
3034 		/*
3035 		 * If SZMATCH any pre-existing buffer must be of the requested
3036 		 * size or NULL is returned.  The caller absolutely does not
3037 		 * want getblk() to bwrite() the buffer on a size mismatch.
3038 		 */
3039 		if ((blkflags & GETBLK_SZMATCH) && size != bp->b_bcount) {
3040 			BUF_UNLOCK(bp);
3041 			return(NULL);
3042 		}
3043 
3044 		/*
3045 		 * All vnode-based buffers must be backed by a VM object.
3046 		 */
3047 		KKASSERT(bp->b_flags & B_VMIO);
3048 		KKASSERT(bp->b_cmd == BUF_CMD_DONE);
3049 		bp->b_flags &= ~B_AGE;
3050 
3051 		/*
3052 		 * Make sure that B_INVAL buffers do not have a cached
3053 		 * block number translation.
3054 		 */
3055 		if ((bp->b_flags & B_INVAL) && (bp->b_bio2.bio_offset != NOOFFSET)) {
3056 			kprintf("Warning: invalid buffer %p (vp %p loffset %lld)"
3057 				" did not have cleared bio_offset cache\n",
3058 				bp, vp, (long long)loffset);
3059 			clearbiocache(&bp->b_bio2);
3060 		}
3061 
3062 		/*
3063 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3064 		 * invalid.
3065 		 */
3066 		if (bp->b_flags & B_INVAL)
3067 			bp->b_flags &= ~B_CACHE;
3068 		bremfree(bp);
3069 
3070 		/*
3071 		 * Any size inconsistency with a dirty buffer or a buffer
3072 		 * with a softupdates dependency must be resolved.  Resizing
3073 		 * the buffer in such circumstances can lead to problems.
3074 		 *
3075 		 * Dirty or dependent buffers are written synchronously.
3076 		 * Other types of buffers are simply released and
3077 		 * reconstituted as they may be backed by valid, dirty VM
3078 		 * pages (but not marked B_DELWRI).
3079 		 *
3080 		 * NFS NOTE: NFS buffers which straddle EOF are oddly-sized
3081 		 * and may be left over from a prior truncation (and thus
3082 		 * no longer represent the actual EOF point), so we
3083 		 * definitely do not want to B_NOCACHE the backing store.
3084 		 */
3085 		if (size != bp->b_bcount) {
3086 			if (bp->b_flags & B_DELWRI) {
3087 				bp->b_flags |= B_RELBUF;
3088 				bwrite(bp);
3089 			} else if (LIST_FIRST(&bp->b_dep)) {
3090 				bp->b_flags |= B_RELBUF;
3091 				bwrite(bp);
3092 			} else {
3093 				bp->b_flags |= B_RELBUF;
3094 				brelse(bp);
3095 			}
3096 			goto loop;
3097 		}
3098 		KKASSERT(size <= bp->b_kvasize);
3099 		KASSERT(bp->b_loffset != NOOFFSET,
3100 			("getblk: no buffer offset"));
3101 
3102 		/*
3103 		 * A buffer with B_DELWRI set and B_CACHE clear must
3104 		 * be committed before we can return the buffer in
3105 		 * order to prevent the caller from issuing a read
3106 		 * ( due to B_CACHE not being set ) and overwriting
3107 		 * it.
3108 		 *
3109 		 * Most callers, including NFS and FFS, need this to
3110 		 * operate properly either because they assume they
3111 		 * can issue a read if B_CACHE is not set, or because
3112 		 * ( for example ) an uncached B_DELWRI might loop due
3113 		 * to softupdates re-dirtying the buffer.  In the latter
3114 		 * case, B_CACHE is set after the first write completes,
3115 		 * preventing further loops.
3116 		 *
3117 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
3118 		 * above while extending the buffer, we cannot allow the
3119 		 * buffer to remain with B_CACHE set after the write
3120 		 * completes or it will represent a corrupt state.  To
3121 		 * deal with this we set B_NOCACHE to scrap the buffer
3122 		 * after the write.
3123 		 *
3124 		 * XXX Should this be B_RELBUF instead of B_NOCACHE?
3125 		 *     I'm not even sure this state is still possible
3126 		 *     now that getblk() writes out any dirty buffers
3127 		 *     on size changes.
3128 		 *
3129 		 * We might be able to do something fancy, like setting
3130 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
3131 		 * so the below call doesn't set B_CACHE, but that gets real
3132 		 * confusing.  This is much easier.
3133 		 */
3134 
3135 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3136 			kprintf("getblk: Warning, bp %p loff=%jx DELWRI set "
3137 				"and CACHE clear, b_flags %08x\n",
3138 				bp, (intmax_t)bp->b_loffset, bp->b_flags);
3139 			bp->b_flags |= B_NOCACHE;
3140 			bwrite(bp);
3141 			goto loop;
3142 		}
3143 	} else {
3144 		/*
3145 		 * Buffer is not in-core, create new buffer.  The buffer
3146 		 * returned by getnewbuf() is locked.  Note that the returned
3147 		 * buffer is also considered valid (not marked B_INVAL).
3148 		 *
3149 		 * Calculating the offset for the I/O requires figuring out
3150 		 * the block size.  We use DEV_BSIZE for VBLK or VCHR and
3151 		 * the mount's f_iosize otherwise.  If the vnode does not
3152 		 * have an associated mount we assume that the passed size is
3153 		 * the block size.
3154 		 *
3155 		 * Note that vn_isdisk() cannot be used here since it may
3156 		 * return a failure for numerous reasons.  Note that the
3157 		 * buffer size may be larger than the block size (the caller
3158 		 * will use block numbers with the proper multiple).  Beware
3159 		 * of using any v_* fields which are part of unions.  In
3160 		 * particular, in DragonFly the mount point overloading
3161 		 * mechanism uses the namecache only and the underlying
3162 		 * directory vnode is not a special case.
3163 		 */
3164 		int bsize, maxsize;
3165 
3166 		if (vp->v_type == VBLK || vp->v_type == VCHR)
3167 			bsize = DEV_BSIZE;
3168 		else if (vp->v_mount)
3169 			bsize = vp->v_mount->mnt_stat.f_iosize;
3170 		else
3171 			bsize = size;
3172 
3173 		maxsize = size + (loffset & PAGE_MASK);
3174 		maxsize = imax(maxsize, bsize);
3175 
3176 		bp = getnewbuf(blkflags, slptimeo, size, maxsize);
3177 		if (bp == NULL) {
3178 			if (slpflags || slptimeo)
3179 				return NULL;
3180 			goto loop;
3181 		}
3182 
3183 		/*
3184 		 * Atomically insert the buffer into the hash, so that it can
3185 		 * be found by findblk().
3186 		 *
3187 		 * If bgetvp() returns non-zero a collision occurred, and the
3188 		 * bp will not be associated with the vnode.
3189 		 *
3190 		 * Make sure the translation layer has been cleared.
3191 		 */
3192 		bp->b_loffset = loffset;
3193 		bp->b_bio2.bio_offset = NOOFFSET;
3194 		/* bp->b_bio2.bio_next = NULL; */
3195 
3196 		if (bgetvp(vp, bp, size)) {
3197 			bp->b_flags |= B_INVAL;
3198 			brelse(bp);
3199 			goto loop;
3200 		}
3201 
3202 		/*
3203 		 * All vnode-based buffers must be backed by a VM object.
3204 		 */
3205 		KKASSERT(vp->v_object != NULL);
3206 		bp->b_flags |= B_VMIO;
3207 		KKASSERT(bp->b_cmd == BUF_CMD_DONE);
3208 
3209 		allocbuf(bp, size);
3210 	}
3211 	KKASSERT(dsched_is_clear_buf_priv(bp));
3212 	return (bp);
3213 }
3214 
3215 /*
3216  * regetblk(bp)
3217  *
3218  * Reacquire a buffer that was previously released to the locked queue,
3219  * or reacquire a buffer which is interlocked by having bioops->io_deallocate
3220  * set B_LOCKED (which handles the acquisition race).
3221  *
3222  * To this end, either B_LOCKED must be set or the dependency list must be
3223  * non-empty.
3224  *
3225  * MPSAFE
3226  */
3227 void
3228 regetblk(struct buf *bp)
3229 {
3230 	KKASSERT((bp->b_flags & B_LOCKED) || LIST_FIRST(&bp->b_dep) != NULL);
3231 	BUF_LOCK(bp, LK_EXCLUSIVE | LK_RETRY);
3232 	bremfree(bp);
3233 }
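
/*
 * Hypothetical sketch (illustrative; details vary by filesystem): a
 * bioops io_deallocate handler that set B_LOCKED might later recover
 * and dispose of the buffer along these lines:
 *
 *	regetblk(bp);
 *	bp->b_flags &= ~B_LOCKED;
 *	... finish up, then brelse(bp) or bqrelse(bp) ...
 */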
3234 
3235 /*
3236  * geteblk:
3237  *
3238  *	Get an empty, disassociated buffer of given size.  The buffer is
3239  *	initially set to B_INVAL.
3240  *
3241  *	critical section protection is not required for the allocbuf()
3242  *	call because races are impossible here.
3243  *
3244  * MPALMOSTSAFE
3245  */
3246 struct buf *
3247 geteblk(int size)
3248 {
3249 	struct buf *bp;
3250 	int maxsize;
3251 
3252 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
3253 
3254 	while ((bp = getnewbuf(0, 0, size, maxsize)) == NULL)
3255 		;
3256 	allocbuf(bp, size);
3257 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
3258 	KKASSERT(dsched_is_clear_buf_priv(bp));
3259 	return (bp);
3260 }
3261 
3262 
3263 /*
3264  * allocbuf:
3265  *
3266  *	This code constitutes the buffer memory from either anonymous system
3267  *	memory (in the case of non-VMIO operations) or from an associated
3268  *	VM object (in the case of VMIO operations).  This code is able to
3269  *	resize a buffer up or down.
3270  *
3271  *	Note that this code is tricky, and has many complications to resolve
3272  *	deadlock or inconsistent data situations.  Tread lightly!!!
3273  *	There are B_CACHE and B_DELWRI interactions that must be dealt with by
3274  *	the caller.  Calling this code willy nilly can result in the loss of
3275  *	data.
3276  *
3277  *	allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
3278  *	B_CACHE for the non-VMIO case.
3279  *
3280  *	This routine does not need to be called from a critical section but you
3281  *	must own the buffer.
3282  *
3283  * MPSAFE
3284  */
3285 int
3286 allocbuf(struct buf *bp, int size)
3287 {
3288 	int newbsize, mbsize;
3289 	int i;
3290 
3291 	if (BUF_REFCNT(bp) == 0)
3292 		panic("allocbuf: buffer not busy");
3293 
3294 	if (bp->b_kvasize < size)
3295 		panic("allocbuf: buffer too small");
3296 
3297 	if ((bp->b_flags & B_VMIO) == 0) {
3298 		caddr_t origbuf;
3299 		int origbufsize;
3300 		/*
3301 		 * Just get anonymous memory from the kernel.  Don't
3302 		 * mess with B_CACHE.
3303 		 */
3304 		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3305 		if (bp->b_flags & B_MALLOC)
3306 			newbsize = mbsize;
3307 		else
3308 			newbsize = round_page(size);
3309 
3310 		if (newbsize < bp->b_bufsize) {
3311 			/*
3312 			 * Malloced buffers are not shrunk
3313 			 */
3314 			if (bp->b_flags & B_MALLOC) {
3315 				if (newbsize) {
3316 					bp->b_bcount = size;
3317 				} else {
3318 					kfree(bp->b_data, M_BIOBUF);
3319 					if (bp->b_bufsize) {
3320 						atomic_subtract_int(&bufmallocspace, bp->b_bufsize);
3321 						bufspacewakeup();
3322 						bp->b_bufsize = 0;
3323 					}
3324 					bp->b_data = bp->b_kvabase;
3325 					bp->b_bcount = 0;
3326 					bp->b_flags &= ~B_MALLOC;
3327 				}
3328 				return 1;
3329 			}
3330 			vm_hold_free_pages(
3331 			    bp,
3332 			    (vm_offset_t) bp->b_data + newbsize,
3333 			    (vm_offset_t) bp->b_data + bp->b_bufsize);
3334 		} else if (newbsize > bp->b_bufsize) {
3335 			/*
3336 			 * We only use malloced memory on the first allocation,
3337 			 * and revert to page-allocated memory when the buffer
3338 			 * grows.
3339 			 */
3340 			if ((bufmallocspace < maxbufmallocspace) &&
3341 				(bp->b_bufsize == 0) &&
3342 				(mbsize <= PAGE_SIZE/2)) {
3343 
3344 				bp->b_data = kmalloc(mbsize, M_BIOBUF, M_WAITOK);
3345 				bp->b_bufsize = mbsize;
3346 				bp->b_bcount = size;
3347 				bp->b_flags |= B_MALLOC;
3348 				atomic_add_int(&bufmallocspace, mbsize);
3349 				return 1;
3350 			}
3351 			origbuf = NULL;
3352 			origbufsize = 0;
3353 			/*
3354 			 * If the buffer is growing on its other-than-first
3355 			 * allocation, then we revert to the page-allocation
3356 			 * scheme.
3357 			 */
3358 			if (bp->b_flags & B_MALLOC) {
3359 				origbuf = bp->b_data;
3360 				origbufsize = bp->b_bufsize;
3361 				bp->b_data = bp->b_kvabase;
3362 				if (bp->b_bufsize) {
3363 					atomic_subtract_int(&bufmallocspace,
3364 							    bp->b_bufsize);
3365 					bufspacewakeup();
3366 					bp->b_bufsize = 0;
3367 				}
3368 				bp->b_flags &= ~B_MALLOC;
3369 				newbsize = round_page(newbsize);
3370 			}
3371 			vm_hold_load_pages(
3372 			    bp,
3373 			    (vm_offset_t) bp->b_data + bp->b_bufsize,
3374 			    (vm_offset_t) bp->b_data + newbsize);
3375 			if (origbuf) {
3376 				bcopy(origbuf, bp->b_data, origbufsize);
3377 				kfree(origbuf, M_BIOBUF);
3378 			}
3379 		}
3380 	} else {
3381 		vm_page_t m;
3382 		int desiredpages;
3383 
3384 		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3385 		desiredpages = ((int)(bp->b_loffset & PAGE_MASK) +
3386 				newbsize + PAGE_MASK) >> PAGE_SHIFT;
3387 		KKASSERT(desiredpages <= XIO_INTERNAL_PAGES);
3388 
3389 		if (bp->b_flags & B_MALLOC)
3390 			panic("allocbuf: VMIO buffer can't be malloced");
3391 		/*
3392 		 * Set B_CACHE initially if buffer is 0 length or will become
3393 		 * 0-length.
3394 		 */
3395 		if (size == 0 || bp->b_bufsize == 0)
3396 			bp->b_flags |= B_CACHE;
3397 
3398 		if (newbsize < bp->b_bufsize) {
3399 			/*
3400 			 * DEV_BSIZE aligned new buffer size is less than the
3401 			 * DEV_BSIZE aligned existing buffer size.  Figure out
3402 			 * if we have to remove any pages.
3403 			 */
3404 			if (desiredpages < bp->b_xio.xio_npages) {
3405 				for (i = desiredpages; i < bp->b_xio.xio_npages; i++) {
3406 					/*
3407 					 * the page is not freed here -- it
3408 					 * is the responsibility of
3409 					 * vnode_pager_setsize
3410 					 */
3411 					m = bp->b_xio.xio_pages[i];
3412 					KASSERT(m != bogus_page,
3413 					    ("allocbuf: bogus page found"));
3414 					vm_page_busy_wait(m, TRUE, "biodep");
3415 					bp->b_xio.xio_pages[i] = NULL;
3416 					vm_page_unwire(m, 0);
3417 					vm_page_wakeup(m);
3418 				}
3419 				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
3420 				    (desiredpages << PAGE_SHIFT), (bp->b_xio.xio_npages - desiredpages));
3421 				bp->b_xio.xio_npages = desiredpages;
3422 			}
3423 		} else if (size > bp->b_bcount) {
3424 			/*
3425 			 * We are growing the buffer, possibly in a
3426 			 * byte-granular fashion.
3427 			 */
3428 			struct vnode *vp;
3429 			vm_object_t obj;
3430 			vm_offset_t toff;
3431 			vm_offset_t tinc;
3432 
3433 			/*
3434 			 * Step 1, bring in the VM pages from the object,
3435 			 * allocating them if necessary.  We must clear
3436 			 * B_CACHE if these pages are not valid for the
3437 			 * range covered by the buffer.
3438 			 *
3439 			 * critical section protection is required to protect
3440 			 * against interrupts unbusying and freeing pages
3441 			 * between our vm_page_lookup() and our
3442 			 * busycheck/wiring call.
3443 			 */
3444 			vp = bp->b_vp;
3445 			obj = vp->v_object;
3446 
3447 			vm_object_hold(obj);
3448 			while (bp->b_xio.xio_npages < desiredpages) {
3449 				vm_page_t m;
3450 				vm_pindex_t pi;
3451 				int error;
3452 
3453 				pi = OFF_TO_IDX(bp->b_loffset) +
3454 				     bp->b_xio.xio_npages;
3455 
3456 				/*
3457 				 * Blocking on m->busy might lead to a
3458 				 * deadlock:
3459 				 *
3460 				 *  vm_fault->getpages->cluster_read->allocbuf
3461 				 */
3462 				m = vm_page_lookup_busy_try(obj, pi, FALSE,
3463 							    &error);
3464 				if (error) {
3465 					vm_page_sleep_busy(m, FALSE, "pgtblk");
3466 					continue;
3467 				}
3468 				if (m == NULL) {
3469 					/*
3470 					 * note: must allocate system pages
3471 					 * since blocking here could interfere
3472 					 * with paging I/O, no matter which
3473 					 * process we are.
3474 					 */
3475 					m = bio_page_alloc(obj, pi, desiredpages - bp->b_xio.xio_npages);
3476 					if (m) {
3477 						vm_page_wire(m);
3478 						vm_page_flag_clear(m, PG_ZERO);
3479 						vm_page_wakeup(m);
3480 						bp->b_flags &= ~B_CACHE;
3481 						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
3482 						++bp->b_xio.xio_npages;
3483 					}
3484 					continue;
3485 				}
3486 
3487 				/*
3488 				 * We found a page and were able to busy it.
3489 				 */
3490 				vm_page_flag_clear(m, PG_ZERO);
3491 				vm_page_wire(m);
3492 				vm_page_wakeup(m);
3493 				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
3494 				++bp->b_xio.xio_npages;
3495 				if (bp->b_act_count < m->act_count)
3496 					bp->b_act_count = m->act_count;
3497 			}
3498 			vm_object_drop(obj);
3499 
3500 			/*
3501 			 * Step 2.  We've loaded the pages into the buffer,
3502 			 * we have to figure out if we can still have B_CACHE
3503 			 * set.  Note that B_CACHE is set according to the
3504 			 * byte-granular range ( bcount and size ), not the
3505 			 * aligned range ( newbsize ).
3506 			 *
3507 			 * The VM test is against m->valid, which is DEV_BSIZE
3508 			 * aligned.  Needless to say, the validity of the data
3509 			 * needs to also be DEV_BSIZE aligned.  Note that this
3510 			 * fails with NFS if the server or some other client
3511 			 * extends the file's EOF.  If our buffer is resized,
3512 			 * B_CACHE may remain set! XXX
3513 			 */
3514 
3515 			toff = bp->b_bcount;
3516 			tinc = PAGE_SIZE - ((bp->b_loffset + toff) & PAGE_MASK);
3517 
3518 			while ((bp->b_flags & B_CACHE) && toff < size) {
3519 				vm_pindex_t pi;
3520 
3521 				if (tinc > (size - toff))
3522 					tinc = size - toff;
3523 
3524 				pi = ((bp->b_loffset & PAGE_MASK) + toff) >>
3525 				    PAGE_SHIFT;
3526 
3527 				vfs_buf_test_cache(
3528 				    bp,
3529 				    bp->b_loffset,
3530 				    toff,
3531 				    tinc,
3532 				    bp->b_xio.xio_pages[pi]
3533 				);
3534 				toff += tinc;
3535 				tinc = PAGE_SIZE;
3536 			}
3537 
3538 			/*
3539 			 * Step 3, fixup the KVM pmap.  Remember that
3540 			 * bp->b_data is relative to bp->b_loffset, but
3541 			 * bp->b_loffset may be offset into the first page.
3542 			 */
3543 
3544 			bp->b_data = (caddr_t)
3545 			    trunc_page((vm_offset_t)bp->b_data);
3546 			pmap_qenter(
3547 			    (vm_offset_t)bp->b_data,
3548 			    bp->b_xio.xio_pages,
3549 			    bp->b_xio.xio_npages
3550 			);
3551 			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
3552 			    (vm_offset_t)(bp->b_loffset & PAGE_MASK));
3553 		}
3554 	}
3555 
3556 	/* adjust space use on already-dirty buffer */
3557 	if (bp->b_flags & B_DELWRI) {
3558 		spin_lock(&bufcspin);
3559 		dirtybufspace += newbsize - bp->b_bufsize;
3560 		if (bp->b_flags & B_HEAVY)
3561 			dirtybufspacehw += newbsize - bp->b_bufsize;
3562 		spin_unlock(&bufcspin);
3563 	}
3564 	if (newbsize < bp->b_bufsize)
3565 		bufspacewakeup();
3566 	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
3567 	bp->b_bcount = size;		/* requested buffer size	*/
3568 	return 1;
3569 }
3570 
3571 /*
3572  * biowait:
3573  *
3574  *	Wait for buffer I/O completion, returning error status. B_EINTR
3575  *	is converted into an EINTR error but not cleared (since a chain
3576  *	of biowait() calls may occur).
3577  *
3578  *	On return bpdone() will have been called but the buffer will remain
3579  *	locked and will not have been brelse()'d.
3580  *
3581  *	NOTE!  If a timeout is specified and ETIMEDOUT occurs the I/O is
3582  *	likely still in progress on return.
3583  *
3584  *	NOTE!  This operation is on a BIO, not a BUF.
3585  *
3586  *	NOTE!  BIO_DONE is cleared by vn_strategy()
3587  *
3588  * MPSAFE
3589  */
3590 static __inline int
3591 _biowait(struct bio *bio, const char *wmesg, int to)
3592 {
3593 	struct buf *bp = bio->bio_buf;
3594 	u_int32_t flags;
3595 	u_int32_t nflags;
3596 	int error;
3597 
3598 	KKASSERT(bio == &bp->b_bio1);
3599 	for (;;) {
3600 		flags = bio->bio_flags;
3601 		if (flags & BIO_DONE)
3602 			break;
3603 		nflags = flags | BIO_WANT;
3604 		tsleep_interlock(bio, 0);
3605 		if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) {
3606 			if (wmesg)
3607 				error = tsleep(bio, PINTERLOCKED, wmesg, to);
3608 			else if (bp->b_cmd == BUF_CMD_READ)
3609 				error = tsleep(bio, PINTERLOCKED, "biord", to);
3610 			else
3611 				error = tsleep(bio, PINTERLOCKED, "biowr", to);
3612 			if (error) {
3613 				kprintf("tsleep error biowait %d\n", error);
3614 				return (error);
3615 			}
3616 		}
3617 	}
3618 
3619 	/*
3620 	 * Finish up.
3621 	 */
3622 	KKASSERT(bp->b_cmd == BUF_CMD_DONE);
3623 	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);
3624 	if (bp->b_flags & B_EINTR)
3625 		return (EINTR);
3626 	if (bp->b_flags & B_ERROR)
3627 		return (bp->b_error ? bp->b_error : EIO);
3628 	return (0);
3629 }
3630 
3631 int
3632 biowait(struct bio *bio, const char *wmesg)
3633 {
3634 	return(_biowait(bio, wmesg, 0));
3635 }
3636 
3637 int
3638 biowait_timeout(struct bio *bio, const char *wmesg, int to)
3639 {
3640 	return(_biowait(bio, wmesg, to));
3641 }
3642 
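/*
 * Example: the canonical synchronous-read sequence (a condensed sketch
 * of what callers such as breadn() do; setup and error handling are
 * omitted and the buffer must still be brelse()'d by the caller):
 *
 *	bp->b_cmd = BUF_CMD_READ;
 *	bp->b_bio1.bio_done = biodone_sync;
 *	bp->b_bio1.bio_flags |= BIO_SYNC;
 *	vfs_busy_pages(vp, bp);
 *	vn_strategy(vp, &bp->b_bio1);
 *	error = biowait(&bp->b_bio1, NULL);
 */
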
3643 /*
3644  * This associates a tracking count with an I/O.  vn_strategy() and
3645  * dev_dstrategy() do this automatically but there are a few cases
3646  * where a vnode or device layer is bypassed when a block translation
3647  * is cached.  In such cases bio_start_transaction() may be called on
3648  * the bypassed layers so the system gets an I/O in progress indication
3649  * for those higher layers.
3650  */
3651 void
3652 bio_start_transaction(struct bio *bio, struct bio_track *track)
3653 {
3654 	bio->bio_track = track;
3655 	if (dsched_is_clear_buf_priv(bio->bio_buf))
3656 		dsched_new_buf(bio->bio_buf);
3657 	bio_track_ref(track);
3658 }
3659 
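/*
 * Example (hypothetical caller): a layer that has cached a block
 * translation and forwards a BIO directly to the underlying device
 * can still account the I/O against the bypassed vnode; biodone()
 * releases the track when the BIO completes:
 *
 *	bio_start_transaction(bio, &vp->v_track_read);
 *	dev_dstrategy(dev, bio);
 */
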
3660 /*
3661  * Initiate I/O on a vnode.
3662  *
3663  * SWAPCACHE OPERATION:
3664  *
3665  *	Real buffer cache buffers have a non-NULL bp->b_vp.  Unfortunately
3666  *	devfs also uses b_vp for fake buffers so we also have to check
3667  *	that B_PAGING is 0.  In this case the passed 'vp' is probably the
3668  *	underlying block device.  The swap assignments are related to the
3669  *	buffer cache buffer's b_vp, not the passed vp.
3670  *
3671  *	The passed vp == bp->b_vp only in the case where the strategy call
3672  *	is made on the vp itself for its own buffers (a regular file or
3673  *	block device vp).  The filesystem usually then re-calls vn_strategy()
3674  *	after translating the request to an underlying device.
3675  *
3676  *	Cluster buffers set B_CLUSTER and the passed vp is the vp of the
3677  *	underlying buffer cache buffers.
3678  *
 *	We can only deal with page-aligned buffers at the moment, because
 *	we can't tell what the real dirty state of pages straddling a
 *	buffer is.
3682  *
3683  *	In order to call swap_pager_strategy() we must provide the VM object
3684  *	and base offset for the underlying buffer cache pages so it can find
3685  *	the swap blocks.
3686  */
3687 void
3688 vn_strategy(struct vnode *vp, struct bio *bio)
3689 {
3690 	struct bio_track *track;
3691 	struct buf *bp = bio->bio_buf;
3692 
3693 	KKASSERT(bp->b_cmd != BUF_CMD_DONE);
3694 
3695 	/*
3696 	 * Set when an I/O is issued on the bp.  Cleared by consumers
3697 	 * (aka HAMMER), allowing the consumer to determine if I/O had
3698 	 * actually occurred.
3699 	 */
3700 	bp->b_flags |= B_IODEBUG;
3701 
3702 	/*
3703 	 * Handle the swap cache intercept.
3704 	 */
3705 	if (vn_cache_strategy(vp, bio))
3706 		return;
3707 
3708 	/*
3709 	 * Otherwise do the operation through the filesystem
3710 	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &vp->v_track_read;
	else
		track = &vp->v_track_write;
3715 	KKASSERT((bio->bio_flags & BIO_DONE) == 0);
3716 	bio->bio_track = track;
3717 	if (dsched_is_clear_buf_priv(bio->bio_buf))
3718 		dsched_new_buf(bio->bio_buf);
3719 	bio_track_ref(track);
	vop_strategy(*vp->v_ops, vp, bio);
3721 }
3722 
3723 static void vn_cache_strategy_callback(struct bio *bio);
3724 
3725 int
3726 vn_cache_strategy(struct vnode *vp, struct bio *bio)
3727 {
3728 	struct buf *bp = bio->bio_buf;
3729 	struct bio *nbio;
3730 	vm_object_t object;
3731 	vm_page_t m;
3732 	int i;
3733 
3734 	/*
3735 	 * Is this buffer cache buffer suitable for reading from
3736 	 * the swap cache?
3737 	 */
3738 	if (vm_swapcache_read_enable == 0 ||
3739 	    bp->b_cmd != BUF_CMD_READ ||
3740 	    ((bp->b_flags & B_CLUSTER) == 0 &&
3741 	     (bp->b_vp == NULL || (bp->b_flags & B_PAGING))) ||
3742 	    ((int)bp->b_loffset & PAGE_MASK) != 0 ||
3743 	    (bp->b_bcount & PAGE_MASK) != 0) {
3744 		return(0);
3745 	}
3746 
3747 	/*
3748 	 * Figure out the original VM object (it will match the underlying
3749 	 * VM pages).  Note that swap cached data uses page indices relative
3750 	 * to that object, not relative to bio->bio_offset.
3751 	 */
3752 	if (bp->b_flags & B_CLUSTER)
3753 		object = vp->v_object;
3754 	else
3755 		object = bp->b_vp->v_object;
3756 
3757 	/*
3758 	 * In order to be able to use the swap cache all underlying VM
3759 	 * pages must be marked as such, and we can't have any bogus pages.
3760 	 */
3761 	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
3762 		m = bp->b_xio.xio_pages[i];
3763 		if ((m->flags & PG_SWAPPED) == 0)
3764 			break;
3765 		if (m == bogus_page)
3766 			break;
3767 	}
3768 
3769 	/*
3770 	 * If we are good then issue the I/O using swap_pager_strategy().
3771 	 *
3772 	 * We can only do this if the buffer actually supports object-backed
	 * I/O.  If it doesn't, npages will be 0.
3774 	 */
3775 	if (i && i == bp->b_xio.xio_npages) {
3776 		m = bp->b_xio.xio_pages[0];
3777 		nbio = push_bio(bio);
3778 		nbio->bio_done = vn_cache_strategy_callback;
3779 		nbio->bio_offset = ptoa(m->pindex);
3780 		KKASSERT(m->object == object);
3781 		swap_pager_strategy(object, nbio);
3782 		return(1);
3783 	}
3784 	return(0);
3785 }
3786 
/*
 * This is a bit of a hack, but since vn_cache_strategy() can override
 * a VFS's strategy function we must make sure that the bio, which is
 * probably bio2, doesn't leak an unexpected offset value back to the
 * filesystem.  The filesystem (e.g. UFS) might otherwise assume that
 * the bio went through its own file strategy function and that the
 * bio2 offset is a cached disk offset when, in fact, it isn't.
 */
3795 static void
3796 vn_cache_strategy_callback(struct bio *bio)
3797 {
3798 	bio->bio_offset = NOOFFSET;
3799 	biodone(pop_bio(bio));
3800 }
3801 
3802 /*
3803  * bpdone:
3804  *
3805  *	Finish I/O on a buffer after all BIOs have been processed.
3806  *	Called when the bio chain is exhausted or by biowait.  If called
3807  *	by biowait, elseit is typically 0.
3808  *
3809  *	bpdone is also responsible for setting B_CACHE in a B_VMIO bp.
3810  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3811  *	assuming B_INVAL is clear.
3812  *
 *	For the VMIO case, we set B_CACHE if the op was a read and no
 *	read error occurred, or if the op was a write.  B_CACHE is never
 *	set if the buffer is invalid or otherwise uncacheable.
 *
 *	bpdone does not mess with B_INVAL, allowing the I/O routine or the
 *	initiator to leave B_INVAL set to brelse the buffer out of existence
 *	in the biodone routine.
3820  */
3821 void
3822 bpdone(struct buf *bp, int elseit)
3823 {
3824 	buf_cmd_t cmd;
3825 
3826 	KASSERT(BUF_REFCNTNB(bp) > 0,
3827 		("biodone: bp %p not busy %d", bp, BUF_REFCNTNB(bp)));
3828 	KASSERT(bp->b_cmd != BUF_CMD_DONE,
3829 		("biodone: bp %p already done!", bp));
3830 
3831 	/*
3832 	 * No more BIOs are left.  All completion functions have been dealt
3833 	 * with, now we clean up the buffer.
3834 	 */
3835 	cmd = bp->b_cmd;
3836 	bp->b_cmd = BUF_CMD_DONE;
3837 
3838 	/*
3839 	 * Only reads and writes are processed past this point.
3840 	 */
3841 	if (cmd != BUF_CMD_READ && cmd != BUF_CMD_WRITE) {
3842 		if (cmd == BUF_CMD_FREEBLKS)
3843 			bp->b_flags |= B_NOCACHE;
3844 		if (elseit)
3845 			brelse(bp);
3846 		return;
3847 	}
3848 
3849 	/*
3850 	 * Warning: softupdates may re-dirty the buffer, and HAMMER can do
3851 	 * a lot worse.  XXX - move this above the clearing of b_cmd
3852 	 */
3853 	if (LIST_FIRST(&bp->b_dep) != NULL)
3854 		buf_complete(bp);	/* MPSAFE */
3855 
3856 	/*
3857 	 * A failed write must re-dirty the buffer unless B_INVAL
3858 	 * was set.  Only applicable to normal buffers (with VPs).
3859 	 * vinum buffers may not have a vp.
3860 	 */
3861 	if (cmd == BUF_CMD_WRITE &&
3862 	    (bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) {
3863 		bp->b_flags &= ~B_NOCACHE;
3864 		if (bp->b_vp)
3865 			bdirty(bp);
3866 	}
3867 
3868 	if (bp->b_flags & B_VMIO) {
3869 		int i;
3870 		vm_ooffset_t foff;
3871 		vm_page_t m;
3872 		vm_object_t obj;
3873 		int iosize;
3874 		struct vnode *vp = bp->b_vp;
3875 
3876 		obj = vp->v_object;
3877 
3878 #if defined(VFS_BIO_DEBUG)
3879 		if (vp->v_auxrefs == 0)
3880 			panic("biodone: zero vnode hold count");
3881 		if ((vp->v_flag & VOBJBUF) == 0)
3882 			panic("biodone: vnode is not setup for merged cache");
3883 #endif
3884 
3885 		foff = bp->b_loffset;
3886 		KASSERT(foff != NOOFFSET, ("biodone: no buffer offset"));
3887 		KASSERT(obj != NULL, ("biodone: missing VM object"));
3888 
3889 #if defined(VFS_BIO_DEBUG)
3890 		if (obj->paging_in_progress < bp->b_xio.xio_npages) {
3891 			kprintf("biodone: paging in progress(%d) < "
3892 				"bp->b_xio.xio_npages(%d)\n",
3893 				obj->paging_in_progress,
3894 				bp->b_xio.xio_npages);
3895 		}
3896 #endif
3897 
3898 		/*
3899 		 * Set B_CACHE if the op was a normal read and no error
		 * occurred.  B_CACHE is set for writes in the b*write()
3901 		 * routines.
3902 		 */
3903 		iosize = bp->b_bcount - bp->b_resid;
3904 		if (cmd == BUF_CMD_READ &&
3905 		    (bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR)) == 0) {
3906 			bp->b_flags |= B_CACHE;
3907 		}
3908 
3909 		vm_object_hold(obj);
3910 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
3911 			int bogusflag = 0;
3912 			int resid;
3913 
3914 			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
3915 			if (resid > iosize)
3916 				resid = iosize;
3917 
3918 			/*
			 * Clean up bogus pages, restoring the originals.  Since
3920 			 * the originals should still be wired, we don't have
3921 			 * to worry about interrupt/freeing races destroying
3922 			 * the VM object association.
3923 			 */
3924 			m = bp->b_xio.xio_pages[i];
3925 			if (m == bogus_page) {
3926 				bogusflag = 1;
3927 				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
3928 				if (m == NULL)
3929 					panic("biodone: page disappeared");
3930 				bp->b_xio.xio_pages[i] = m;
3931 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3932 					bp->b_xio.xio_pages, bp->b_xio.xio_npages);
3933 			}
3934 #if defined(VFS_BIO_DEBUG)
3935 			if (OFF_TO_IDX(foff) != m->pindex) {
3936 				kprintf("biodone: foff(%lu)/m->pindex(%ld) "
3937 					"mismatch\n",
3938 					(unsigned long)foff, (long)m->pindex);
3939 			}
3940 #endif
3941 
3942 			/*
3943 			 * In the write case, the valid and clean bits are
3944 			 * already changed correctly (see bdwrite()), so we
3945 			 * only need to do this here in the read case.
3946 			 */
3947 			vm_page_busy_wait(m, FALSE, "bpdpgw");
3948 			if (cmd == BUF_CMD_READ && !bogusflag && resid > 0) {
3949 				vfs_clean_one_page(bp, i, m);
3950 			}
3951 			vm_page_flag_clear(m, PG_ZERO);
3952 
3953 			/*
3954 			 * when debugging new filesystems or buffer I/O
3955 			 * methods, this is the most common error that pops
3956 			 * up.  if you see this, you have not set the page
3957 			 * busy flag correctly!!!
3958 			 */
3959 			if (m->busy == 0) {
3960 				kprintf("biodone: page busy < 0, "
3961 				    "pindex: %d, foff: 0x(%x,%x), "
3962 				    "resid: %d, index: %d\n",
3963 				    (int) m->pindex, (int)(foff >> 32),
3964 						(int) foff & 0xffffffff, resid, i);
3965 				if (!vn_isdisk(vp, NULL))
3966 					kprintf(" iosize: %ld, loffset: %lld, "
3967 						"flags: 0x%08x, npages: %d\n",
3968 					    bp->b_vp->v_mount->mnt_stat.f_iosize,
3969 					    (long long)bp->b_loffset,
3970 					    bp->b_flags, bp->b_xio.xio_npages);
3971 				else
3972 					kprintf(" VDEV, loffset: %lld, flags: 0x%08x, npages: %d\n",
3973 					    (long long)bp->b_loffset,
3974 					    bp->b_flags, bp->b_xio.xio_npages);
3975 				kprintf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
3976 				    m->valid, m->dirty, m->wire_count);
3977 				panic("biodone: page busy < 0");
3978 			}
3979 			vm_page_io_finish(m);
3980 			vm_page_wakeup(m);
3981 			vm_object_pip_wakeup(obj);
3982 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3983 			iosize -= resid;
3984 		}
3985 		bp->b_flags &= ~B_HASBOGUS;
3986 		vm_object_drop(obj);
3987 	}
3988 
3989 	/*
3990 	 * Finish up by releasing the buffer.  There are no more synchronous
3991 	 * or asynchronous completions, those were handled by bio_done
3992 	 * callbacks.
3993 	 */
3994 	if (elseit) {
3995 		if (bp->b_flags & (B_NOCACHE|B_INVAL|B_ERROR|B_RELBUF))
3996 			brelse(bp);
3997 		else
3998 			bqrelse(bp);
3999 	}
4000 }
4001 
4002 /*
4003  * Normal biodone.
4004  */
4005 void
4006 biodone(struct bio *bio)
4007 {
4008 	struct buf *bp = bio->bio_buf;
4009 
4010 	runningbufwakeup(bp);
4011 
4012 	/*
4013 	 * Run up the chain of BIO's.   Leave b_cmd intact for the duration.
4014 	 */
4015 	while (bio) {
4016 		biodone_t *done_func;
4017 		struct bio_track *track;
4018 
4019 		/*
4020 		 * BIO tracking.  Most but not all BIOs are tracked.
4021 		 */
4022 		if ((track = bio->bio_track) != NULL) {
4023 			bio_track_rel(track);
4024 			bio->bio_track = NULL;
4025 		}
4026 
4027 		/*
4028 		 * A bio_done function terminates the loop.  The function
4029 		 * will be responsible for any further chaining and/or
4030 		 * buffer management.
4031 		 *
4032 		 * WARNING!  The done function can deallocate the buffer!
4033 		 */
4034 		if ((done_func = bio->bio_done) != NULL) {
4035 			bio->bio_done = NULL;
4036 			done_func(bio);
4037 			return;
4038 		}
4039 		bio = bio->bio_prev;
4040 	}
4041 
4042 	/*
4043 	 * If we've run out of bio's do normal [a]synchronous completion.
4044 	 */
4045 	bpdone(bp, 1);
4046 }
4047 
4048 /*
4049  * Synchronous biodone - this terminates a synchronous BIO.
4050  *
4051  * bpdone() is called with elseit=FALSE, leaving the buffer completed
4052  * but still locked.  The caller must brelse() the buffer after waiting
4053  * for completion.
4054  */
4055 void
4056 biodone_sync(struct bio *bio)
4057 {
4058 	struct buf *bp = bio->bio_buf;
4059 	int flags;
4060 	int nflags;
4061 
4062 	KKASSERT(bio == &bp->b_bio1);
4063 	bpdone(bp, 0);
4064 
4065 	for (;;) {
4066 		flags = bio->bio_flags;
4067 		nflags = (flags | BIO_DONE) & ~BIO_WANT;
4068 
4069 		if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) {
4070 			if (flags & BIO_WANT)
4071 				wakeup(bio);
4072 			break;
4073 		}
4074 	}
4075 }
4076 
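/*
 * Example: biodone_sync() is only meaningful as the bio_done target of
 * a BIO marked BIO_SYNC; the initiator reaps the completion with
 * biowait() (a sketch of the bwrite()-style pattern, error handling
 * omitted):
 *
 *	bp->b_cmd = BUF_CMD_WRITE;
 *	bp->b_bio1.bio_done = biodone_sync;
 *	bp->b_bio1.bio_flags |= BIO_SYNC;
 *	vfs_busy_pages(bp->b_vp, bp);
 *	vn_strategy(bp->b_vp, &bp->b_bio1);
 *	error = biowait(&bp->b_bio1, "biows");
 *	brelse(bp);
 */
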
4077 /*
4078  * vfs_unbusy_pages:
4079  *
4080  *	This routine is called in lieu of iodone in the case of
4081  *	incomplete I/O.  This keeps the busy status for pages
 *	consistent.
4083  */
4084 void
4085 vfs_unbusy_pages(struct buf *bp)
4086 {
4087 	int i;
4088 
4089 	runningbufwakeup(bp);
4090 
4091 	if (bp->b_flags & B_VMIO) {
4092 		struct vnode *vp = bp->b_vp;
4093 		vm_object_t obj;
4094 
4095 		obj = vp->v_object;
4096 		vm_object_hold(obj);
4097 
4098 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
4099 			vm_page_t m = bp->b_xio.xio_pages[i];
4100 
			/*
			 * When restoring pages after bogus replacement the
			 * originals should still be wired, so we are in no
			 * danger of losing the object association and do
			 * not particularly need critical section protection.
			 */
4107 			if (m == bogus_page) {
4108 				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_loffset) + i);
4109 				if (!m) {
4110 					panic("vfs_unbusy_pages: page missing");
4111 				}
4112 				bp->b_xio.xio_pages[i] = m;
4113 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4114 					bp->b_xio.xio_pages, bp->b_xio.xio_npages);
4115 			}
4116 			vm_page_busy_wait(m, FALSE, "bpdpgw");
4117 			vm_page_flag_clear(m, PG_ZERO);
4118 			vm_page_io_finish(m);
4119 			vm_page_wakeup(m);
4120 			vm_object_pip_wakeup(obj);
4121 		}
4122 		bp->b_flags &= ~B_HASBOGUS;
4123 		vm_object_drop(obj);
4124 	}
4125 }
4126 
4127 /*
4128  * vfs_busy_pages:
4129  *
4130  *	This routine is called before a device strategy routine.
4131  *	It is used to tell the VM system that paging I/O is in
4132  *	progress, and treat the pages associated with the buffer
4133  *	almost as being PG_BUSY.  Also the object 'paging_in_progress'
4134  *	flag is handled to make sure that the object doesn't become
 *	inconsistent.
 *
 *	Since I/O has not been initiated yet, certain buffer flags
 *	such as B_ERROR or B_INVAL may be in an inconsistent state
4139  *	and should be ignored.
4140  *
4141  * MPSAFE
4142  */
4143 void
4144 vfs_busy_pages(struct vnode *vp, struct buf *bp)
4145 {
4146 	int i, bogus;
4147 	struct lwp *lp = curthread->td_lwp;
4148 
4149 	/*
4150 	 * The buffer's I/O command must already be set.  If reading,
4151 	 * B_CACHE must be 0 (double check against callers only doing
4152 	 * I/O when B_CACHE is 0).
4153 	 */
4154 	KKASSERT(bp->b_cmd != BUF_CMD_DONE);
4155 	KKASSERT(bp->b_cmd == BUF_CMD_WRITE || (bp->b_flags & B_CACHE) == 0);
4156 
4157 	if (bp->b_flags & B_VMIO) {
4158 		vm_object_t obj;
4159 
4160 		obj = vp->v_object;
4161 		KASSERT(bp->b_loffset != NOOFFSET,
4162 			("vfs_busy_pages: no buffer offset"));
4163 
4164 		/*
4165 		 * Busy all the pages.  We have to busy them all at once
4166 		 * to avoid deadlocks.
4167 		 */
4168 retry:
4169 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
4170 			vm_page_t m = bp->b_xio.xio_pages[i];
4171 
4172 			if (vm_page_busy_try(m, FALSE)) {
4173 				vm_page_sleep_busy(m, FALSE, "vbpage");
4174 				while (--i >= 0)
4175 					vm_page_wakeup(bp->b_xio.xio_pages[i]);
4176 				goto retry;
4177 			}
4178 		}
4179 
4180 		/*
4181 		 * Setup for I/O, soft-busy the page right now because
4182 		 * the next loop may block.
4183 		 */
4184 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
4185 			vm_page_t m = bp->b_xio.xio_pages[i];
4186 
4187 			vm_page_flag_clear(m, PG_ZERO);
4188 			if ((bp->b_flags & B_CLUSTER) == 0) {
4189 				vm_object_pip_add(obj, 1);
4190 				vm_page_io_start(m);
4191 			}
4192 		}
4193 
4194 		/*
4195 		 * Adjust protections for I/O and do bogus-page mapping.
4196 		 * Assume that vm_page_protect() can block (it can block
4197 		 * if VM_PROT_NONE, don't take any chances regardless).
4198 		 *
		 * In particular note that for writes we must incorporate
		 * page dirtiness from the VM system into the buffer's
		 * dirty range.
		 *
		 * For reads we theoretically must incorporate page dirtiness
		 * from the VM system to determine if the page needs bogus
		 * replacement, but we shortcut the test by simply checking
		 * that all m->valid bits are set, indicating that the page
		 * is fully valid and does not need to be re-read.  For any
		 * VM system dirtiness the page will also be fully valid
		 * since it was mapped at one point.
4210 		 */
4211 		bogus = 0;
4212 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
4213 			vm_page_t m = bp->b_xio.xio_pages[i];
4214 
4215 			vm_page_flag_clear(m, PG_ZERO);	/* XXX */
4216 			if (bp->b_cmd == BUF_CMD_WRITE) {
4217 				/*
4218 				 * When readying a vnode-backed buffer for
4219 				 * a write we must zero-fill any invalid
4220 				 * portions of the backing VM pages, mark
4221 				 * it valid and clear related dirty bits.
4222 				 *
4223 				 * vfs_clean_one_page() incorporates any
4224 				 * VM dirtyness and updates the b_dirtyoff
4225 				 * range (after we've made the page RO).
4226 				 *
4227 				 * It is also expected that the pmap modified
4228 				 * bit has already been cleared by the
4229 				 * vm_page_protect().  We may not be able
4230 				 * to clear all dirty bits for a page if it
4231 				 * was also memory mapped (NFS).
4232 				 *
4233 				 * Finally be sure to unassign any swap-cache
4234 				 * backing store as it is now stale.
4235 				 */
4236 				vm_page_protect(m, VM_PROT_READ);
4237 				vfs_clean_one_page(bp, i, m);
4238 				swap_pager_unswapped(m);
4239 			} else if (m->valid == VM_PAGE_BITS_ALL) {
4240 				/*
4241 				 * When readying a vnode-backed buffer for
4242 				 * read we must replace any dirty pages with
4243 				 * a bogus page so dirty data is not destroyed
4244 				 * when filling gaps.
4245 				 *
4246 				 * To avoid testing whether the page is
4247 				 * dirty we instead test that the page was
4248 				 * at some point mapped (m->valid fully
4249 				 * valid) with the understanding that
4250 				 * this also covers the dirty case.
4251 				 */
4252 				bp->b_xio.xio_pages[i] = bogus_page;
4253 				bp->b_flags |= B_HASBOGUS;
4254 				bogus++;
4255 			} else if (m->valid & m->dirty) {
4256 				/*
				 * This case should not occur as partial
				 * dirtiness can only happen if the buffer
4259 				 * is B_CACHE, and this code is not entered
4260 				 * if the buffer is B_CACHE.
4261 				 */
4262 				kprintf("Warning: vfs_busy_pages - page not "
4263 					"fully valid! loff=%jx bpf=%08x "
4264 					"idx=%d val=%02x dir=%02x\n",
4265 					(intmax_t)bp->b_loffset, bp->b_flags,
4266 					i, m->valid, m->dirty);
4267 				vm_page_protect(m, VM_PROT_NONE);
4268 			} else {
4269 				/*
4270 				 * The page is not valid and can be made
4271 				 * part of the read.
4272 				 */
4273 				vm_page_protect(m, VM_PROT_NONE);
4274 			}
4275 			vm_page_wakeup(m);
4276 		}
4277 		if (bogus) {
4278 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4279 				bp->b_xio.xio_pages, bp->b_xio.xio_npages);
4280 		}
4281 	}
4282 
4283 	/*
4284 	 * This is the easiest place to put the process accounting for the I/O
4285 	 * for now.
4286 	 */
4287 	if (lp != NULL) {
4288 		if (bp->b_cmd == BUF_CMD_READ)
4289 			lp->lwp_ru.ru_inblock++;
4290 		else
4291 			lp->lwp_ru.ru_oublock++;
4292 	}
4293 }
4294 
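/*
 * Example pairing (sketch): vfs_busy_pages() is called immediately
 * before handing the buffer to the strategy layer.  On completion
 * bpdone() finishes the pages; vfs_unbusy_pages() is only for paths
 * that abort before the I/O is actually initiated.
 *
 *	vfs_busy_pages(vp, bp);
 *	vn_strategy(vp, &bp->b_bio1);
 */
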
4295 /*
4296  * Tell the VM system that the pages associated with this buffer
4297  * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
4299  *
4300  * NOTE: While we only really need to clean through to b_bcount, we
4301  *	 just go ahead and clean through to b_bufsize.
4302  */
4303 static void
4304 vfs_clean_pages(struct buf *bp)
4305 {
4306 	vm_page_t m;
4307 	int i;
4308 
4309 	if ((bp->b_flags & B_VMIO) == 0)
4310 		return;
4311 
4312 	KASSERT(bp->b_loffset != NOOFFSET,
4313 		("vfs_clean_pages: no buffer offset"));
4314 
4315 	for (i = 0; i < bp->b_xio.xio_npages; i++) {
4316 		m = bp->b_xio.xio_pages[i];
4317 		vfs_clean_one_page(bp, i, m);
4318 	}
4319 }
4320 
4321 /*
4322  * vfs_clean_one_page:
4323  *
4324  *	Set the valid bits and clear the dirty bits in a page within a
4325  *	buffer.  The range is restricted to the buffer's size and the
4326  *	buffer's logical offset might index into the first page.
4327  *
 *	The caller has busied or soft-busied the page and it is not mapped;
 *	test and incorporate the dirty bits into b_dirtyoff/end before
 *	clearing them.  Note that we need to clear the pmap modified bits
 *	after determining that the page was dirty, since
 *	vm_page_set_validclean() does not do it for us.
4333  *
4334  *	This routine is typically called after a read completes (dirty should
4335  *	be zero in that case as we are not called on bogus-replace pages),
4336  *	or before a write is initiated.
4337  */
4338 static void
4339 vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m)
4340 {
4341 	int bcount;
4342 	int xoff;
4343 	int soff;
4344 	int eoff;
4345 
4346 	/*
4347 	 * Calculate offset range within the page but relative to buffer's
4348 	 * loffset.  loffset might be offset into the first page.
4349 	 */
4350 	xoff = (int)bp->b_loffset & PAGE_MASK;	/* loffset offset into pg 0 */
4351 	bcount = bp->b_bcount + xoff;		/* offset adjusted */
4352 
4353 	if (pageno == 0) {
4354 		soff = xoff;
4355 		eoff = PAGE_SIZE;
4356 	} else {
4357 		soff = (pageno << PAGE_SHIFT);
4358 		eoff = soff + PAGE_SIZE;
4359 	}
4360 	if (eoff > bcount)
4361 		eoff = bcount;
4362 	if (soff >= eoff)
4363 		return;
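
	/*
	 * Example of the range math, assuming PAGE_SIZE is 4096: with
	 * b_loffset == 0x1800 and b_bcount == 4096, xoff is 2048 and
	 * bcount is 6144, so page 0 covers [2048,4096) and page 1
	 * covers [4096,6144), i.e. [0,2048) within that page.
	 */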
4364 
4365 	/*
4366 	 * Test dirty bits and adjust b_dirtyoff/end.
4367 	 *
4368 	 * If dirty pages are incorporated into the bp any prior
4369 	 * B_NEEDCOMMIT state (NFS) must be cleared because the
4370 	 * caller has not taken into account the new dirty data.
4371 	 *
4372 	 * If the page was memory mapped the dirty bits might go beyond the
4373 	 * end of the buffer, but we can't really make the assumption that
4374 	 * a file EOF straddles the buffer (even though this is the case for
4375 	 * NFS if B_NEEDCOMMIT is also set).  So for the purposes of clearing
4376 	 * B_NEEDCOMMIT we only test the dirty bits covered by the buffer.
4377 	 * This also saves some console spam.
4378 	 *
4379 	 * When clearing B_NEEDCOMMIT we must also clear B_CLUSTEROK,
4380 	 * NFS can handle huge commits but not huge writes.
4381 	 */
4382 	vm_page_test_dirty(m);
4383 	if (m->dirty) {
4384 		if ((bp->b_flags & B_NEEDCOMMIT) &&
4385 		    (m->dirty & vm_page_bits(soff & PAGE_MASK, eoff - soff))) {
			if (debug_commit) {
				kprintf("Warning: vfs_clean_one_page: bp %p "
					"loff=%jx,%d flgs=%08x clr B_NEEDCOMMIT"
					" cmd %d vd %02x/%02x x/s/e %d %d %d "
					"doff/end %d %d\n",
					bp, (intmax_t)bp->b_loffset,
					bp->b_bcount, bp->b_flags, bp->b_cmd,
					m->valid, m->dirty, xoff, soff, eoff,
					bp->b_dirtyoff, bp->b_dirtyend);
			}
4395 			bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
4396 			if (debug_commit)
4397 				print_backtrace(-1);
4398 		}
4399 		/*
4400 		 * Only clear the pmap modified bits if ALL the dirty bits
4401 		 * are set, otherwise the system might mis-clear portions
4402 		 * of a page.
4403 		 */
4404 		if (m->dirty == VM_PAGE_BITS_ALL &&
4405 		    (bp->b_flags & B_NEEDCOMMIT) == 0) {
4406 			pmap_clear_modify(m);
4407 		}
4408 		if (bp->b_dirtyoff > soff - xoff)
4409 			bp->b_dirtyoff = soff - xoff;
4410 		if (bp->b_dirtyend < eoff - xoff)
4411 			bp->b_dirtyend = eoff - xoff;
4412 	}
4413 
4414 	/*
4415 	 * Set related valid bits, clear related dirty bits.
4416 	 * Does not mess with the pmap modified bit.
4417 	 *
4418 	 * WARNING!  We cannot just clear all of m->dirty here as the
4419 	 *	     buffer cache buffers may use a DEV_BSIZE'd aligned
4420 	 *	     block size, or have an odd size (e.g. NFS at file EOF).
4421 	 *	     The putpages code can clear m->dirty to 0.
4422 	 *
4423 	 *	     If a VOP_WRITE generates a buffer cache buffer which
4424 	 *	     covers the same space as mapped writable pages the
4425 	 *	     buffer flush might not be able to clear all the dirty
4426 	 *	     bits and still require a putpages from the VM system
4427 	 *	     to finish it off.
4428 	 *
4429 	 * WARNING!  vm_page_set_validclean() currently assumes vm_token
4430 	 *	     is held.  The page might not be busied (bdwrite() case).
4431 	 *	     XXX remove this comment once we've validated that this
4432 	 *	     is no longer an issue.
4433 	 */
4434 	vm_page_set_validclean(m, soff & PAGE_MASK, eoff - soff);
4435 }
4436 
4437 /*
4438  * Similar to vfs_clean_one_page() but sets the bits to valid and dirty.
4439  * The page data is assumed to be valid (there is no zeroing here).
4440  */
4441 static void
4442 vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m)
4443 {
4444 	int bcount;
4445 	int xoff;
4446 	int soff;
4447 	int eoff;
4448 
4449 	/*
4450 	 * Calculate offset range within the page but relative to buffer's
4451 	 * loffset.  loffset might be offset into the first page.
4452 	 */
4453 	xoff = (int)bp->b_loffset & PAGE_MASK;	/* loffset offset into pg 0 */
4454 	bcount = bp->b_bcount + xoff;		/* offset adjusted */
4455 
4456 	if (pageno == 0) {
4457 		soff = xoff;
4458 		eoff = PAGE_SIZE;
4459 	} else {
4460 		soff = (pageno << PAGE_SHIFT);
4461 		eoff = soff + PAGE_SIZE;
4462 	}
4463 	if (eoff > bcount)
4464 		eoff = bcount;
4465 	if (soff >= eoff)
4466 		return;
4467 	vm_page_set_validdirty(m, soff & PAGE_MASK, eoff - soff);
4468 }
4469 
4470 /*
4471  * vfs_bio_clrbuf:
4472  *
4473  *	Clear a buffer.  This routine essentially fakes an I/O, so we need
4474  *	to clear B_ERROR and B_INVAL.
4475  *
4476  *	Note that while we only theoretically need to clear through b_bcount,
4477  *	we go ahead and clear through b_bufsize.
4478  */
4479 
4480 void
4481 vfs_bio_clrbuf(struct buf *bp)
4482 {
	int i, mask = 0;
	caddr_t sa, ea;

	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
4486 		bp->b_flags &= ~(B_INVAL | B_EINTR | B_ERROR);
4487 		if ((bp->b_xio.xio_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4488 		    (bp->b_loffset & PAGE_MASK) == 0) {
4489 			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4490 			if ((bp->b_xio.xio_pages[0]->valid & mask) == mask) {
4491 				bp->b_resid = 0;
4492 				return;
4493 			}
4494 			if (((bp->b_xio.xio_pages[0]->flags & PG_ZERO) == 0) &&
4495 			    ((bp->b_xio.xio_pages[0]->valid & mask) == 0)) {
4496 				bzero(bp->b_data, bp->b_bufsize);
4497 				bp->b_xio.xio_pages[0]->valid |= mask;
4498 				bp->b_resid = 0;
4499 				return;
4500 			}
4501 		}
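		/*
		 * Example of the mask math, assuming DEV_BSIZE is 512:
		 * for b_bufsize == 2048 the fast path above computes
		 * mask == 0x000f, one valid bit per 512-byte chunk of
		 * the page.
		 */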
4502 		sa = bp->b_data;
		for (i = 0; i < bp->b_xio.xio_npages; i++, sa = ea) {
4504 			int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
4505 			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
4506 			ea = (caddr_t)(vm_offset_t)ulmin(
4507 			    (u_long)(vm_offset_t)ea,
4508 			    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
4509 			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4510 			if ((bp->b_xio.xio_pages[i]->valid & mask) == mask)
4511 				continue;
4512 			if ((bp->b_xio.xio_pages[i]->valid & mask) == 0) {
4513 				if ((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) {
4514 					bzero(sa, ea - sa);
4515 				}
4516 			} else {
4517 				for (; sa < ea; sa += DEV_BSIZE, j++) {
4518 					if (((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) &&
4519 						(bp->b_xio.xio_pages[i]->valid & (1<<j)) == 0)
4520 						bzero(sa, DEV_BSIZE);
4521 				}
4522 			}
4523 			bp->b_xio.xio_pages[i]->valid |= mask;
4524 			vm_page_flag_clear(bp->b_xio.xio_pages[i], PG_ZERO);
4525 		}
4526 		bp->b_resid = 0;
4527 	} else {
4528 		clrbuf(bp);
4529 	}
4530 }
4531 
4532 /*
4533  * vm_hold_load_pages:
4534  *
4535  *	Load pages into the buffer's address space.  The pages are
4536  *	allocated from the kernel object in order to reduce interference
 *	with any VM paging I/O activity.  The range of loaded
4538  *	pages will be wired.
4539  *
4540  *	If a page cannot be allocated, the 'pagedaemon' is woken up to
4541  *	retrieve the full range (to - from) of pages.
4542  *
4543  * MPSAFE
4544  */
4545 void
4546 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4547 {
4548 	vm_offset_t pg;
4549 	vm_page_t p;
4550 	int index;
4551 
4552 	to = round_page(to);
4553 	from = round_page(from);
4554 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4555 
4556 	pg = from;
4557 	while (pg < to) {
4558 		/*
4559 		 * Note: must allocate system pages since blocking here
		 * could interfere with paging I/O, no matter which
4561 		 * process we are.
4562 		 */
4563 		vm_object_hold(&kernel_object);
4564 		p = bio_page_alloc(&kernel_object, pg >> PAGE_SHIFT,
4565 				   (vm_pindex_t)((to - pg) >> PAGE_SHIFT));
4566 		vm_object_drop(&kernel_object);
4567 		if (p) {
4568 			vm_page_wire(p);
4569 			p->valid = VM_PAGE_BITS_ALL;
4570 			vm_page_flag_clear(p, PG_ZERO);
4571 			pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
4572 			bp->b_xio.xio_pages[index] = p;
4573 			vm_page_wakeup(p);
4574 
4575 			pg += PAGE_SIZE;
4576 			++index;
4577 		}
4578 	}
4579 	bp->b_xio.xio_npages = index;
4580 }
4581 
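/*
 * Example (sketch): non-VMIO buffers grow and shrink their backing
 * pages with this pair, in the style of allocbuf()'s non-VMIO path
 * ('origsize' and 'newsize' are illustrative names):
 *
 *	vm_hold_load_pages(bp, (vm_offset_t)bp->b_data + origsize,
 *			   (vm_offset_t)bp->b_data + newsize);
 *	...
 *	vm_hold_free_pages(bp, (vm_offset_t)bp->b_data + newsize,
 *			   (vm_offset_t)bp->b_data + origsize);
 */
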
4582 /*
4583  * Allocate pages for a buffer cache buffer.
4584  *
4585  * Under extremely severe memory conditions even allocating out of the
4586  * system reserve can fail.  If this occurs we must allocate out of the
4587  * interrupt reserve to avoid a deadlock with the pageout daemon.
4588  *
4589  * The pageout daemon can run (putpages -> VOP_WRITE -> getblk -> allocbuf).
4590  * If the buffer cache's vm_page_alloc() fails a vm_wait() can deadlock
4591  * against the pageout daemon if pages are not freed from other sources.
4592  *
4593  * MPSAFE
4594  */
4595 static
4596 vm_page_t
4597 bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit)
4598 {
4599 	vm_page_t p;
4600 
4601 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(obj));
4602 
4603 	/*
4604 	 * Try a normal allocation, allow use of system reserve.
4605 	 */
4606 	p = vm_page_alloc(obj, pg, VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM);
4607 	if (p)
4608 		return(p);
4609 
4610 	/*
4611 	 * The normal allocation failed and we clearly have a page
4612 	 * deficit.  Try to reclaim some clean VM pages directly
4613 	 * from the buffer cache.
4614 	 */
4615 	vm_pageout_deficit += deficit;
4616 	recoverbufpages();
4617 
4618 	/*
4619 	 * We may have blocked, the caller will know what to do if the
4620 	 * page now exists.
4621 	 */
4622 	if (vm_page_lookup(obj, pg)) {
4623 		return(NULL);
4624 	}
4625 
4626 	/*
4627 	 * Only system threads can use the interrupt reserve
4628 	 */
4629 	if ((curthread->td_flags & TDF_SYSTHREAD) == 0) {
4630 		vm_wait(hz);
4631 		return(NULL);
4632 	}
4633 
4635 	/*
4636 	 * Allocate and allow use of the interrupt reserve.
4637 	 *
4638 	 * If after all that we still can't allocate a VM page we are
4639 	 * in real trouble, but we slog on anyway hoping that the system
4640 	 * won't deadlock.
4641 	 */
4642 	p = vm_page_alloc(obj, pg, VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
4643 				   VM_ALLOC_INTERRUPT);
4644 	if (p) {
4645 		if (vm_page_count_severe()) {
4646 			++lowmempgallocs;
4647 			vm_wait(hz / 20 + 1);
4648 		}
4649 	} else {
4650 		kprintf("bio_page_alloc: Memory exhausted during bufcache "
4651 			"page allocation\n");
4652 		++lowmempgfails;
4653 		vm_wait(hz);
4654 	}
4655 	return(p);
4656 }
4657 
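/*
 * Example caller pattern (a sketch mirroring allocbuf()): a NULL
 * return may simply mean we blocked and the page now exists, so the
 * caller re-checks instead of treating it as a hard failure:
 *
 *	m = bio_page_alloc(obj, pi, deficit);
 *	if (m == NULL)
 *		continue;	(the loop re-does the vm_page lookup)
 */
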
4658 /*
4659  * vm_hold_free_pages:
4660  *
4661  *	Return pages associated with the buffer back to the VM system.
4662  *
4663  *	The range of pages underlying the buffer's address space will
4664  *	be unmapped and un-wired.
4665  *
4666  * MPSAFE
4667  */
4668 void
4669 vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4670 {
4671 	vm_offset_t pg;
4672 	vm_page_t p;
4673 	int index, newnpages;
4674 
4675 	from = round_page(from);
4676 	to = round_page(to);
4677 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4678 	newnpages = index;
4679 
4680 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4681 		p = bp->b_xio.xio_pages[index];
4682 		if (p && (index < bp->b_xio.xio_npages)) {
4683 			if (p->busy) {
4684 				kprintf("vm_hold_free_pages: doffset: %lld, "
4685 					"loffset: %lld\n",
4686 					(long long)bp->b_bio2.bio_offset,
4687 					(long long)bp->b_loffset);
4688 			}
4689 			bp->b_xio.xio_pages[index] = NULL;
4690 			pmap_kremove(pg);
4691 			vm_page_busy_wait(p, FALSE, "vmhldpg");
4692 			vm_page_unwire(p, 0);
4693 			vm_page_free(p);
4694 		}
4695 	}
4696 	bp->b_xio.xio_npages = newnpages;
4697 }
4698 
4699 /*
4700  * vmapbuf:
4701  *
4702  *	Map a user buffer into KVM via a pbuf.  On return the buffer's
4703  *	b_data, b_bufsize, and b_bcount will be set, and its XIO page array
4704  *	initialized.
4705  */
4706 int
4707 vmapbuf(struct buf *bp, caddr_t udata, int bytes)
4708 {
4709 	caddr_t addr;
4710 	vm_offset_t va;
4711 	vm_page_t m;
4712 	int vmprot;
4713 	int error;
4714 	int pidx;
4715 	int i;
4716 
4717 	/*
4718 	 * bp had better have a command and it better be a pbuf.
4719 	 */
4720 	KKASSERT(bp->b_cmd != BUF_CMD_DONE);
4721 	KKASSERT(bp->b_flags & B_PAGING);
4722 	KKASSERT(bp->b_kvabase);
4723 
4724 	if (bytes < 0)
4725 		return (-1);
4726 
4727 	/*
4728 	 * Map the user data into KVM.  Mappings have to be page-aligned.
4729 	 */
4730 	addr = (caddr_t)trunc_page((vm_offset_t)udata);
4731 	pidx = 0;
4732 
4733 	vmprot = VM_PROT_READ;
4734 	if (bp->b_cmd == BUF_CMD_READ)
4735 		vmprot |= VM_PROT_WRITE;
4736 
4737 	while (addr < udata + bytes) {
4738 		/*
4739 		 * Do the vm_fault if needed; do the copy-on-write thing
4740 		 * when reading stuff off device into memory.
4741 		 *
4742 		 * vm_fault_page*() returns a held VM page.
4743 		 */
4744 		va = (addr >= udata) ? (vm_offset_t)addr : (vm_offset_t)udata;
4745 		va = trunc_page(va);
4746 
4747 		m = vm_fault_page_quick(va, vmprot, &error);
4748 		if (m == NULL) {
4749 			for (i = 0; i < pidx; ++i) {
4750 			    vm_page_unhold(bp->b_xio.xio_pages[i]);
4751 			    bp->b_xio.xio_pages[i] = NULL;
4752 			}
4753 			return(-1);
4754 		}
4755 		bp->b_xio.xio_pages[pidx] = m;
4756 		addr += PAGE_SIZE;
4757 		++pidx;
4758 	}
4759 
4760 	/*
4761 	 * Map the page array and set the buffer fields to point to
4762 	 * the mapped data buffer.
4763 	 */
4764 	if (pidx > btoc(MAXPHYS))
4765 		panic("vmapbuf: mapped more than MAXPHYS");
4766 	pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_xio.xio_pages, pidx);
4767 
4768 	bp->b_xio.xio_npages = pidx;
4769 	bp->b_data = bp->b_kvabase + ((int)(intptr_t)udata & PAGE_MASK);
4770 	bp->b_bcount = bytes;
4771 	bp->b_bufsize = bytes;
4772 	return(0);
4773 }
4774 
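/*
 * Example (a sketch of physio-style usage; 'dev', 'udata' and 'bytes'
 * are illustrative, and bp is assumed to be a pbuf from getpbuf()):
 *
 *	bp->b_cmd = BUF_CMD_READ;
 *	if (vmapbuf(bp, udata, bytes) < 0)
 *		return (EFAULT);
 *	dev_dstrategy(dev, &bp->b_bio1);
 *	biowait(&bp->b_bio1, "physstr");
 *	vunmapbuf(bp);
 */
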
4775 /*
4776  * vunmapbuf:
4777  *
4778  *	Free the io map PTEs associated with this IO operation.
4779  *	We also invalidate the TLB entries and restore the original b_addr.
4780  */
4781 void
4782 vunmapbuf(struct buf *bp)
4783 {
4784 	int pidx;
4785 	int npages;
4786 
4787 	KKASSERT(bp->b_flags & B_PAGING);
4788 
4789 	npages = bp->b_xio.xio_npages;
4790 	pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4791 	for (pidx = 0; pidx < npages; ++pidx) {
4792 		vm_page_unhold(bp->b_xio.xio_pages[pidx]);
4793 		bp->b_xio.xio_pages[pidx] = NULL;
4794 	}
4795 	bp->b_xio.xio_npages = 0;
4796 	bp->b_data = bp->b_kvabase;
4797 }
4798 
4799 /*
4800  * Scan all buffers in the system and issue the callback.
4801  */
4802 int
4803 scan_all_buffers(int (*callback)(struct buf *, void *), void *info)
4804 {
4805 	int count = 0;
4806 	int error;
4807 	int n;
4808 
4809 	for (n = 0; n < nbuf; ++n) {
4810 		if ((error = callback(&buf[n], info)) < 0) {
4811 			count = error;
4812 			break;
4813 		}
4814 		count += error;
4815 	}
4816 	return (count);
4817 }
4818 
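/*
 * Example callback (hypothetical): count delayed-write buffers.  A
 * negative return from the callback aborts the scan and is returned
 * as-is; non-negative returns are summed:
 *
 *	static int
 *	count_delwri(struct buf *bp, void *info)
 *	{
 *		return ((bp->b_flags & B_DELWRI) ? 1 : 0);
 *	}
 *
 *	ndelwri = scan_all_buffers(count_delwri, NULL);
 */
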
4819 /*
 * nestiobuf_iodone: biodone callback for nested buffers; propagates
 * completion to the master buffer.
4822  */
4823 static void
4824 nestiobuf_iodone(struct bio *bio)
4825 {
4826 	struct bio *mbio;
4827 	struct buf *mbp, *bp;
4828 	struct devstat *stats;
4829 	int error;
4830 	int donebytes;
4831 
4832 	bp = bio->bio_buf;
4833 	mbio = bio->bio_caller_info1.ptr;
4834 	stats = bio->bio_caller_info2.ptr;
4835 	mbp = mbio->bio_buf;
4836 
4837 	KKASSERT(bp->b_bcount <= bp->b_bufsize);
4838 	KKASSERT(mbp != bp);
4839 
4840 	error = bp->b_error;
4841 	if (bp->b_error == 0 &&
4842 	    (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) {
4843 		/*
		 * Not all of the data was transferred; raise an error.  We
		 * have no way to propagate these conditions to mbp.
4846 		 */
4847 		error = EIO;
4848 	}
4849 
4850 	donebytes = bp->b_bufsize;
4851 
4852 	relpbuf(bp, NULL);
4853 
4854 	nestiobuf_done(mbio, donebytes, error, stats);
4855 }
4856 
4857 void
4858 nestiobuf_done(struct bio *mbio, int donebytes, int error, struct devstat *stats)
4859 {
4860 	struct buf *mbp;
4861 
4862 	mbp = mbio->bio_buf;
4863 
4864 	KKASSERT((int)(intptr_t)mbio->bio_driver_info > 0);
4865 
4866 	/*
	 * If an error occurred, propagate it to the master buffer.
4868 	 *
4869 	 * Several biodone()s may wind up running concurrently so
4870 	 * use an atomic op to adjust b_flags.
4871 	 */
4872 	if (error) {
4873 		mbp->b_error = error;
4874 		atomic_set_int(&mbp->b_flags, B_ERROR);
4875 	}
4876 
4877 	/*
4878 	 * Decrement the operations in progress counter and terminate the
4879 	 * I/O if this was the last bit.
4880 	 */
4881 	if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) {
4882 		mbp->b_resid = 0;
4883 		if (stats)
4884 			devstat_end_transaction_buf(stats, mbp);
4885 		biodone(mbio);
4886 	}
4887 }
4888 
4889 /*
4890  * Initialize a nestiobuf for use.  Set an initial count of 1 to prevent
4891  * the mbio from being biodone()'d while we are still adding sub-bios to
4892  * it.
4893  */
4894 void
4895 nestiobuf_init(struct bio *bio)
4896 {
4897 	bio->bio_driver_info = (void *)1;
4898 }
4899 
4900 /*
 * The BIOs added to the nested I/O have already been started; remove
 * the count that placeheld our mbio and biodone() it if the count
 * would transition to 0.
4904  */
4905 void
4906 nestiobuf_start(struct bio *mbio)
4907 {
4908 	struct buf *mbp = mbio->bio_buf;
4909 
4910 	/*
4911 	 * Decrement the operations in progress counter and terminate the
4912 	 * I/O if this was the last bit.
4913 	 */
4914 	if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) {
4915 		if (mbp->b_flags & B_ERROR)
4916 			mbp->b_resid = mbp->b_bcount;
4917 		else
4918 			mbp->b_resid = 0;
4919 		biodone(mbio);
4920 	}
4921 }
4922 
4923 /*
4924  * Set an intermediate error prior to calling nestiobuf_start()
4925  */
4926 void
4927 nestiobuf_error(struct bio *mbio, int error)
4928 {
4929 	struct buf *mbp = mbio->bio_buf;
4930 
4931 	if (error) {
4932 		mbp->b_error = error;
4933 		atomic_set_int(&mbp->b_flags, B_ERROR);
4934 	}
4935 }
4936 
4937 /*
4938  * nestiobuf_add: setup a "nested" buffer.
4939  *
 * => 'mbio' is the BIO of a "master" buffer being divided into sub-pieces.
 * => 'bp' should be a buffer allocated by getiobuf.
 * => 'offset' is a byte offset in the master buffer.
 * => 'size' is the size in bytes of this nested buffer.
4944  */
4945 void
nestiobuf_add(struct bio *mbio, struct buf *bp, int offset, size_t size,
	      struct devstat *stats)
4947 {
4948 	struct buf *mbp = mbio->bio_buf;
4949 	struct vnode *vp = mbp->b_vp;
4950 
4951 	KKASSERT(mbp->b_bcount >= offset + size);
4952 
4953 	atomic_add_int((int *)&mbio->bio_driver_info, 1);
4954 
4955 	/* kernel needs to own the lock for it to be released in biodone */
4956 	BUF_KERNPROC(bp);
4957 	bp->b_vp = vp;
4958 	bp->b_cmd = mbp->b_cmd;
4959 	bp->b_bio1.bio_done = nestiobuf_iodone;
4960 	bp->b_data = (char *)mbp->b_data + offset;
4961 	bp->b_resid = bp->b_bcount = size;
4962 	bp->b_bufsize = bp->b_bcount;
4963 
4964 	bp->b_bio1.bio_track = NULL;
4965 	bp->b_bio1.bio_caller_info1.ptr = mbio;
4966 	bp->b_bio1.bio_caller_info2.ptr = stats;
4967 }
4968 
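/*
 * Example: splitting a master BIO into fixed-size chunks (a sketch;
 * 'chunk' and 'dev' are illustrative, 'mbp' stands for mbio->bio_buf,
 * and b_bcount is assumed to be a multiple of 'chunk'):
 *
 *	nestiobuf_init(mbio);
 *	for (off = 0; off < mbp->b_bcount; off += chunk) {
 *		bp = getpbuf(NULL);
 *		nestiobuf_add(mbio, bp, off, chunk, NULL);
 *		dev_dstrategy(dev, &bp->b_bio1);
 *	}
 *	nestiobuf_start(mbio);
 */
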
4969 /*
 * Print out statistics from the current status of the buffer pool.
 * This can be toggled by the sysctl debug.syncprt.
4972  */
4973 #ifdef DEBUG
4974 void
4975 vfs_bufstats(void)
4976 {
	int i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[3] = { "NONE", "LOCKED", "CLEAN" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[3]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;

		spin_lock(&bufqspin);
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		spin_unlock(&bufqspin);

		kprintf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				kprintf(", %d-%d", j * PAGE_SIZE, counts[j]);
		kprintf("\n");
	}
}
5002 #endif
5003 
5004 #ifdef DDB
5005 
5006 DB_SHOW_COMMAND(buffer, db_show_buffer)
5007 {
5008 	/* get args */
5009 	struct buf *bp = (struct buf *)addr;
5010 
5011 	if (!have_addr) {
5012 		db_printf("usage: show buffer <addr>\n");
5013 		return;
5014 	}
5015 
5016 	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
5017 	db_printf("b_cmd = %d\n", bp->b_cmd);
	db_printf("b_error = %d, b_bufsize = %d, b_bcount = %d, "
		  "b_resid = %d\n"
		  "b_data = %p, "
		  "bio_offset(disk) = %lld, bio_offset(phys) = %lld\n",
5021 		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
5022 		  bp->b_data,
5023 		  (long long)bp->b_bio2.bio_offset,
5024 		  (long long)(bp->b_bio2.bio_next ?
5025 				bp->b_bio2.bio_next->bio_offset : (off_t)-1));
5026 	if (bp->b_xio.xio_npages) {
5027 		int i;
5028 		db_printf("b_xio.xio_npages = %d, pages(OBJ, IDX, PA): ",
5029 			bp->b_xio.xio_npages);
5030 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
5031 			vm_page_t m;
5032 			m = bp->b_xio.xio_pages[i];
5033 			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
5034 			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
5035 			if ((i + 1) < bp->b_xio.xio_npages)
5036 				db_printf(",");
5037 		}
5038 		db_printf("\n");
5039 	}
5040 }
5041 #endif /* DDB */
5042