13020e3beSMatthew Dillon /*
23020e3beSMatthew Dillon * Copyright (c) 1982, 1986, 1989, 1993
33020e3beSMatthew Dillon * The Regents of the University of California. All rights reserved.
43020e3beSMatthew Dillon * (c) UNIX System Laboratories, Inc.
53020e3beSMatthew Dillon * All or some portions of this file are derived from material licensed
63020e3beSMatthew Dillon * to the University of California by American Telephone and Telegraph
73020e3beSMatthew Dillon * Co. or Unix System Laboratories, Inc. and are reproduced herein with
83020e3beSMatthew Dillon * the permission of UNIX System Laboratories, Inc.
93020e3beSMatthew Dillon *
103020e3beSMatthew Dillon * Redistribution and use in source and binary forms, with or without
113020e3beSMatthew Dillon * modification, are permitted provided that the following conditions
123020e3beSMatthew Dillon * are met:
133020e3beSMatthew Dillon * 1. Redistributions of source code must retain the above copyright
143020e3beSMatthew Dillon * notice, this list of conditions and the following disclaimer.
153020e3beSMatthew Dillon * 2. Redistributions in binary form must reproduce the above copyright
163020e3beSMatthew Dillon * notice, this list of conditions and the following disclaimer in the
173020e3beSMatthew Dillon * documentation and/or other materials provided with the distribution.
182c64e990Szrj * 3. Neither the name of the University nor the names of its contributors
193020e3beSMatthew Dillon * may be used to endorse or promote products derived from this software
203020e3beSMatthew Dillon * without specific prior written permission.
213020e3beSMatthew Dillon *
223020e3beSMatthew Dillon * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
233020e3beSMatthew Dillon * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
243020e3beSMatthew Dillon * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
253020e3beSMatthew Dillon * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
263020e3beSMatthew Dillon * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
273020e3beSMatthew Dillon * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
283020e3beSMatthew Dillon * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
293020e3beSMatthew Dillon * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
303020e3beSMatthew Dillon * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
313020e3beSMatthew Dillon * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
323020e3beSMatthew Dillon * SUCH DAMAGE.
333020e3beSMatthew Dillon *
343020e3beSMatthew Dillon * @(#)buf.h 8.9 (Berkeley) 3/30/95
353020e3beSMatthew Dillon * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
363020e3beSMatthew Dillon */
373020e3beSMatthew Dillon
383020e3beSMatthew Dillon #ifndef _SYS_BUF2_H_
393020e3beSMatthew Dillon #define _SYS_BUF2_H_
403020e3beSMatthew Dillon
413020e3beSMatthew Dillon #ifdef _KERNEL
423020e3beSMatthew Dillon
431bd40720SMatthew Dillon #ifndef _SYS_BUF_H_
44*120385e8SSascha Wildner #include <sys/buf.h>
4516523a43SMatthew Dillon #endif
46408357d8SMatthew Dillon #ifndef _SYS_MOUNT_H_
47408357d8SMatthew Dillon #include <sys/mount.h>
48408357d8SMatthew Dillon #endif
49408357d8SMatthew Dillon #ifndef _SYS_VNODE_H_
50408357d8SMatthew Dillon #include <sys/vnode.h>
51408357d8SMatthew Dillon #endif
520e8bd897SMatthew Dillon #ifndef _VM_VM_PAGE_H_
530e8bd897SMatthew Dillon #include <vm/vm_page.h>
540e8bd897SMatthew Dillon #endif
55aabba8b4SMatthew Dillon
/*
 * Initialize the lock embedded in a buffer.
 */
#define BUF_LOCKINIT(bp)	\
	lockinit(&(bp)->b_lock, buf_wmesg, 0, LK_NOCOLLSTATS)
613020e3beSMatthew Dillon
623020e3beSMatthew Dillon /*
633020e3beSMatthew Dillon *
643020e3beSMatthew Dillon * Get a lock sleeping non-interruptably until it becomes available.
6565c6c519SMatthew Dillon *
6665c6c519SMatthew Dillon * XXX lk_wmesg can race, but should not result in any operational issues.
673020e3beSMatthew Dillon */
683020e3beSMatthew Dillon static __inline int
BUF_LOCK(struct buf * bp,int locktype)693020e3beSMatthew Dillon BUF_LOCK(struct buf *bp, int locktype)
703020e3beSMatthew Dillon {
713020e3beSMatthew Dillon bp->b_lock.lk_wmesg = buf_wmesg;
72df4f70a6SMatthew Dillon return (lockmgr(&(bp)->b_lock, locktype));
733020e3beSMatthew Dillon }
743020e3beSMatthew Dillon /*
753020e3beSMatthew Dillon * Get a lock sleeping with specified interruptably and timeout.
76f2770c70SMatthew Dillon *
7765c6c519SMatthew Dillon * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
7865c6c519SMatthew Dillon * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
7965c6c519SMatthew Dillon * will not be set in that case.
8065c6c519SMatthew Dillon *
8165c6c519SMatthew Dillon * XXX lk_wmesg can race, but should not result in any operational issues.
823020e3beSMatthew Dillon */
833020e3beSMatthew Dillon static __inline int
BUF_TIMELOCK(struct buf * bp,int locktype,char * wmesg,int timo)84f2770c70SMatthew Dillon BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int timo)
853020e3beSMatthew Dillon {
863020e3beSMatthew Dillon bp->b_lock.lk_wmesg = wmesg;
873020e3beSMatthew Dillon bp->b_lock.lk_timo = timo;
88df4f70a6SMatthew Dillon return (lockmgr(&(bp)->b_lock, locktype | LK_TIMELOCK));
893020e3beSMatthew Dillon }
903020e3beSMatthew Dillon /*
913020e3beSMatthew Dillon * Release a lock. Only the acquiring process may free the lock unless
923020e3beSMatthew Dillon * it has been handed off to biodone.
933020e3beSMatthew Dillon */
943020e3beSMatthew Dillon static __inline void
BUF_UNLOCK(struct buf * bp)953020e3beSMatthew Dillon BUF_UNLOCK(struct buf *bp)
963020e3beSMatthew Dillon {
97df4f70a6SMatthew Dillon lockmgr(&(bp)->b_lock, LK_RELEASE);
983020e3beSMatthew Dillon }
993020e3beSMatthew Dillon
1003020e3beSMatthew Dillon /*
1013020e3beSMatthew Dillon * When initiating asynchronous I/O, change ownership of the lock to the
1023020e3beSMatthew Dillon * kernel. Once done, the lock may legally released by biodone. The
1033020e3beSMatthew Dillon * original owning process can no longer acquire it recursively, but must
1043020e3beSMatthew Dillon * wait until the I/O is completed and the lock has been freed by biodone.
1053020e3beSMatthew Dillon */
1063020e3beSMatthew Dillon static __inline void
BUF_KERNPROC(struct buf * bp)1073020e3beSMatthew Dillon BUF_KERNPROC(struct buf *bp)
1083020e3beSMatthew Dillon {
109df4f70a6SMatthew Dillon lockmgr_kernproc(&(bp)->b_lock);
1103020e3beSMatthew Dillon }
1113020e3beSMatthew Dillon /*
1123020e3beSMatthew Dillon * Find out the number of references to a lock.
11377bb9400SMatthew Dillon *
11477bb9400SMatthew Dillon * The non-blocking version should only be used for assertions in cases
11577bb9400SMatthew Dillon * where the buffer is expected to be owned or otherwise data stable.
1163020e3beSMatthew Dillon */
1173020e3beSMatthew Dillon static __inline int
BUF_LOCKINUSE(struct buf * bp)1183b6a19b2SMatthew Dillon BUF_LOCKINUSE(struct buf *bp)
1193020e3beSMatthew Dillon {
1203b6a19b2SMatthew Dillon return (lockinuse(&(bp)->b_lock));
12177bb9400SMatthew Dillon }
12277bb9400SMatthew Dillon
/*
 * Free a buffer lock.  Panics if anyone still holds a reference.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and remains safe inside unbraced if/else constructs (the original bare
 * `if' form could capture a following `else').
 */
#define BUF_LOCKFREE(bp)			\
	do {					\
		if (BUF_LOCKINUSE(bp))		\
			panic("free locked buf"); \
	} while (0)
12977bb9400SMatthew Dillon
1303020e3beSMatthew Dillon static __inline void
bioq_init(struct bio_queue_head * bioq)13130e5862eSMatthew Dillon bioq_init(struct bio_queue_head *bioq)
1323020e3beSMatthew Dillon {
13330e5862eSMatthew Dillon TAILQ_INIT(&bioq->queue);
13430e5862eSMatthew Dillon bioq->off_unused = 0;
13530e5862eSMatthew Dillon bioq->reorder = 0;
13630e5862eSMatthew Dillon bioq->transition = NULL;
13730e5862eSMatthew Dillon bioq->bio_unused = NULL;
1383020e3beSMatthew Dillon }
1393020e3beSMatthew Dillon
1403020e3beSMatthew Dillon static __inline void
bioq_insert_tail(struct bio_queue_head * bioq,struct bio * bio)14130e5862eSMatthew Dillon bioq_insert_tail(struct bio_queue_head *bioq, struct bio *bio)
1423020e3beSMatthew Dillon {
14330e5862eSMatthew Dillon bioq->transition = NULL;
14430e5862eSMatthew Dillon TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1453020e3beSMatthew Dillon }
1463020e3beSMatthew Dillon
1473020e3beSMatthew Dillon static __inline void
bioq_remove(struct bio_queue_head * bioq,struct bio * bio)14830e5862eSMatthew Dillon bioq_remove(struct bio_queue_head *bioq, struct bio *bio)
149539f339eSMatthew Dillon {
15030e5862eSMatthew Dillon /*
15130e5862eSMatthew Dillon * Adjust read insertion point when removing the bioq. The
15230e5862eSMatthew Dillon * bio after the insert point is a write so move backwards
15330e5862eSMatthew Dillon * one (NULL will indicate all the reads have cleared).
15430e5862eSMatthew Dillon */
15530e5862eSMatthew Dillon if (bio == bioq->transition)
15630e5862eSMatthew Dillon bioq->transition = TAILQ_NEXT(bio, bio_act);
15730e5862eSMatthew Dillon TAILQ_REMOVE(&bioq->queue, bio, bio_act);
1583020e3beSMatthew Dillon }
1593020e3beSMatthew Dillon
16081b5c339SMatthew Dillon static __inline struct bio *
bioq_first(struct bio_queue_head * bioq)16130e5862eSMatthew Dillon bioq_first(struct bio_queue_head *bioq)
1623020e3beSMatthew Dillon {
16330e5862eSMatthew Dillon return (TAILQ_FIRST(&bioq->queue));
1643020e3beSMatthew Dillon }
1653020e3beSMatthew Dillon
166f05544e5SSascha Wildner static __inline struct bio *
bioq_takefirst(struct bio_queue_head * bioq)167f05544e5SSascha Wildner bioq_takefirst(struct bio_queue_head *bioq)
168f05544e5SSascha Wildner {
169f05544e5SSascha Wildner struct bio *bp;
170f05544e5SSascha Wildner
171f05544e5SSascha Wildner bp = TAILQ_FIRST(&bioq->queue);
172f05544e5SSascha Wildner if (bp != NULL)
173f05544e5SSascha Wildner bioq_remove(bioq, bp);
174f05544e5SSascha Wildner return (bp);
175f05544e5SSascha Wildner }
176f05544e5SSascha Wildner
177408357d8SMatthew Dillon /*
1780e8bd897SMatthew Dillon * Adjust buffer cache buffer's activity count. This
1790e8bd897SMatthew Dillon * works similarly to vm_page->act_count.
1800e8bd897SMatthew Dillon */
1810e8bd897SMatthew Dillon static __inline void
buf_act_advance(struct buf * bp)1820e8bd897SMatthew Dillon buf_act_advance(struct buf *bp)
1830e8bd897SMatthew Dillon {
1840e8bd897SMatthew Dillon if (bp->b_act_count > ACT_MAX - ACT_ADVANCE)
1850e8bd897SMatthew Dillon bp->b_act_count = ACT_MAX;
1860e8bd897SMatthew Dillon else
1870e8bd897SMatthew Dillon bp->b_act_count += ACT_ADVANCE;
1880e8bd897SMatthew Dillon }
1890e8bd897SMatthew Dillon
1900e8bd897SMatthew Dillon static __inline void
buf_act_decline(struct buf * bp)1910e8bd897SMatthew Dillon buf_act_decline(struct buf *bp)
1920e8bd897SMatthew Dillon {
1930e8bd897SMatthew Dillon if (bp->b_act_count < ACT_DECLINE)
1940e8bd897SMatthew Dillon bp->b_act_count = 0;
1950e8bd897SMatthew Dillon else
1960e8bd897SMatthew Dillon bp->b_act_count -= ACT_DECLINE;
1970e8bd897SMatthew Dillon }
1980e8bd897SMatthew Dillon
1990e8bd897SMatthew Dillon /*
200408357d8SMatthew Dillon * biodeps inlines - used by softupdates and HAMMER.
20159a647b1SMatthew Dillon *
20259a647b1SMatthew Dillon * All bioops are MPSAFE
203408357d8SMatthew Dillon */
204408357d8SMatthew Dillon static __inline void
buf_dep_init(struct buf * bp)205408357d8SMatthew Dillon buf_dep_init(struct buf *bp)
206408357d8SMatthew Dillon {
207408357d8SMatthew Dillon bp->b_ops = NULL;
208408357d8SMatthew Dillon LIST_INIT(&bp->b_dep);
209408357d8SMatthew Dillon }
210408357d8SMatthew Dillon
211389ee6beSNicolas Thery /*
212389ee6beSNicolas Thery * Precondition: the buffer has some dependencies.
21359a647b1SMatthew Dillon *
21459a647b1SMatthew Dillon * MPSAFE
215389ee6beSNicolas Thery */
216408357d8SMatthew Dillon static __inline void
buf_deallocate(struct buf * bp)217408357d8SMatthew Dillon buf_deallocate(struct buf *bp)
218408357d8SMatthew Dillon {
219408357d8SMatthew Dillon struct bio_ops *ops = bp->b_ops;
220408357d8SMatthew Dillon
221389ee6beSNicolas Thery KKASSERT(! LIST_EMPTY(&bp->b_dep));
222408357d8SMatthew Dillon if (ops)
223408357d8SMatthew Dillon ops->io_deallocate(bp);
224408357d8SMatthew Dillon }
225408357d8SMatthew Dillon
22659a647b1SMatthew Dillon /*
227b3f55d88SMatthew Dillon * This callback is made from flushbufqueues() which uses BUF_LOCK().
228b3f55d88SMatthew Dillon * Since it isn't going through a normal buffer aquisition mechanic
229b3f55d88SMatthew Dillon * and calling the filesystem back enforce the vnode's KVABIO support.
23059a647b1SMatthew Dillon */
231408357d8SMatthew Dillon static __inline int
buf_countdeps(struct buf * bp,int n)232408357d8SMatthew Dillon buf_countdeps(struct buf *bp, int n)
233408357d8SMatthew Dillon {
234408357d8SMatthew Dillon struct bio_ops *ops = bp->b_ops;
235408357d8SMatthew Dillon int r;
236408357d8SMatthew Dillon
237b3f55d88SMatthew Dillon if (ops) {
238b3f55d88SMatthew Dillon if (bp->b_vp == NULL || (bp->b_vp->v_flag & VKVABIO) == 0)
239b3f55d88SMatthew Dillon bkvasync_all(bp);
240408357d8SMatthew Dillon r = ops->io_countdeps(bp, n);
241b3f55d88SMatthew Dillon } else {
242408357d8SMatthew Dillon r = 0;
243b3f55d88SMatthew Dillon }
244408357d8SMatthew Dillon return(r);
245408357d8SMatthew Dillon }
246408357d8SMatthew Dillon
24759a647b1SMatthew Dillon /*
24859a647b1SMatthew Dillon * MPSAFE
24959a647b1SMatthew Dillon */
250408357d8SMatthew Dillon static __inline void
buf_start(struct buf * bp)251408357d8SMatthew Dillon buf_start(struct buf *bp)
252408357d8SMatthew Dillon {
253408357d8SMatthew Dillon struct bio_ops *ops = bp->b_ops;
254408357d8SMatthew Dillon
255408357d8SMatthew Dillon if (ops)
256408357d8SMatthew Dillon ops->io_start(bp);
257408357d8SMatthew Dillon }
258408357d8SMatthew Dillon
25959a647b1SMatthew Dillon /*
26059a647b1SMatthew Dillon * MPSAFE
26159a647b1SMatthew Dillon */
262408357d8SMatthew Dillon static __inline void
buf_complete(struct buf * bp)263408357d8SMatthew Dillon buf_complete(struct buf *bp)
264408357d8SMatthew Dillon {
265408357d8SMatthew Dillon struct bio_ops *ops = bp->b_ops;
266408357d8SMatthew Dillon
267408357d8SMatthew Dillon if (ops)
268408357d8SMatthew Dillon ops->io_complete(bp);
269408357d8SMatthew Dillon }
270408357d8SMatthew Dillon
27159a647b1SMatthew Dillon /*
27259a647b1SMatthew Dillon * MPSAFE
27359a647b1SMatthew Dillon */
274408357d8SMatthew Dillon static __inline int
buf_fsync(struct vnode * vp)275408357d8SMatthew Dillon buf_fsync(struct vnode *vp)
276408357d8SMatthew Dillon {
277408357d8SMatthew Dillon struct bio_ops *ops = vp->v_mount->mnt_bioops;
278408357d8SMatthew Dillon int r;
279408357d8SMatthew Dillon
280408357d8SMatthew Dillon if (ops)
281408357d8SMatthew Dillon r = ops->io_fsync(vp);
282408357d8SMatthew Dillon else
283408357d8SMatthew Dillon r = 0;
284408357d8SMatthew Dillon return(r);
285408357d8SMatthew Dillon }
286408357d8SMatthew Dillon
28759a647b1SMatthew Dillon /*
28859a647b1SMatthew Dillon * MPSAFE
28959a647b1SMatthew Dillon */
290408357d8SMatthew Dillon static __inline void
buf_movedeps(struct buf * bp1,struct buf * bp2)291408357d8SMatthew Dillon buf_movedeps(struct buf *bp1, struct buf *bp2)
292408357d8SMatthew Dillon {
293408357d8SMatthew Dillon struct bio_ops *ops = bp1->b_ops;
294408357d8SMatthew Dillon
295408357d8SMatthew Dillon if (ops)
296408357d8SMatthew Dillon ops->io_movedeps(bp1, bp2);
297408357d8SMatthew Dillon }
298408357d8SMatthew Dillon
29959a647b1SMatthew Dillon /*
30059a647b1SMatthew Dillon * MPSAFE
30159a647b1SMatthew Dillon */
30227bc0cb1SMatthew Dillon static __inline int
buf_checkread(struct buf * bp)30327bc0cb1SMatthew Dillon buf_checkread(struct buf *bp)
30427bc0cb1SMatthew Dillon {
30527bc0cb1SMatthew Dillon struct bio_ops *ops = bp->b_ops;
30627bc0cb1SMatthew Dillon
30727bc0cb1SMatthew Dillon if (ops)
30827bc0cb1SMatthew Dillon return(ops->io_checkread(bp));
30927bc0cb1SMatthew Dillon return(0);
31027bc0cb1SMatthew Dillon }
31127bc0cb1SMatthew Dillon
31259a647b1SMatthew Dillon /*
313b3f55d88SMatthew Dillon * This callback is made from flushbufqueues() which uses BUF_LOCK().
314b3f55d88SMatthew Dillon * Since it isn't going through a normal buffer aquisition mechanic
315b3f55d88SMatthew Dillon * and calling the filesystem back enforce the vnode's KVABIO support.
31659a647b1SMatthew Dillon */
31727bc0cb1SMatthew Dillon static __inline int
buf_checkwrite(struct buf * bp)31827bc0cb1SMatthew Dillon buf_checkwrite(struct buf *bp)
31927bc0cb1SMatthew Dillon {
32027bc0cb1SMatthew Dillon struct bio_ops *ops = bp->b_ops;
32127bc0cb1SMatthew Dillon
322b3f55d88SMatthew Dillon if (ops) {
323b3f55d88SMatthew Dillon if (bp->b_vp == NULL || (bp->b_vp->v_flag & VKVABIO) == 0)
324b3f55d88SMatthew Dillon bkvasync_all(bp);
32527bc0cb1SMatthew Dillon return(ops->io_checkwrite(bp));
326b3f55d88SMatthew Dillon }
32727bc0cb1SMatthew Dillon return(0);
32827bc0cb1SMatthew Dillon }
32927bc0cb1SMatthew Dillon
330ae8e83e6SMatthew Dillon /*
331ae8e83e6SMatthew Dillon * Chained biodone. The bio callback was made and the callback function
332ae8e83e6SMatthew Dillon * wishes to chain the biodone. If no BIO's are left we call bpdone()
333ae8e83e6SMatthew Dillon * with elseit=TRUE (asynchronous completion).
33459a647b1SMatthew Dillon *
33559a647b1SMatthew Dillon * MPSAFE
336ae8e83e6SMatthew Dillon */
337ae8e83e6SMatthew Dillon static __inline void
biodone_chain(struct bio * bio)338ae8e83e6SMatthew Dillon biodone_chain(struct bio *bio)
339ae8e83e6SMatthew Dillon {
340ae8e83e6SMatthew Dillon if (bio->bio_prev)
341ae8e83e6SMatthew Dillon biodone(bio->bio_prev);
342ae8e83e6SMatthew Dillon else
343ae8e83e6SMatthew Dillon bpdone(bio->bio_buf, 1);
344ae8e83e6SMatthew Dillon }
345ae8e83e6SMatthew Dillon
34654341a3bSMatthew Dillon static __inline int
bread(struct vnode * vp,off_t loffset,int size,struct buf ** bpp)34754341a3bSMatthew Dillon bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
34854341a3bSMatthew Dillon {
34954341a3bSMatthew Dillon *bpp = NULL;
350d32579c3SMatthew Dillon return(breadnx(vp, loffset, size, B_NOTMETA,
351d32579c3SMatthew Dillon NULL, NULL, 0, bpp));
352d32579c3SMatthew Dillon }
353d32579c3SMatthew Dillon
354d32579c3SMatthew Dillon static __inline int
bread_kvabio(struct vnode * vp,off_t loffset,int size,struct buf ** bpp)355d32579c3SMatthew Dillon bread_kvabio(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
356d32579c3SMatthew Dillon {
357d32579c3SMatthew Dillon *bpp = NULL;
358d32579c3SMatthew Dillon return(breadnx(vp, loffset, size, B_NOTMETA | B_KVABIO,
359d32579c3SMatthew Dillon NULL, NULL, 0, bpp));
36054341a3bSMatthew Dillon }
36154341a3bSMatthew Dillon
36254341a3bSMatthew Dillon static __inline int
breadn(struct vnode * vp,off_t loffset,int size,off_t * raoffset,int * rabsize,int cnt,struct buf ** bpp)36354341a3bSMatthew Dillon breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset,
36454341a3bSMatthew Dillon int *rabsize, int cnt, struct buf **bpp)
36554341a3bSMatthew Dillon {
36654341a3bSMatthew Dillon *bpp = NULL;
3679c93755aSMatthew Dillon return(breadnx(vp, loffset, size, B_NOTMETA, raoffset,
3689c93755aSMatthew Dillon rabsize, cnt, bpp));
36954341a3bSMatthew Dillon }
37054341a3bSMatthew Dillon
37154341a3bSMatthew Dillon static __inline int
cluster_read(struct vnode * vp,off_t filesize,off_t loffset,int blksize,size_t minreq,size_t maxreq,struct buf ** bpp)37254341a3bSMatthew Dillon cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
37354341a3bSMatthew Dillon int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
37454341a3bSMatthew Dillon {
37554341a3bSMatthew Dillon *bpp = NULL;
3769c93755aSMatthew Dillon return(cluster_readx(vp, filesize, loffset, blksize, B_NOTMETA,
3779c93755aSMatthew Dillon minreq, maxreq, bpp));
37854341a3bSMatthew Dillon }
37954341a3bSMatthew Dillon
380e14ccbbeSMatthew Dillon static __inline int
cluster_read_kvabio(struct vnode * vp,off_t filesize,off_t loffset,int blksize,size_t minreq,size_t maxreq,struct buf ** bpp)381e14ccbbeSMatthew Dillon cluster_read_kvabio(struct vnode *vp, off_t filesize, off_t loffset,
382e14ccbbeSMatthew Dillon int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
383e14ccbbeSMatthew Dillon {
384e14ccbbeSMatthew Dillon *bpp = NULL;
385e14ccbbeSMatthew Dillon return(cluster_readx(vp, filesize, loffset, blksize,
386e14ccbbeSMatthew Dillon B_NOTMETA | B_KVABIO,
387e14ccbbeSMatthew Dillon minreq, maxreq, bpp));
388e14ccbbeSMatthew Dillon }
389e14ccbbeSMatthew Dillon
3903020e3beSMatthew Dillon #endif /* _KERNEL */
3913020e3beSMatthew Dillon
3923020e3beSMatthew Dillon #endif /* !_SYS_BUF2_H_ */
393