Lines Matching defs:bp

162 "struct buf *"/*bp*/,
165 "struct buf *"/*bp*/,
171 SDT_PROBE_DEFINE1(io, kernel, , getnewbuf__done, "struct buf *"/*bp*/);
176 "struct buf *"/*bp*/);
177 SDT_PROBE_DEFINE2(io, kernel, , brelse, "struct buf *"/*bp*/, "int"/*set*/);
178 SDT_PROBE_DEFINE1(io, kernel, , wait__start, "struct buf *"/*bp*/);
179 SDT_PROBE_DEFINE1(io, kernel, , wait__done, "struct buf *"/*bp*/);
348 checkfreelist(buf_t *bp, struct bqueue *dp, int ison)
356 if (b == bp)
369 binsheadfree(buf_t *bp, struct bqueue *dp)
373 KASSERT(bp->b_freelistindex == -1);
374 TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
375 dp->bq_bytes += bp->b_bufsize;
376 bp->b_freelistindex = dp - bufqueues;
380 binstailfree(buf_t *bp, struct bqueue *dp)
384 KASSERTMSG(bp->b_freelistindex == -1, "double free of buffer? "
385 "bp=%p, b_freelistindex=%d\n", bp, bp->b_freelistindex);
386 TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
387 dp->bq_bytes += bp->b_bufsize;
388 bp->b_freelistindex = dp - bufqueues;
392 bremfree(buf_t *bp)
395 int bqidx = bp->b_freelistindex;
401 KDASSERT(checkfreelist(bp, dp, 1));
402 KASSERT(dp->bq_bytes >= bp->b_bufsize);
403 TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
404 dp->bq_bytes -= bp->b_bufsize;
407 if (bp == dp->bq_marker)
411 bp->b_freelistindex = -1;
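
The helpers above (checkfreelist, binsheadfree, binstailfree, bremfree) keep the buffer free queues consistent: each buffer records the queue it sits on in b_freelistindex (-1 when on none), and each queue tracks its total bytes in bq_bytes. Below is a minimal user-space sketch of that bookkeeping, assuming BSD <sys/queue.h>; xbuf, xbqueue and the queue count are simplified placeholders, not the kernel structures.

#include <sys/queue.h>		/* BSD TAILQ macros */
#include <assert.h>

struct xbuf {
	TAILQ_ENTRY(xbuf) b_freelist;
	long b_bufsize;
	int  b_freelistindex;		/* index of the queue we are on, or -1 */
};

struct xbqueue {
	TAILQ_HEAD(, xbuf) bq_queue;
	long bq_bytes;			/* total buffer bytes held on this queue */
} queues[3];				/* each bq_queue must be TAILQ_INIT()ed before use */

static void
xbinstailfree(struct xbuf *bp, struct xbqueue *dp)
{
	assert(bp->b_freelistindex == -1);	/* catch double frees */
	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - queues;
}

static void
xbremfree(struct xbuf *bp)
{
	struct xbqueue *dp = &queues[bp->b_freelistindex];

	assert(dp->bq_bytes >= bp->b_bufsize);
	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes -= bp->b_bufsize;
	bp->b_freelistindex = -1;		/* no longer on any queue */
}
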
689 buf_t *bp;
692 bp = getblk(vp, blkno, size, 0, 0);
697 if (bp == NULL) {
707 if (!ISSET(bp->b_oflags, (BO_DONE | BO_DELWRI))) {
709 SET(bp->b_flags, B_READ | async);
711 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
713 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
714 VOP_STRATEGY(vp, bp);
719 brelse(bp, 0);
738 return bp;
748 buf_t *bp;
754 bp = *bpp = bio_doread(vp, blkno, size, 0);
755 if (bp == NULL)
759 error = biowait(bp);
761 error = fscow_run(bp, true);
763 brelse(bp, 0);
778 buf_t *bp;
783 bp = *bpp = bio_doread(vp, blkno, size, 0);
784 if (bp == NULL)
804 error = biowait(bp);
806 error = fscow_run(bp, true);
808 brelse(bp, 0);
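
bio_doread() and the bread() wrappers above implement a cached read: getblk() finds or creates the buffer, VOP_STRATEGY() is issued only when the buffer is not already valid, and biowait() collects the result. A hedged sketch of how a caller typically consumes the interface follows; myread, lbn and size are placeholders, not code from this file. Note that on error bread() has already released the buffer.

/* hypothetical caller: fetch logical block lbn of vp and look at it */
static int
myread(struct vnode *vp, daddr_t lbn, int size)
{
	buf_t *bp;
	int error;

	error = bread(vp, lbn, size, 0, &bp);
	if (error != 0)
		return error;		/* bread() already released the buffer */

	/* ... inspect or copy out bp->b_data ... */

	brelse(bp, 0);			/* drop our hold; the block stays cached */
	return 0;
}
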
819 bwrite(buf_t *bp)
825 BIOHIST_FUNC(__func__); BIOHIST_CALLARGS(biohist, "bp=%#jx",
826 (uintptr_t)bp, 0, 0, 0);
828 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
829 KASSERT(!cv_has_waiters(&bp->b_done));
831 vp = bp->b_vp;
841 KASSERT(bp->b_objlock == vp->v_interlock);
851 if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) {
852 bdwrite(bp);
864 sync = !ISSET(bp->b_flags, B_ASYNC);
866 bdwrite(bp);
886 bp->b_error = 0;
887 wasdelayed = ISSET(bp->b_oflags, BO_DELWRI);
888 CLR(bp->b_flags, B_READ);
891 mutex_enter(bp->b_objlock);
892 CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
893 reassignbuf(bp, bp->b_vp);
895 cv_broadcast(&bp->b_busy);
899 mutex_enter(bp->b_objlock);
900 CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
904 mutex_exit(bp->b_objlock);
908 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
910 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
912 VOP_STRATEGY(vp, bp);
916 rv = biowait(bp);
919 brelse(bp, 0);
949 bdwrite(buf_t *bp)
952 BIOHIST_FUNC(__func__); BIOHIST_CALLARGS(biohist, "bp=%#jx",
953 (uintptr_t)bp, 0, 0, 0);
955 KASSERT(bp->b_vp == NULL || bp->b_vp->v_tag != VT_UFS ||
956 bp->b_vp->v_type == VBLK || ISSET(bp->b_flags, B_COWDONE));
957 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
958 KASSERT(!cv_has_waiters(&bp->b_done));
961 if (bdev_type(bp->b_dev) == D_TAPE) {
962 bawrite(bp);
966 if (wapbl_vphaswapbl(bp->b_vp)) {
967 struct mount *mp = wapbl_vptomp(bp->b_vp);
969 if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) {
970 WAPBL_ADD_BUF(mp, bp);
980 KASSERT(bp->b_vp == NULL || bp->b_objlock == bp->b_vp->v_interlock);
982 if (!ISSET(bp->b_oflags, BO_DELWRI)) {
984 mutex_enter(bp->b_objlock);
985 SET(bp->b_oflags, BO_DELWRI);
987 reassignbuf(bp, bp->b_vp);
989 cv_broadcast(&bp->b_busy);
992 mutex_enter(bp->b_objlock);
995 CLR(bp->b_oflags, BO_DONE);
996 mutex_exit(bp->b_objlock);
998 brelse(bp, 0);
1005 bawrite(buf_t *bp)
1008 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
1009 KASSERT(bp->b_vp != NULL);
1011 SET(bp->b_flags, B_ASYNC);
1012 VOP_BWRITE(bp->b_vp, bp);
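
bwrite(), bdwrite() and bawrite() above are the three ways a dirty buffer leaves the caller's hands: bwrite() starts the I/O and, unless B_ASYNC is set, waits for it; bdwrite() marks the buffer BO_DELWRI and releases it to be flushed later; bawrite() sets B_ASYNC and pushes it through VOP_BWRITE(). A hedged sketch of the usual read-modify-write pattern (myupdate is hypothetical):

/* hypothetical caller: read-modify-write one metadata block */
static int
myupdate(struct vnode *vp, daddr_t lbn, int size, bool sync)
{
	buf_t *bp;
	int error;

	error = bread(vp, lbn, size, 0, &bp);
	if (error != 0)
		return error;

	/* ... modify bp->b_data in place ... */

	if (sync)
		return bwrite(bp);	/* start the I/O, wait, release the buffer */
	bdwrite(bp);			/* mark BO_DELWRI and release; flushed later */
	return 0;
}
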
1020 brelsel(buf_t *bp, int set)
1025 SDT_PROBE2(io, kernel, , brelse, bp, set);
1027 KASSERT(bp != NULL);
1029 KASSERT(!cv_has_waiters(&bp->b_done));
1031 SET(bp->b_cflags, set);
1033 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
1034 KASSERT(bp->b_iodone == NULL);
1040 if (ISSET(bp->b_cflags, BC_WANTED))
1041 CLR(bp->b_cflags, BC_WANTED|BC_AGE);
1044 if (ISSET(bp->b_flags, B_COWDONE)) {
1045 mutex_enter(bp->b_objlock);
1046 if (!ISSET(bp->b_oflags, BO_DELWRI))
1047 CLR(bp->b_flags, B_COWDONE);
1048 mutex_exit(bp->b_objlock);
1056 if (ISSET(bp->b_flags, B_LOCKED))
1057 bp->b_error = 0;
1060 if (ISSET(bp->b_cflags, BC_NOCACHE) || bp->b_error != 0)
1061 SET(bp->b_cflags, BC_INVAL);
1063 if (ISSET(bp->b_cflags, BC_VFLUSH)) {
1070 CLR(bp->b_cflags, BC_VFLUSH);
1071 if (!ISSET(bp->b_cflags, BC_INVAL|BC_AGE) &&
1072 !ISSET(bp->b_flags, B_LOCKED) && bp->b_error == 0) {
1073 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 1));
1076 bremfree(bp);
1080 KDASSERT(checkfreelist(bp, &bufqueues[BQ_AGE], 0));
1081 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 0));
1082 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LOCKED], 0));
1084 if ((bp->b_bufsize <= 0) || ISSET(bp->b_cflags, BC_INVAL)) {
1089 if (ISSET(bp->b_flags, B_LOCKED)) {
1090 if (wapbl_vphaswapbl(vp = bp->b_vp)) {
1093 KASSERT(bp->b_iodone !=
1095 WAPBL_REMOVE_BUF(mp, bp);
1099 mutex_enter(bp->b_objlock);
1100 CLR(bp->b_oflags, BO_DONE|BO_DELWRI);
1101 if ((vp = bp->b_vp) != NULL) {
1102 KASSERT(bp->b_objlock == vp->v_interlock);
1103 reassignbuf(bp, bp->b_vp);
1104 brelvp(bp);
1107 KASSERT(bp->b_objlock == &buffer_lock);
1108 mutex_exit(bp->b_objlock);
1111 cv_broadcast(&bp->b_busy);
1112 if (bp->b_bufsize <= 0)
1118 binsheadfree(bp, bufq);
1128 if (ISSET(bp->b_flags, B_LOCKED)) {
1131 } else if (!ISSET(bp->b_cflags, BC_AGE)) {
1138 binstailfree(bp, bufq);
1142 CLR(bp->b_cflags, BC_AGE|BC_BUSY|BC_NOCACHE);
1143 CLR(bp->b_flags, B_ASYNC);
1154 if (bp->b_bufsize <= 0) {
1155 cv_broadcast(&bp->b_busy);
1156 buf_destroy(bp);
1158 memset((char *)bp, 0, sizeof(*bp));
1160 pool_cache_put(buf_cache, bp);
1162 cv_signal(&bp->b_busy);
1166 brelse(buf_t *bp, int set)
1170 brelsel(bp, set);
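
brelsel()/brelse() requeue a busy buffer onto the AGE, LRU or LOCKED queue, or destroy it once it is invalid and holds no data; the set argument is OR'ed into b_cflags first, so callers can poison the buffer as they let go of it. The common forms, as seen elsewhere in this listing (a short illustrative sketch):

brelse(bp, 0);			/* normal release: keep the contents cached */
brelse(bp, BC_INVAL);		/* release and invalidate: contents are stale */
brelse(bp, BC_NOCACHE);		/* release; brelsel() turns this into BC_INVAL too */
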
1184 buf_t *bp;
1189 LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
1190 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
1191 !ISSET(bp->b_cflags, BC_INVAL)) {
1192 KASSERT(bp->b_objlock == vp->v_interlock);
1193 return (bp);
1212 buf_t *bp;
1217 bp = incore(vp, blkno);
1218 if (bp != NULL) {
1219 err = bbusy(bp, ((slpflag & PCATCH) != 0), slptimeo, NULL);
1228 KASSERT(!cv_has_waiters(&bp->b_done));
1230 if (ISSET(bp->b_oflags, BO_DONE|BO_DELWRI) &&
1231 bp->b_bcount < size && vp->v_type != VBLK)
1234 bremfree(bp);
1237 if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL)
1242 brelsel(bp, 0);
1246 LIST_INSERT_HEAD(BUFHASH(vp, blkno), bp, b_hash);
1247 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
1249 bgetvp(vp, bp);
1259 if (ISSET(bp->b_flags, B_LOCKED)) {
1260 KASSERT(bp->b_bufsize >= size);
1262 if (allocbuf(bp, size, preserve)) {
1264 LIST_REMOVE(bp, b_hash);
1265 brelsel(bp, BC_INVAL);
1272 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1273 SDT_PROBE4(io, kernel, , getblk__done, vp, blkno, size, bp);
1274 return bp;
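
getblk() looks the block up with incore() and, on a miss, takes a buffer from getnewbuf(), rehashes it, attaches it to the vnode, and resizes it with allocbuf(). When a file system creates a brand-new block there is nothing worth reading, so the usual pattern is getblk() plus an explicit clear; a hedged sketch (mynewblock and the ENOMEM mapping are assumptions):

/* hypothetical caller: set up a brand-new block that need not be read */
static int
mynewblock(struct vnode *vp, daddr_t lbn, int size)
{
	buf_t *bp;

	bp = getblk(vp, lbn, size, 0, 0);	/* find or create; returned BC_BUSY */
	if (bp == NULL)
		return ENOMEM;			/* assumption: map failure to ENOMEM */
	memset(bp->b_data, 0, size);		/* nothing on disk is worth reading */
	/* ... fill in the new contents ... */
	bdwrite(bp);				/* or bwrite(bp) if it must reach disk now */
	return 0;
}
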
1283 buf_t *bp;
1287 while ((bp = getnewbuf(0, 0, 0)) == NULL)
1290 SET(bp->b_cflags, BC_INVAL);
1291 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1293 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1294 error = allocbuf(bp, size, 0);
1296 return bp;
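
geteblk() above hands back an anonymous BC_INVAL buffer of at least size bytes that is hashed on invalhash rather than tied to a vnode, handy as temporary kernel storage that goes back through the normal release path. A minimal hedged sketch:

buf_t *bp;

bp = geteblk(size);		/* anonymous, BC_INVAL, not attached to any vnode */
/* ... use bp->b_data as temporary storage ... */
brelse(bp, 0);			/* still BC_INVAL, so the buffer is recycled */
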
1308 allocbuf(buf_t *bp, int size, int preserve)
1319 oldcount = bp->b_bcount;
1321 bp->b_bcount = size;
1323 oldsize = bp->b_bufsize;
1341 memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1342 if (bp->b_data != NULL)
1343 buf_mrelease(bp->b_data, oldsize);
1344 bp->b_data = addr;
1345 bp->b_bufsize = desired_size;
1370 if (wapbl_vphaswapbl(bp->b_vp)) {
1371 WAPBL_RESIZE_BUF(wapbl_vptomp(bp->b_vp), bp,
1389 buf_t *bp;
1403 bp = pool_cache_get(buf_cache, PR_NOWAIT);
1404 if (bp != NULL) {
1405 memset((char *)bp, 0, sizeof(*bp));
1406 buf_init(bp);
1407 SET(bp->b_cflags, BC_BUSY); /* mark buffer busy */
1410 bp->b_freelistindex = -1;
1412 SDT_PROBE1(io, kernel, , getnewbuf__done, bp);
1413 return bp;
1419 if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL) {
1420 KASSERT(!ISSET(bp->b_oflags, BO_DELWRI));
1422 TAILQ_FOREACH(bp, &bufqueues[BQ_LRU].bq_queue, b_freelist) {
1423 if (ISSET(bp->b_cflags, BC_VFLUSH) ||
1424 !ISSET(bp->b_oflags, BO_DELWRI))
1426 if (fstrans_start_nowait(bp->b_vp->v_mount) == 0) {
1428 transmp = bp->b_vp->v_mount;
1433 if (bp != NULL) {
1434 KASSERT(!ISSET(bp->b_cflags, BC_BUSY) ||
1435 ISSET(bp->b_cflags, BC_VFLUSH));
1436 bremfree(bp);
1439 SET(bp->b_cflags, BC_BUSY);
1442 cv_broadcast(&bp->b_busy);
1461 if (bp->b_bufsize <= 0)
1462 panic("buffer %p: on queue but empty", bp);
1465 if (ISSET(bp->b_cflags, BC_VFLUSH)) {
1471 CLR(bp->b_cflags, BC_VFLUSH);
1472 SET(bp->b_cflags, BC_AGE);
1476 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
1477 KASSERT(!cv_has_waiters(&bp->b_done));
1483 if (ISSET(bp->b_oflags, BO_DELWRI)) {
1488 SET(bp->b_cflags, BC_AGE);
1490 bawrite(bp);
1500 vp = bp->b_vp;
1503 bp->b_cflags = BC_BUSY;
1504 bp->b_oflags = 0;
1505 bp->b_flags = 0;
1506 bp->b_dev = NODEV;
1507 bp->b_blkno = 0;
1508 bp->b_lblkno = 0;
1509 bp->b_rawblkno = 0;
1510 bp->b_iodone = 0;
1511 bp->b_error = 0;
1512 bp->b_resid = 0;
1513 bp->b_bcount = 0;
1515 LIST_REMOVE(bp, b_hash);
1520 brelvp(bp);
1524 SDT_PROBE1(io, kernel, , getnewbuf__done, bp);
1525 return bp;
1534 buf_t *bp;
1540 bp = incore(vp, blkno);
1541 if (bp != NULL) {
1542 err = bbusy(bp, 0, 0, NULL);
1545 bremfree(bp);
1546 if (ISSET(bp->b_oflags, BO_DELWRI)) {
1547 SET(bp->b_cflags, BC_NOCACHE);
1549 bwrite(bp);
1551 brelsel(bp, BC_INVAL);
1566 buf_t *bp;
1572 if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
1575 KASSERT((bp->b_cflags & BC_WANTED) == 0);
1576 size = bp->b_bufsize;
1579 buf_mrelease(bp->b_data, size);
1580 bp->b_bcount = bp->b_bufsize = 0;
1583 brelsel(bp, 0);
1609 biowait(buf_t *bp)
1614 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
1616 SDT_PROBE1(io, kernel, , wait__start, bp);
1618 mutex_enter(bp->b_objlock);
1620 BIOHIST_CALLARGS(biohist, "bp=%#jx, oflags=0x%jx, ret_addr=%#jx",
1621 (uintptr_t)bp, bp->b_oflags,
1624 while (!ISSET(bp->b_oflags, BO_DONE | BO_DELWRI)) {
1625 BIOHIST_LOG(biohist, "waiting bp=%#jx",
1626 (uintptr_t)bp, 0, 0, 0);
1627 cv_wait(&bp->b_done, bp->b_objlock);
1629 mutex_exit(bp->b_objlock);
1631 SDT_PROBE1(io, kernel, , wait__done, bp);
1633 BIOHIST_LOG(biohist, "return %jd", bp->b_error, 0, 0, 0);
1635 return bp->b_error;
1655 biodone(buf_t *bp)
1661 KASSERT(!ISSET(bp->b_oflags, BO_DONE));
1666 TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_biodone, bp, b_actq);
1668 BIOHIST_CALLARGS(biohist, "bp=%#jx, softint scheduled",
1669 (uintptr_t)bp, 0, 0, 0);
1674 biodone2(bp);
1678 SDT_PROBE_DEFINE1(io, kernel, , done, "struct buf *"/*bp*/);
1681 biodone2(buf_t *bp)
1685 SDT_PROBE1(io, kernel, , done, bp);
1688 BIOHIST_CALLARGS(biohist, "bp=%#jx", (uintptr_t)bp, 0, 0, 0);
1690 mutex_enter(bp->b_objlock);
1692 if (ISSET(bp->b_oflags, BO_DONE))
1694 CLR(bp->b_flags, B_COWDONE);
1695 SET(bp->b_oflags, BO_DONE);
1696 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1699 if (!ISSET(bp->b_flags, B_READ))
1700 vwakeup(bp);
1702 if ((callout = bp->b_iodone) != NULL) {
1707 KASSERT(!cv_has_waiters(&bp->b_done));
1708 bp->b_iodone = NULL;
1709 mutex_exit(bp->b_objlock);
1710 (*callout)(bp);
1711 } else if (ISSET(bp->b_flags, B_ASYNC)) {
1714 KASSERT(!cv_has_waiters(&bp->b_done));
1715 mutex_exit(bp->b_objlock);
1716 brelse(bp, 0);
1720 cv_broadcast(&bp->b_done);
1721 mutex_exit(bp->b_objlock);
1729 buf_t *bp;
1740 bp = TAILQ_FIRST(&ci->ci_data.cpu_biodone);
1741 TAILQ_REMOVE(&ci->ci_data.cpu_biodone, bp, b_actq);
1744 BIOHIST_LOG(biohist, "bp=%#jx", (uintptr_t)bp, 0, 0, 0);
1745 biodone2(bp);
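
biodone() marks an I/O complete: called from interrupt context it only queues the buffer on the per-CPU cpu_biodone list and schedules a soft interrupt, and biointr()/biodone2() then either run the b_iodone callback, release an async buffer, or wake a biowait()er. A hedged sketch of the callback convention from a consumer's side; mydone, mysubmit and the field choices are assumptions, not code from this file.

/* hypothetical completion handler for a buffer obtained from getiobuf() */
static void
mydone(buf_t *bp)
{
	/* biodone2() has already cleared b_iodone and dropped b_objlock;
	 * the callback must dispose of the buffer itself. */
	if (bp->b_error != 0)
		printf("mydriver: I/O error %d\n", bp->b_error);
	putiobuf(bp);
}

/* hypothetical submitter: the driver's eventual biodone(bp) ends up
 * running mydone(bp) from the biointr soft interrupt */
static void
mysubmit(struct vnode *vp, void *data, int nbytes, daddr_t blkno)
{
	buf_t *bp = getiobuf(vp, true);

	bp->b_flags = B_READ | B_ASYNC;
	bp->b_data = data;
	bp->b_bcount = nbytes;
	bp->b_blkno = blkno;
	bp->b_iodone = mydone;
	VOP_STRATEGY(vp, bp);
}
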
1779 buf_t *bp;
1830 TAILQ_FOREACH(bp, &bq->bq_queue, b_freelist) {
1831 bq->bq_marker = bp;
1833 sysctl_fillbuf(bp, &bs);
1839 if (bq->bq_marker != bp) {
1975 buf_t *bp;
1989 LIST_FOREACH(bp, &bufhashtbl[i], b_hash) {
2016 buf_t *bp;
2024 TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
2025 counts[bp->b_bufsize / PAGE_SIZE]++;
2042 buf_t *bp;
2044 bp = pool_cache_get(bufio_cache, (waitok ? PR_WAITOK : PR_NOWAIT));
2045 if (bp == NULL)
2046 return bp;
2048 buf_init(bp);
2050 if ((bp->b_vp = vp) != NULL) {
2051 bp->b_objlock = vp->v_interlock;
2053 KASSERT(bp->b_objlock == &buffer_lock);
2056 return bp;
2060 putiobuf(buf_t *bp)
2063 buf_destroy(bp);
2064 pool_cache_put(bufio_cache, bp);
2072 nestiobuf_iodone(buf_t *bp)
2074 buf_t *mbp = bp->b_private;
2078 KASSERT(bp->b_bcount <= bp->b_bufsize);
2079 KASSERT(mbp != bp);
2081 error = bp->b_error;
2082 if (bp->b_error == 0 &&
2083 (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) {
2091 donebytes = bp->b_bufsize;
2093 putiobuf(bp);
2101 * => 'bp' should be a buffer allocated by getiobuf.
2107 nestiobuf_setup(buf_t *mbp, buf_t *bp, int offset, size_t size)
2113 bp->b_vp = vp;
2114 bp->b_dev = mbp->b_dev;
2115 bp->b_objlock = mbp->b_objlock;
2116 bp->b_cflags = BC_BUSY;
2117 bp->b_flags = B_ASYNC | b_pass;
2118 bp->b_iodone = nestiobuf_iodone;
2119 bp->b_data = (char *)mbp->b_data + offset;
2120 bp->b_resid = bp->b_bcount = size;
2121 bp->b_bufsize = bp->b_bcount;
2122 bp->b_private = mbp;
2123 BIO_COPYPRIO(bp, mbp);
2124 if (BUF_ISWRITE(bp) && vp != NULL) {
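
getiobuf()/putiobuf() allocate I/O buffers outside the buffer cache, and nestiobuf_setup()/nestiobuf_iodone() let one large master buffer be split into children whose completions are accounted back to the parent. A hedged sketch of the fan-out loop; myfanout and CHUNK are placeholders, and real drivers pick the chunking and strategy target to suit the device.

#define	CHUNK	(64 * 1024)		/* placeholder chunk size */

/* hypothetical fan-out: split the large request mbp into CHUNK-byte children */
static void
myfanout(struct vnode *vp, buf_t *mbp)
{
	int offset;

	for (offset = 0; offset < mbp->b_bcount; offset += CHUNK) {
		size_t sz = MIN(CHUNK, mbp->b_bcount - offset);
		buf_t *bp = getiobuf(NULL, true);

		nestiobuf_setup(mbp, bp, offset, sz);	/* bp->b_data points into mbp */
		bp->b_blkno = mbp->b_blkno + btodb(offset);
		VOP_STRATEGY(vp, bp);	/* each child reports back via nestiobuf_iodone */
	}
}
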
2160 buf_init(buf_t *bp)
2163 cv_init(&bp->b_busy, "biolock");
2164 cv_init(&bp->b_done, "biowait");
2165 bp->b_dev = NODEV;
2166 bp->b_error = 0;
2167 bp->b_flags = 0;
2168 bp->b_cflags = 0;
2169 bp->b_oflags = 0;
2170 bp->b_objlock = &buffer_lock;
2171 bp->b_iodone = NULL;
2172 bp->b_dev = NODEV;
2173 bp->b_vnbufs.le_next = NOLIST;
2174 BIO_SETPRIO(bp, BPRIO_DEFAULT);
2178 buf_destroy(buf_t *bp)
2181 cv_destroy(&bp->b_done);
2182 cv_destroy(&bp->b_busy);
2186 bbusy(buf_t *bp, bool intr, int timo, kmutex_t *interlock)
2192 SDT_PROBE4(io, kernel, , bbusy__start, bp, intr, timo, interlock);
2194 if ((bp->b_cflags & BC_BUSY) != 0) {
2199 bp->b_cflags |= BC_WANTED;
2203 error = cv_timedwait_sig(&bp->b_busy, &bufcache_lock,
2206 error = cv_timedwait(&bp->b_busy, &bufcache_lock,
2218 bp->b_cflags |= BC_BUSY;
2223 bp, intr, timo, interlock, error);
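
bbusy() is the primitive that claims a buffer for exclusive use: with bufcache_lock held it sets BC_BUSY, or marks the buffer BC_WANTED and sleeps on b_busy (interruptibly and/or with a timeout, optionally dropping a caller-supplied interlock) until brelsel() broadcasts. The getblk() and invalidation callers above retry their incore() lookup when bbusy() reports that the buffer may have changed while they slept. A hedged sketch of that retry pattern (myclaim is hypothetical):

/* hypothetical lookup-and-claim, mirroring the callers of bbusy() above */
static buf_t *
myclaim(struct vnode *vp, daddr_t blkno)
{
	buf_t *bp;
	int error;

	mutex_enter(&bufcache_lock);
	for (;;) {
		bp = incore(vp, blkno);
		if (bp == NULL)
			break;			/* the block is not cached */
		error = bbusy(bp, false, 0, NULL);
		if (error == 0)
			break;			/* BC_BUSY is now ours */
		if (error != EPASSTHROUGH) {	/* interrupted, timed out, ... */
			bp = NULL;
			break;
		}
		/* we slept; the buffer may have been recycled, so look it up again */
	}
	mutex_exit(&bufcache_lock);
	return bp;				/* NULL, or a buffer we hold BC_BUSY on */
}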