Lines matching defs:vnd (NetBSD vnd.c, rev 1.290)

1 /*	$NetBSD: vnd.c,v 1.290 2024/08/15 21:08:20 mlelstv Exp $	*/
94 __KERNEL_RCSID(0, "$NetBSD: vnd.c,v 1.290 2024/08/15 21:08:20 mlelstv Exp $");
159 #define VND_GETXFER(vnd) pool_get(&(vnd)->sc_vxpool, PR_WAITOK)
160 #define VND_PUTXFER(vnd, vx) pool_put(&(vnd)->sc_vxpool, (vx))
165 #define VND_MAXPENDING(vnd) ((vnd)->sc_maxactive * 4)
166 #define VND_MAXPAGES(vnd) (1024 * 1024 / PAGE_SIZE)
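
Note: the two limit macros above bound vnd's resource use. VND_MAXPENDING caps queued-but-unserviced buffers at four times the number of in-flight transfers, and VND_MAXPAGES caps the rdwr path's page cache footprint at 1 MiB worth of pages. A minimal standalone restatement, assuming PAGE_SIZE is 4096 and sc_maxactive is 8 (the value one branch of vndthrottle() below assigns):

#include <stdio.h>

#define PAGE_SIZE	4096	/* assumption for the example */
#define SC_MAXACTIVE	8	/* assumed vndthrottle() value */

int
main(void)
{
	int maxpending = SC_MAXACTIVE * 4;	/* VND_MAXPENDING */
	int maxpages = 1024 * 1024 / PAGE_SIZE;	/* VND_MAXPAGES */

	printf("pending cap: %d bufs\n", maxpending);	  /* 32 */
	printf("page cap: %d pages (1 MiB)\n", maxpages); /* 256 */
	return 0;
}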
236 CFATTACH_DECL3_NEW(vnd, sizeof(struct vnd_softc),
482 struct vnd_softc *vnd =
488 if (vnd == NULL) {
492 lp = vnd->sc_dkdev.dk_label;
494 if ((vnd->sc_flags & VNF_INITED) == 0) {
510 if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
526 vnd->sc_size) <= 0)
529 if (bounds_check_with_label(&vnd->sc_dkdev,
530 bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
547 pp = &vnd->sc_dkdev.dk_label->d_partitions[
557 if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
561 * thread to add requests, as a wedge on vnd queues
564 if (curlwp != vnd->sc_kthread && curlwp != uvm.pagedaemon_lwp) {
565 while (vnd->sc_pending >= VND_MAXPENDING(vnd))
566 tsleep(&vnd->sc_pending, PRIBIO, "vndpc", 0);
568 vnd->sc_pending++;
569 KASSERT(vnd->sc_pending > 0);
571 bufq_put(vnd->sc_tab, bp);
572 wakeup(&vnd->sc_tab);
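
Note: lines 564-572 and their consumer-side counterparts (lines 685-688 below) form a bounded-queue handshake: vndstrategy() sleeps while sc_pending sits at VND_MAXPENDING, and the worker wakes it exactly when the count drops off the limit; the kernel exempts its own worker thread and the pagedaemon to avoid deadlock. A hedged userspace analogue, using a pthread condition variable where the kernel uses tsleep()/wakeup():

#include <pthread.h>

#define MAXPENDING 32		/* VND_MAXPENDING with maxactive 8 */

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int pending;

/* producer side, cf. vndstrategy() lines 565-568 */
void
producer_enqueue(void)
{
	pthread_mutex_lock(&mtx);
	while (pending >= MAXPENDING)	/* tsleep(&sc_pending, ..., "vndpc") */
		pthread_cond_wait(&cv, &mtx);
	pending++;
	pthread_mutex_unlock(&mtx);
	/* bufq_put() + wakeup(&sc_tab) would follow here */
}

/* consumer side, cf. vndthread() lines 686-688 */
void
consumer_dequeue(void)
{
	pthread_mutex_lock(&mtx);
	if (pending-- == MAXPENDING)	/* wakeup(&sc_pending) */
		pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&mtx);
}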
583 vnode_has_strategy(struct vnd_softc *vnd)
585 return vnode_has_op(vnd->sc_vp, VOFFSET(vop_bmap)) &&
586 vnode_has_op(vnd->sc_vp, VOFFSET(vop_strategy));
593 vnode_has_large_blocks(struct vnd_softc *vnd)
597 iosize = vnd->sc_iosize;
598 vnd_secsize = vnd->sc_geom.vng_secsize;
610 vnode_strategy_probe(struct vnd_softc *vnd)
615 if (!vnode_has_strategy(vnd))
618 if (vnode_has_large_blocks(vnd))
625 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
626 error = VOP_BMAP(vnd->sc_vp, 0, NULL, &nbn, NULL);
627 VOP_UNLOCK(vnd->sc_vp);
640 struct vnd_softc *vnd = arg;
648 if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
649 ! vnode_has_strategy(vnd))
650 vnd->sc_flags |= VNF_USE_VN_RDWR;
653 * to access blocks as small as defined by the vnd geometry.
655 if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
656 vnode_has_large_blocks(vnd))
657 vnd->sc_flags |= VNF_USE_VN_RDWR;
661 printf("vndthread: vp %p, %s\n", vnd->sc_vp,
662 (vnd->sc_flags & VNF_USE_VN_RDWR) == 0 ?
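
Note: the probe logic above decides between the two I/O paths. Strategy I/O needs the backing vnode to implement both VOP_BMAP and VOP_STRATEGY (vnode_has_strategy()), and the file system's I/O size must be compatible with the emulated sector size (vnode_has_large_blocks()); otherwise the driver falls back to vn_rdwr(). A condensed sketch of that decision; the divisibility test is an assumption standing in for the exact check in vnode_has_large_blocks():

#include <stdbool.h>
#include <stdint.h>

struct probe {
	bool	 has_bmap_and_strategy;	/* vnode_has_strategy() result */
	uint32_t fs_iosize;		/* sc_iosize */
	uint32_t vnd_secsize;		/* sc_geom.vng_secsize */
};

/* true when the driver must fall back to the vn_rdwr() path */
static bool
must_use_vn_rdwr(const struct probe *p)
{
	if (!p->has_bmap_and_strategy)
		return true;
	/* assumed stand-in for vnode_has_large_blocks(): strategy
	 * cannot address vnd sectors smaller than an fs block */
	if (p->vnd_secsize % p->fs_iosize != 0)
		return true;
	return false;
}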
668 vnd->sc_flags |= VNF_KTHREAD;
669 wakeup(&vnd->sc_kthread);
675 while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
680 obp = bufq_get(vnd->sc_tab);
682 tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
685 if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
686 KASSERT(vnd->sc_pending > 0);
687 if (vnd->sc_pending-- == VND_MAXPENDING(vnd))
688 wakeup(&vnd->sc_pending);
696 if (vnd->sc_vp->v_mount == NULL) {
702 if ((obp->b_flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
707 vnd->sc_dkdev.dk_label->d_secsize;
719 vnx = VND_GETXFER(vnd);
721 vnx->vx_vnd = vnd;
724 while (vnd->sc_active >= vnd->sc_maxactive) {
725 tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
727 vnd->sc_active++;
731 disk_busy(&vnd->sc_dkdev);
740 bp->b_vp = vnd->sc_vp;
747 fstrans_start_lazy(vnd->sc_vp->v_mount);
750 if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0)
751 handle_with_strategy(vnd, obp, bp);
753 handle_with_rdwr(vnd, obp, bp);
755 fstrans_done(vnd->sc_vp->v_mount);
765 vnd->sc_flags &= (~VNF_KTHREAD | VNF_VUNCONF);
766 wakeup(&vnd->sc_kthread);
797 * 'obp' is a pointer to the original request fed to the vnd device.
800 handle_with_rdwr(struct vnd_softc *vnd, const struct buf *obp, struct buf *bp)
809 offset = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
811 vp = vnd->sc_vp;
815 printf("vnd (rdwr): vp %p, %s, rawblkno 0x%" PRIx64
819 vnd->sc_dkdev.dk_label->d_secsize, offset,
828 vnd->sc_cred, &resid, NULL);
832 * Avoid caching too many pages, the vnd user
838 if (npages > VND_MAXPAGES(vnd)) {
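
Note: in the rdwr path the byte offset is just the raw block number scaled by the label sector size (line 809), and a single vn_rdwr() covers the whole request; afterwards the kernel trims the vnode's page cache once more than VND_MAXPAGES pages accumulate (line 838). A userspace sketch with pread() standing in for vn_rdwr():

#include <sys/types.h>
#include <stdint.h>
#include <unistd.h>

/* read analogue of handle_with_rdwr(); fd stands in for sc_vp and
 * d_secsize is the label sector size (often 512) */
ssize_t
rdwr_read(int fd, void *buf, size_t bcount, uint64_t rawblkno,
    uint32_t d_secsize)
{
	off_t offset = (off_t)(rawblkno * d_secsize);	/* line 809 */

	return pread(fd, buf, bcount, offset);
}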
859 * 'obp' is a pointer to the original request fed to the vnd device.
862 handle_with_strategy(struct vnd_softc *vnd, const struct buf *obp,
875 bn = obp->b_rawblkno * vnd->sc_dkdev.dk_label->d_secsize;
877 bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
898 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
899 error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
900 VOP_UNLOCK(vnd->sc_vp);
929 " sz 0x%zx\n", vnd->sc_vp, vp, (long long)bn,
941 (long) (vnd-vnd_softc), &nbp->vb_buf,
980 struct vnd_softc *vnd = vnx->vx_vnd;
986 KASSERT(vnd->sc_active > 0);
993 disk_unbusy(&vnd->sc_dkdev, bp->b_bcount - bp->b_resid,
995 vnd->sc_active--;
996 if (vnd->sc_active == 0) {
997 wakeup(&vnd->sc_tab);
1004 VND_PUTXFER(vnd, vnx);
1056 struct vnd_softc *vnd;
1063 vnd = device_lookup_private(&vnd_cd, *un);
1064 if (vnd == NULL)
1067 if ((vnd->sc_flags & VNF_INITED) == 0)
1070 vn_lock(vnd->sc_vp, LK_SHARED | LK_RETRY);
1071 error = VOP_GETATTR(vnd->sc_vp, va, l->l_cred);
1072 VOP_UNLOCK(vnd->sc_vp);
1077 vnddoclear(struct vnd_softc *vnd, int pmask, int minor, bool force)
1081 if ((error = vndlock(vnd)) != 0)
1089 if (DK_BUSY(vnd, pmask) && !force) {
1090 vndunlock(vnd);
1095 dkwedge_delall(&vnd->sc_dkdev);
1104 vnd->sc_flags |= VNF_CLEARING;
1105 vndunlock(vnd);
1106 vndclear(vnd, minor);
1113 pool_destroy(&vnd->sc_vxpool);
1116 disk_detach(&vnd->sc_dkdev);
1150 struct vnd_softc *vnd;
1208 vnd = device_lookup_private(&vnd_cd, unit);
1209 if (vnd == NULL)
1239 if (vnd->sc_flags & VNF_INITED)
1244 if ((vnd->sc_flags & VNF_INITED) == 0)
1249 error = disk_ioctl(&vnd->sc_dkdev, dev, cmd, data, flag, l);
1256 if ((error = vndlock(vnd)) != 0)
1263 vnd->sc_flags |= VNF_USE_VN_RDWR;
1279 vnd->sc_flags |= VNF_USE_VN_RDWR;
1316 vnd->sc_comp_blksz = be32toh(ch->block_size);
1318 vnd->sc_comp_numoffs = be32toh(ch->num_blocks) + 1;
1320 if (!DK_DEV_BSIZE_OK(vnd->sc_comp_blksz)) {
1325 KASSERT(0 < vnd->sc_comp_blksz);
1326 KASSERT(0 < vnd->sc_comp_numoffs);
1334 if (SIZE_MAX/sizeof(uint64_t) < vnd->sc_comp_numoffs) {
1342 sizeof(uint64_t)*vnd->sc_comp_numoffs) ||
1343 (UQUAD_MAX/vnd->sc_comp_blksz <
1344 vnd->sc_comp_numoffs - 1)) {
1351 KASSERT(vnd->sc_comp_numoffs - 1 <=
1352 UQUAD_MAX/vnd->sc_comp_blksz);
1354 ((u_quad_t)vnd->sc_comp_numoffs - 1) *
1355 (u_quad_t)vnd->sc_comp_blksz;
1359 vnd->sc_comp_offsets =
1360 malloc(sizeof(uint64_t) * vnd->sc_comp_numoffs,
1365 (void *)vnd->sc_comp_offsets,
1366 sizeof(uint64_t) * vnd->sc_comp_numoffs,
1378 for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) {
1379 vnd->sc_comp_offsets[i] =
1380 be64toh(vnd->sc_comp_offsets[i]);
1382 be64toh(vnd->sc_comp_offsets[i + 1])
1383 - vnd->sc_comp_offsets[i];
1387 vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] =
1388 be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs
1392 vnd->sc_comp_buff = malloc(comp_maxsize,
1396 vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz,
1398 vnd->sc_comp_buffblk = -1;
1401 memset(&vnd->sc_comp_stream, 0, sizeof(z_stream));
1402 vnd->sc_comp_stream.zalloc = vnd_alloc;
1403 vnd->sc_comp_stream.zfree = vnd_free;
1404 error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS);
1406 if (vnd->sc_comp_stream.msg)
1407 printf("vnd%d: compressed file, %s\n",
1408 unit, vnd->sc_comp_stream.msg);
1414 vnd->sc_flags |= VNF_COMP | VNF_READONLY;
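
Note: the compression support above reads a cloop-style image: a header gives a block size and block count, followed by a big-endian table of sc_comp_numoffs file offsets (the +1 at line 1318 accounts for the terminating entry), where entry i+1 minus entry i is the compressed length of block i. Decompression state is one long-lived z_stream, reset per block. A hedged userspace restatement of the zlib usage (link with -lz; vnd installs its own zalloc/zfree, the sketch uses zlib's defaults):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

/* decompress one block, mirroring vnd's inflateReset() + inflate()
 * pattern; zs persists across calls the way sc_comp_stream does */
int
decompress_block(z_stream *zs, unsigned char *in, unsigned inlen,
    unsigned char *out, unsigned outlen)
{
	zs->next_in = in;
	zs->avail_in = inlen;
	zs->next_out = out;
	zs->avail_out = outlen;
	inflateReset(zs);		/* fresh state per block */
	if (inflate(zs, Z_FINISH) != Z_STREAM_END) {
		if (zs->msg)
			fprintf(stderr, "inflate: %s\n", zs->msg);
		return -1;
	}
	return 0;
}

int
main(void)
{
	z_stream zs;

	memset(&zs, 0, sizeof(zs));	/* default zalloc/zfree */
	if (inflateInit2(&zs, MAX_WBITS) != Z_OK)	/* as at line 1404 */
		return 1;
	/* ... call decompress_block() once per compressed block ... */
	inflateEnd(&zs);
	return 0;
}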
1423 vnd->sc_vp = vp;
1424 vnd->sc_size = btodb(vattr.va_size); /* note truncation */
1429 error = bdev_ioctl(vattr.va_fsid, DIOCGSECTORSIZE, &vnd->sc_iosize, FKIOCTL, l);
1431 vnd->sc_iosize = vnd->sc_vp->v_mount->mnt_stat.f_frsize;
1434 if (vnd->sc_iosize == 0)
1435 vnd->sc_iosize = DEV_BSIZE;
1443 memcpy(&vnd->sc_geom, &vio->vnd_geom,
1449 if (!DK_DEV_BSIZE_OK(vnd->sc_geom.vng_secsize) ||
1450 vnd->sc_geom.vng_ntracks == 0 ||
1451 vnd->sc_geom.vng_nsectors == 0) {
1459 if (vnd->sc_geom.vng_ncylinders == 0)
1460 vnd->sc_geom.vng_ncylinders = vnd->sc_size / (
1461 (vnd->sc_geom.vng_secsize / DEV_BSIZE) *
1462 vnd->sc_geom.vng_ntracks *
1463 vnd->sc_geom.vng_nsectors);
1469 geomsize = (int64_t)vnd->sc_geom.vng_nsectors *
1470 vnd->sc_geom.vng_ntracks *
1471 vnd->sc_geom.vng_ncylinders *
1472 (vnd->sc_geom.vng_secsize / DEV_BSIZE);
1478 if (vnd->sc_size < geomsize) {
1482 } else if (vnd->sc_size >= (32 * 64)) {
1487 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1488 vnd->sc_geom.vng_nsectors = 32;
1489 vnd->sc_geom.vng_ntracks = 64;
1490 vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32);
1492 vnd->sc_geom.vng_secsize = DEV_BSIZE;
1493 vnd->sc_geom.vng_nsectors = 1;
1494 vnd->sc_geom.vng_ntracks = 1;
1495 vnd->sc_geom.vng_ncylinders = vnd->sc_size;
1498 vnd_set_geometry(vnd);
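
Note: when no usable geometry arrives with the ioctl, the driver fabricates one. Images of at least one full fake "cylinder" (32 sectors x 64 tracks = 1 MiB at DEV_BSIZE) get the 32/64 geometry; smaller images degenerate to one sector per track. The sanity check at line 1478 compares the file size against geomsize = nsectors * ntracks * ncylinders * (secsize / DEV_BSIZE). A standalone restatement, sizes in DEV_BSIZE (512-byte) blocks:

#include <stdint.h>

#define DEV_BSIZE 512

struct fake_geom {
	uint32_t secsize, nsectors, ntracks, ncylinders;
};

/* size is in DEV_BSIZE blocks, as sc_size is */
static struct fake_geom
vnd_fake_geometry(uint64_t size)
{
	struct fake_geom g = { DEV_BSIZE, 1, 1, 0 };

	if (size >= 32 * 64) {		/* >= 1 MiB: lines 1487-1490 */
		g.nsectors = 32;
		g.ntracks = 64;
		g.ncylinders = (uint32_t)(size / (64 * 32));
	} else {			/* tiny image: lines 1492-1495 */
		g.ncylinders = (uint32_t)size;
	}
	return g;
}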
1501 vnd->sc_flags |= VNF_READONLY;
1504 if ((error = vndsetcred(vnd, l->l_cred)) != 0)
1507 vndthrottle(vnd, vnd->sc_vp);
1508 vio->vnd_osize = dbtob(vnd->sc_size);
1510 vio->vnd_size = dbtob(vnd->sc_size);
1511 vnd->sc_flags |= VNF_INITED;
1514 error = kthread_create(PRI_NONE, 0, NULL, vndthread, vnd,
1515 &vnd->sc_kthread, "%s", device_xname(vnd->sc_dev));
1518 while ((vnd->sc_flags & VNF_KTHREAD) == 0) {
1519 tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0);
1524 vnd->sc_vp, (unsigned long) vnd->sc_size,
1525 vnd->sc_geom.vng_secsize,
1526 vnd->sc_geom.vng_nsectors,
1527 vnd->sc_geom.vng_ntracks,
1528 vnd->sc_geom.vng_ncylinders);
1532 disk_attach(&vnd->sc_dkdev);
1535 pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0,
1538 vndunlock(vnd);
1543 dkwedge_discover(&vnd->sc_dkdev);
1553 if (vnd->sc_comp_offsets) {
1554 free(vnd->sc_comp_offsets, M_DEVBUF);
1555 vnd->sc_comp_offsets = NULL;
1557 if (vnd->sc_comp_buff) {
1558 free(vnd->sc_comp_buff, M_DEVBUF);
1559 vnd->sc_comp_buff = NULL;
1561 if (vnd->sc_comp_decombuf) {
1562 free(vnd->sc_comp_decombuf, M_DEVBUF);
1563 vnd->sc_comp_decombuf = NULL;
1566 vndunlock(vnd);
1575 if ((error = vnddoclear(vnd, pmask, minor(dev), force)) != 0)
1590 if ((error = vndlock(vnd)) != 0)
1593 vnd->sc_flags |= VNF_LABELLING;
1604 error = setdisklabel(vnd->sc_dkdev.dk_label,
1605 lp, 0, vnd->sc_dkdev.dk_cpulabel);
1613 vndstrategy, vnd->sc_dkdev.dk_label,
1614 vnd->sc_dkdev.dk_cpulabel);
1617 vnd->sc_flags &= ~VNF_LABELLING;
1619 vndunlock(vnd);
1628 vnd->sc_flags |= VNF_KLABEL;
1630 vnd->sc_flags &= ~VNF_KLABEL;
1635 vnd->sc_flags |= VNF_WLABEL;
1637 vnd->sc_flags &= ~VNF_WLABEL;
1641 vndgetdefaultlabel(vnd, (struct disklabel *)data);
1646 vndgetdefaultlabel(vnd, &newlabel);
1659 bufq_getstrategyname(vnd->sc_tab),
1671 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1672 error = VOP_FSYNC(vnd->sc_vp, vnd->sc_cred,
1674 VOP_UNLOCK(vnd->sc_vp);
1691 vndsetcred(struct vnd_softc *vnd, kauth_cred_t cred)
1698 vnd->sc_cred = kauth_cred_dup(cred);
1703 aiov.iov_len = uimin(DEV_BSIZE, dbtob(vnd->sc_size));
1710 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
1711 error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
1714 * Because vnd does all IO directly through the vnode
1720 error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
1723 VOP_UNLOCK(vnd->sc_vp);
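
Note: vndsetcred() keeps a duplicated credential for all later file I/O and validates it up front by reading at most one DEV_BSIZE chunk from the start of the file (uimin at line 1703), then purges whatever buffers that read left behind (vinvalbuf, line 1720) so all subsequent I/O goes straight through the vnode. A hedged pread()-based analogue of the read test:

#include <sys/types.h>
#include <stdint.h>
#include <unistd.h>

#define DEV_BSIZE 512
#define dbtob(n)  ((uint64_t)(n) << 9)	/* disk blocks -> bytes */

/* try one small read at offset 0 on a descriptor opened with the
 * would-be credential */
int
cred_check(int fd, uint64_t sc_size /* in DEV_BSIZE blocks */)
{
	char buf[DEV_BSIZE];
	size_t len = dbtob(sc_size) < DEV_BSIZE ?
	    (size_t)dbtob(sc_size) : DEV_BSIZE;	/* uimin(), line 1703 */

	return pread(fd, buf, len, 0) == (ssize_t)len ? 0 : -1;
}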
1733 vndthrottle(struct vnd_softc *vnd, struct vnode *vp)
1737 vnd->sc_maxactive = 2;
1739 vnd->sc_maxactive = 8;
1741 if (vnd->sc_maxactive < 1)
1742 vnd->sc_maxactive = 1;
1749 struct vnd_softc *vnd;
1751 for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
1752 if (vnd->sc_flags & VNF_INITED)
1753 vndclear(vnd);
1758 vndclear(struct vnd_softc *vnd, int myminor)
1760 struct vnode *vp = vnd->sc_vp;
1767 printf("vndclear(%p): vp %p\n", vnd, vp);
1775 mn = DISKMINOR(device_unit(vnd->sc_dev), i);
1782 if ((vnd->sc_flags & VNF_READONLY) == 0)
1786 bufq_drain(vnd->sc_tab);
1789 vnd->sc_flags |= VNF_VUNCONF;
1790 wakeup(&vnd->sc_tab);
1791 while (vnd->sc_flags & VNF_KTHREAD)
1792 tsleep(&vnd->sc_kthread, PRIBIO, "vnthr", 0);
1796 if (vnd->sc_flags & VNF_COMP) {
1797 if (vnd->sc_comp_offsets) {
1798 free(vnd->sc_comp_offsets, M_DEVBUF);
1799 vnd->sc_comp_offsets = NULL;
1801 if (vnd->sc_comp_buff) {
1802 free(vnd->sc_comp_buff, M_DEVBUF);
1803 vnd->sc_comp_buff = NULL;
1805 if (vnd->sc_comp_decombuf) {
1806 free(vnd->sc_comp_decombuf, M_DEVBUF);
1807 vnd->sc_comp_decombuf = NULL;
1811 vnd->sc_flags &=
1816 (void) vn_close(vp, fflags, vnd->sc_cred);
1817 kauth_cred_free(vnd->sc_cred);
1818 vnd->sc_vp = NULL;
1819 vnd->sc_cred = NULL;
1820 vnd->sc_size = 0;
1887 strncpy(lp->d_typename, "vnd", sizeof(lp->d_typename));
1906 * Read the disklabel from a vnd. If one is not present, create a fake one.
1998 struct vnd_softc *vnd =
2020 comp_block = bn / (off_t)vnd->sc_comp_blksz;
2023 if (comp_block >= vnd->sc_comp_numoffs) {
2030 if (comp_block != vnd->sc_comp_buffblk) {
2031 length = vnd->sc_comp_offsets[comp_block + 1] -
2032 vnd->sc_comp_offsets[comp_block];
2033 vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
2034 error = vn_rdwr(UIO_READ, vnd->sc_vp, vnd->sc_comp_buff,
2035 length, vnd->sc_comp_offsets[comp_block],
2036 UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vnd->sc_cred,
2040 VOP_UNLOCK(vnd->sc_vp);
2045 vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
2046 vnd->sc_comp_stream.avail_in = length;
2047 vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
2048 vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
2049 inflateReset(&vnd->sc_comp_stream);
2050 error = inflate(&vnd->sc_comp_stream, Z_FINISH);
2052 if (vnd->sc_comp_stream.msg)
2053 aprint_normal_dev(vnd->sc_dev,
2055 vnd->sc_comp_stream.msg);
2057 VOP_UNLOCK(vnd->sc_vp);
2061 vnd->sc_comp_buffblk = comp_block;
2062 VOP_UNLOCK(vnd->sc_vp);
2066 offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
2067 length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
2076 error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
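
Note: the compressed read path locates data with plain division: bn (a byte offset into the uncompressed image) divided by sc_comp_blksz names the block, the remainder is the position inside the decompressed buffer, and the usable length is whatever remains of that block (lines 2020, 2066-2067); the compressed extent comes from adjacent offset-table entries (lines 2031-2032). Restated as a hypothetical helper:

#include <stdint.h>

struct comp_pos {
	uint64_t block;		/* index into sc_comp_offsets */
	uint32_t offset;	/* byte position in the decompressed block */
	uint32_t avail;		/* bytes usable before the block ends */
};

/* bn is a byte offset into the uncompressed image, blksz is
 * sc_comp_blksz; the compressed length of a block is then
 * offsets[block + 1] - offsets[block] */
static struct comp_pos
comp_locate(uint64_t bn, uint32_t blksz)
{
	struct comp_pos p;

	p.block = bn / blksz;		/* comp_block, line 2020 */
	p.offset = bn % blksz;		/* offset_in_buffer, line 2066 */
	p.avail = blksz - p.offset;	/* length_in_buffer, line 2067 */
	return p;
}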
2106 vnd_set_geometry(struct vnd_softc *vnd)
2108 struct disk_geom *dg = &vnd->sc_dkdev.dk_geom;
2113 spb = vnd->sc_geom.vng_secsize / DEV_BSIZE;
2114 dg->dg_secperunit = vnd->sc_size / spb;
2115 dg->dg_secsize = vnd->sc_geom.vng_secsize;
2116 dg->dg_nsectors = vnd->sc_geom.vng_nsectors;
2117 dg->dg_ntracks = vnd->sc_geom.vng_ntracks;
2118 dg->dg_ncylinders = vnd->sc_geom.vng_ncylinders;
2126 disk_set_info(vnd->sc_dev, &vnd->sc_dkdev, NULL);
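
Note: vnd_set_geometry() converts units: sc_size is kept in DEV_BSIZE blocks, while struct disk_geom counts native sectors, so the size is divided by spb = secsize / DEV_BSIZE (lines 2113-2114). As a one-function sketch:

#include <stdint.h>

#define DEV_BSIZE 512

/* sc_size (DEV_BSIZE blocks) -> dg_secperunit (native sectors) */
static inline uint64_t
devblocks_to_sectors(uint64_t sc_size, uint32_t secsize)
{
	uint32_t spb = secsize / DEV_BSIZE;	/* e.g. 4096/512 = 8 */

	return sc_size / spb;
}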
2135 MODULE(MODULE_CLASS_DRIVER, vnd, VND_DEPENDS);
2140 CFDRIVER_DECL(vnd, DV_DISK, NULL);
2154 error = devsw_attach("vnd", &vnd_bdevsw, &vnd_bmajor,