/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)spec_vnops.c	7.8 (Berkeley) 10/22/89
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "conf.h"
#include "buf.h"
#include "mount.h"
#include "vnode.h"
#include "../ufs/inode.h"
#include "errno.h"

int	blk_lookup(),
	blk_open(),
	blk_read(),
	blk_write(),
	blk_strategy(),
	blk_ioctl(),
	blk_select(),
	blk_lock(),
	blk_unlock(),
	blk_close(),
	blk_badop(),
	blk_nullop();

int	ufs_getattr(),
	ufs_setattr(),
	ufs_access(),
	ufs_inactive();

struct vnodeops blk_vnodeops = {
	blk_lookup,
	blk_badop,
	blk_badop,
	blk_open,
	blk_close,
	ufs_access,
	ufs_getattr,
	ufs_setattr,
	blk_read,
	blk_write,
	blk_ioctl,
	blk_select,
	blk_badop,
	blk_nullop,
	blk_badop,
	blk_badop,
	blk_badop,
	blk_badop,
	blk_badop,
	blk_badop,
	blk_badop,
	blk_badop,
	blk_badop,
	blk_badop,
	ufs_inactive,
	blk_lock,
	blk_unlock,
	blk_badop,
	blk_strategy,
};

/*
 * Trivial lookup routine that always fails.
 */
blk_lookup(vp, ndp)
	struct vnode *vp;
	struct nameidata *ndp;
{

	ndp->ni_dvp = vp;
	ndp->ni_vp = NULL;
	return (ENOTDIR);
}

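/*
 * Illustrative aside (the pathname below is hypothetical, not from
 * this file): because blk_lookup always fails, a path such as
 * "/dev/rxy0a/foo" can never resolve through a device special file;
 * the name-lookup code simply gets ENOTDIR back for any component
 * beneath the device.
 */
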
/*
 * Open called to allow handler
 * of special files to initialize and
 * validate before actual IO.
 */
/* ARGSUSED */
blk_open(vp, mode, cred)
	register struct vnode *vp;
	int mode;
	struct ucred *cred;
{
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);

	if (vp->v_mount && (vp->v_mount->m_flag & M_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		return ((*cdevsw[maj].d_open)(dev, mode, IFCHR));

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		return ((*bdevsw[maj].d_open)(dev, mode, IFBLK));
	}
	return (0);
}

/*
 * Check access permissions for a block device.
 */
blk_access(vp, mode, cred)
	struct vnode *vp;
	int mode;
	struct ucred *cred;
{

	return (iaccess(VTOI(vp), mode, cred));
}

/*
 * Vnode op for read
 */
blk_read(vp, uio, offp, ioflag, cred)
	register struct vnode *vp;
	struct uio *uio;
	off_t *offp;
	int ioflag;
	struct ucred *cred;
{
	int count, error;

	if (vp->v_type == VBLK && vp->v_data)
		VOP_LOCK(vp);
	uio->uio_offset = *offp;
	count = uio->uio_resid;
	error = readblkvp(vp, uio, cred, ioflag);
	*offp += count - uio->uio_resid;
	if (vp->v_type == VBLK && vp->v_data)
		VOP_UNLOCK(vp);
	return (error);
}

/*
 * Vnode op for write
 */
blk_write(vp, uio, offp, ioflag, cred)
	register struct vnode *vp;
	struct uio *uio;
	off_t *offp;
	int ioflag;
	struct ucred *cred;
{
	int count, error;

	if (vp->v_type == VBLK && vp->v_data)
		VOP_LOCK(vp);
	uio->uio_offset = *offp;
	count = uio->uio_resid;
	error = writeblkvp(vp, uio, cred, ioflag);
	*offp += count - uio->uio_resid;
	if (vp->v_type == VBLK && vp->v_data)
		VOP_UNLOCK(vp);
	return (error);
}

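/*
 * Worked example of the offset bookkeeping in blk_read and blk_write
 * (numbers are illustrative only): a read entered with *offp == 0 and
 * uio_resid == 8192 that transfers only 4096 bytes leaves uio_resid
 * at 4096, so *offp advances by count - uio_resid == 8192 - 4096 ==
 * 4096, i.e. by the bytes actually moved.  A short transfer therefore
 * never advances the caller's offset past data it did not get.
 */
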
/*
 * Device ioctl operation.
 */
/* ARGSUSED */
blk_ioctl(vp, com, data, fflag, cred)
	struct vnode *vp;
	register int com;
	caddr_t data;
	int fflag;
	struct ucred *cred;
{
	dev_t dev = vp->v_rdev;

	switch (vp->v_type) {

	case VCHR:
		return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

	case VBLK:
		return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

	default:
		panic("blk_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
blk_select(vp, which, cred)
	struct vnode *vp;
	int which;
	struct ucred *cred;
{
	register dev_t dev;

	switch (vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, which);
	}
}

/*
 * Just call the device strategy routine
 */
blk_strategy(bp)
	register struct buf *bp;
{
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

blk_lock(vp)
	struct vnode *vp;
{
	register struct inode *ip = VTOI(vp);

	if (ip)
		ILOCK(ip);
	return (0);
}

blk_unlock(vp)
	struct vnode *vp;
{
	register struct inode *ip = VTOI(vp);

	if (ip)
		IUNLOCK(ip);
	return (0);
}

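/*
 * Note (descriptive only): a special vnode need not have an underlying
 * inode, in which case VTOI(vp) is NULL and blk_lock and blk_unlock
 * above are harmless no-ops; this mirrors the v_data checks made
 * around VOP_LOCK in blk_read and blk_write.
 */
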
/*
 * Device close routine
 */
/* ARGSUSED */
blk_close(vp, flag, cred)
	register struct vnode *vp;
	int flag;
	struct ucred *cred;
{
	register struct inode *ip = VTOI(vp);
	dev_t dev = vp->v_rdev;
	int (*cfunc)();
	int error, mode;

	if (vp->v_count > 1 && !(ip->i_flag & ILOCKED))
		ITIMES(ip, &time, &time);

	switch (vp->v_type) {

	case VCHR:
		if (vp->v_count > 1)
			return (0);
		cfunc = cdevsw[major(dev)].d_close;
		mode = IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		bflush(vp->v_mount);
		if (binval(vp->v_mount))
			return (0);
		/*
		 * We don't want to really close the device if it is still
		 * in use.  Since every use (buffer, inode, swap, cmap)
		 * holds a reference to the vnode, and because we ensure
		 * that there cannot be more than one vnode per device,
		 * we need only check that we are down to the last
		 * reference before closing.
		 */
		if (vp->v_count > 1)
			return (0);
		cfunc = bdevsw[major(dev)].d_close;
		mode = IFBLK;
		break;

	default:
		panic("blk_close: not special");
	}

	/* XXX what is this doing below the vnode op call */
	if (setjmp(&u.u_qsave)) {
		/*
		 * If device close routine is interrupted,
		 * must return so closef can clean up.
		 */
		error = EINTR;
	} else
		error = (*cfunc)(dev, flag, mode);
	return (error);
}

/*
 * Block device bad operation
 */
blk_badop()
{

	panic("blk_badop called");
	/* NOTREACHED */
}

/*
 * Block device null operation
 */
blk_nullop()
{

	return (0);
}