/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)spec_vnops.c	7.6 (Berkeley) 10/13/89
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "conf.h"
#include "buf.h"
#include "vnode.h"
#include "../ufs/inode.h"
#include "stat.h"
#include "errno.h"
#include "malloc.h"

int     blk_lookup(),
        blk_open(),
        blk_read(),
        blk_write(),
        blk_strategy(),
        blk_ioctl(),
        blk_select(),
        blk_lock(),
        blk_unlock(),
        blk_close(),
        blk_badop(),
        blk_nullop();

int     ufs_getattr(),
        ufs_setattr(),
        ufs_access(),
        ufs_inactive();

struct vnodeops blk_vnodeops = {
        blk_lookup,
        blk_badop,
        blk_badop,
        blk_open,
        blk_close,
        ufs_access,
        ufs_getattr,
        ufs_setattr,
        blk_read,
        blk_write,
        blk_ioctl,
        blk_select,
        blk_badop,
        blk_nullop,
        blk_badop,
        blk_badop,
        blk_badop,
        blk_badop,
        blk_badop,
        blk_badop,
        blk_badop,
        blk_badop,
        blk_badop,
        blk_badop,
        ufs_inactive,
        blk_lock,
        blk_unlock,
        blk_badop,
        blk_strategy,
};

/*
 * Trivial lookup routine that always fails.
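 * Special-file vnodes have no directory entries to search, so ni_vp is
 * cleared, ni_dvp is left pointing at the vnode itself, and ENOTDIR is
 * returned to the caller.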
 */
blk_lookup(vp, ndp)
        struct vnode *vp;
        struct nameidata *ndp;
{

        ndp->ni_dvp = vp;
        ndp->ni_vp = NULL;
        return (ENOTDIR);
}

/*
 * Open called to allow handler
 * of special files to initialize and
 * validate before actual IO.
 */
/* ARGSUSED */
blk_open(vp, mode, cred)
        register struct vnode *vp;
        int mode;
        struct ucred *cred;
{
        dev_t dev = (dev_t)vp->v_rdev;
        register int maj = major(dev);

        switch (vp->v_type) {

        case VCHR:
                if ((u_int)maj >= nchrdev)
                        return (ENXIO);
                return ((*cdevsw[maj].d_open)(dev, mode, S_IFCHR));

        case VBLK:
                if ((u_int)maj >= nblkdev)
                        return (ENXIO);
                return ((*bdevsw[maj].d_open)(dev, mode, S_IFBLK));
        }
        return (0);
}

/*
 * Check access permissions for a block device.
 */
blk_access(vp, mode, cred)
        struct vnode *vp;
        int mode;
        struct ucred *cred;
{

        return (iaccess(VTOI(vp), mode, cred));
}

/*
 * Vnode op for read
 */
blk_read(vp, uio, offp, ioflag, cred)
        register struct vnode *vp;
        struct uio *uio;
        off_t *offp;
        int ioflag;
        struct ucred *cred;
{
        int count, error;

        if (vp->v_type == VBLK && vp->v_data)
                VOP_LOCK(vp);
        uio->uio_offset = *offp;
        count = uio->uio_resid;
        error = readblkvp(vp, uio, cred, ioflag);
        *offp += count - uio->uio_resid;
        if (vp->v_type == VBLK && vp->v_data)
                VOP_UNLOCK(vp);
        return (error);
}

/*
 * Vnode op for write
 */
blk_write(vp, uio, offp, ioflag, cred)
        register struct vnode *vp;
        struct uio *uio;
        off_t *offp;
        int ioflag;
        struct ucred *cred;
{
        int count, error;

        if (vp->v_type == VBLK && vp->v_data)
                VOP_LOCK(vp);
        uio->uio_offset = *offp;
        count = uio->uio_resid;
        error = writeblkvp(vp, uio, cred, ioflag);
        *offp += count - uio->uio_resid;
        if (vp->v_type == VBLK && vp->v_data)
                VOP_UNLOCK(vp);
        return (error);
}

/*
 * Device ioctl operation.
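 * The request is handed unchanged to the d_ioctl entry of the character
 * or block driver selected by the device's major number.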
 */
/* ARGSUSED */
blk_ioctl(vp, com, data, fflag, cred)
        struct vnode *vp;
        register int com;
        caddr_t data;
        int fflag;
        struct ucred *cred;
{
        dev_t dev = vp->v_rdev;

        switch (vp->v_type) {

        case VCHR:
                return ((*cdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

        case VBLK:
                return ((*bdevsw[major(dev)].d_ioctl)(dev, com, data, fflag));

        default:
                panic("blk_ioctl");
                /* NOTREACHED */
        }
}

/* ARGSUSED */
blk_select(vp, which, cred)
        struct vnode *vp;
        int which;
        struct ucred *cred;
{
        register dev_t dev;

        switch (vp->v_type) {

        default:
                return (1);             /* XXX */

        case VCHR:
                dev = vp->v_rdev;
                return (*cdevsw[major(dev)].d_select)(dev, which);
        }
}

/*
 * Just call the device strategy routine
 */
blk_strategy(bp)
        register struct buf *bp;
{
        (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
        return (0);
}

blk_lock(vp)
        struct vnode *vp;
{
        register struct inode *ip = VTOI(vp);

        if (ip)
                ILOCK(ip);
        return (0);
}

blk_unlock(vp)
        struct vnode *vp;
{
        register struct inode *ip = VTOI(vp);

        if (ip)
                IUNLOCK(ip);
        return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
blk_close(vp, flag, cred)
        register struct vnode *vp;
        int flag;
        struct ucred *cred;
{
        register struct inode *ip = VTOI(vp);
        dev_t dev = vp->v_rdev;
        int (*cfunc)();
        int error, mode;

        if (vp->v_count > 1 && !(ip->i_flag & ILOCKED))
                ITIMES(ip, &time, &time);

        switch (vp->v_type) {

        case VCHR:
                if (vp->v_count > 1)
                        return (0);
                cfunc = cdevsw[major(dev)].d_close;
                mode = IFCHR;
                break;

        case VBLK:
                /*
                 * On last close of a block device (that isn't mounted)
                 * we must invalidate any in core blocks, so that
                 * we can, for instance, change floppy disks.
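                 * bflush() first writes back any delayed-write buffers
                 * for this device before the cached blocks are invalidated.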
                 */
                bflush(vp->v_mount);
                if (binval(vp->v_mount))
                        return (0);
                /*
                 * We don't want to really close the device if it is still
                 * in use. Since every use (buffer, inode, swap, cmap)
                 * holds a reference to the vnode, and because we ensure
                 * that there cannot be more than one vnode per device,
                 * we need only check that we are down to the last
                 * reference before closing.
                 */
                if (vp->v_count > 1)
                        return (0);
                cfunc = bdevsw[major(dev)].d_close;
                mode = IFBLK;
                break;

        default:
                panic("blk_close: not special");
        }

        /* XXX what is this doing below the vnode op call */
        if (setjmp(&u.u_qsave)) {
                /*
                 * If device close routine is interrupted,
                 * must return so closef can clean up.
                 */
                error = EINTR;
        } else
                error = (*cfunc)(dev, flag, mode);
        /*
         * Most device close routines don't return errors,
         * and dup2() doesn't work right on error.
         */
        error = 0;              /* XXX */
        return (error);
}

/*
 * Block device bad operation
 */
blk_badop()
{

        panic("blk_badop called");
        /* NOTREACHED */
}

/*
 * Block device null operation
 */
blk_nullop()
{

        return (0);
}