xref: /onnv-gate/usr/src/uts/common/fs/hsfs/hsfs_vnops.c (revision 494:e082e44c7fce)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Vnode operations for the High Sierra filesystem
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/fbuf.h>
#include <sys/dirent.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>

#include <sys/fs/hsfs_spec.h>
#include <sys/fs/hsfs_node.h>
#include <sys/fs/hsfs_impl.h>
#include <sys/fs/hsfs_susp.h>
#include <sys/fs/hsfs_rrip.h>

#include <fs/fs_subr.h>

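/*
 * HSFS is always mounted read-only, so there is never dirty file
 * data to flush; fsync is therefore a no-op.
 */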
/* ARGSUSED */
static int
hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred)
{
	return (0);
}


/*ARGSUSED*/
static int
hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
	struct caller_context *ct)
{
	caddr_t base;
	offset_t diff;
	int error;
	struct hsnode *hp;
	uint_t filesize;

	hp = VTOH(vp);
	/*
	 * if vp is of type VDIR, make sure dirent
	 * is filled up with all info (because of ptbl)
	 */
	if (vp->v_type == VDIR) {
		if (hp->hs_dirent.ext_size == 0)
			hs_filldirent(vp, &hp->hs_dirent);
	}
	filesize = hp->hs_dirent.ext_size;

	/* Sanity checks. */
	if (uiop->uio_resid == 0 ||		/* No data wanted. */
	    uiop->uio_loffset >= MAXOFF_T ||	/* Offset too big. */
	    uiop->uio_loffset >= filesize)	/* Past EOF. */
		return (0);

	do {
		/*
		 * We want to ask for only the "right" amount of data.
		 * In this case that means:-
		 *
		 * We can't get data from beyond our EOF. If asked,
		 * we will give a short read.
		 *
		 * segmap_getmapflt returns buffers of MAXBSIZE bytes.
		 * These buffers are always MAXBSIZE aligned.
		 * If our starting offset is not MAXBSIZE aligned,
		 * we can only ask for less than MAXBSIZE bytes.
		 *
		 * If our requested offset and length are such that
		 * they belong in different MAXBSIZE aligned slots
		 * then we'll be making more than one call on
		 * segmap_getmapflt.
		 *
		 * This diagram shows the variables we use and their
		 * relationships.
		 *
		 * |<-----MAXBSIZE----->|
		 * +--------------------------...+
		 * |.....mapon->|<--n-->|....*...|EOF
		 * +--------------------------...+
		 * uio_loffset->|
		 * uio_resid....|<---------->|
		 * diff.........|<-------------->|
		 *
		 * So, in this case our offset is not aligned
		 * and our request takes us outside of the
		 * MAXBSIZE window. We will break this up into
		 * two segmap_getmapflt calls.
		 */
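		/*
		 * A worked example (illustrative values, assuming
		 * MAXBSIZE is 8K): with uio_loffset == 6144,
		 * uio_resid == 4096 and filesize == 12000, the first
		 * pass computes mapon == 6144, diff == 5856 and
		 * n == MIN(8192 - 6144, 4096) == 2048; the second
		 * pass starts MAXBSIZE-aligned at offset 8192 and
		 * moves the remaining 2048 bytes.
		 */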
		size_t nbytes;
		offset_t mapon;
		size_t n;
		uint_t flags;

		mapon = uiop->uio_loffset & MAXBOFFSET;
		diff = filesize - uiop->uio_loffset;
		nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
		n = MIN(diff, nbytes);
		if (n <= 0) {
			/* EOF or request satisfied. */
			return (0);
		}

		base = segmap_getmapflt(segkmap, vp,
		    (u_offset_t)uiop->uio_loffset, n, 1, S_READ);

		error = uiomove(base + mapon, n, UIO_READ, uiop);

		if (error == 0) {
			/*
			 * if read a whole block, or read to eof,
			 *  won't need this buffer again soon.
			 */
			if (n + mapon == MAXBSIZE ||
			    uiop->uio_loffset == filesize)
				flags = SM_DONTNEED;
			else
				flags = 0;
			error = segmap_release(segkmap, base, flags);
		} else
			(void) segmap_release(segkmap, base, 0);
	} while (error == 0 && uiop->uio_resid > 0);

	return (error);
}

/*ARGSUSED2*/
static int
hsfs_getattr(
	struct vnode *vp,
	struct vattr *vap,
	int flags,
	struct cred *cred)
{
	struct hsnode *hp;
	struct vfs *vfsp;
	struct hsfs *fsp;

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	vfsp = vp->v_vfsp;

	if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
		hs_filldirent(vp, &hp->hs_dirent);
	}
	vap->va_type = IFTOVT(hp->hs_dirent.mode);
	vap->va_mode = hp->hs_dirent.mode;
	vap->va_uid = hp->hs_dirent.uid;
	vap->va_gid = hp->hs_dirent.gid;

	vap->va_fsid = vfsp->vfs_dev;
	vap->va_nodeid = (ino64_t)hp->hs_nodeid;
	vap->va_nlink = hp->hs_dirent.nlink;
	vap->va_size = (offset_t)hp->hs_dirent.ext_size;

	vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
	vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
	vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
	vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
	vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
	vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		vap->va_rdev = hp->hs_dirent.r_dev;
	else
		vap->va_rdev = 0;
	vap->va_blksize = vfsp->vfs_bsize;
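	/*
	 * An illustrative example for the block count computed below
	 * (values assumed): with lbn_shift == 11 (2048-byte logical
	 * blocks), a 4000-byte file carrying a one-block extended
	 * attribute record (xar_len == 1) reports
	 * howmany(4000 + 2048, DEV_BSIZE) == 12 512-byte blocks.
	 */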
	/* no. of blocks = no. of data blocks + no. of xar blocks */
	vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
	    (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
	vap->va_seq = hp->hs_seq;
	return (0);
}

/*ARGSUSED*/
static int
hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred)
{
	struct hsnode *hp;

	if (vp->v_type != VLNK)
		return (EINVAL);

	hp = VTOH(vp);

	if (hp->hs_dirent.sym_link == (char *)NULL)
		return (ENOENT);

	return (uiomove(hp->hs_dirent.sym_link,
	    (size_t)MIN(hp->hs_dirent.ext_size,
	    uiop->uio_resid), UIO_READ, uiop));
}

/*ARGSUSED*/
static void
hsfs_inactive(struct vnode *vp, struct cred *cred)
{
	struct hsnode *hp;
	struct hsfs *fsp;

	int nopage;

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	/*
	 * Note: acquiring and holding v_lock for quite a while
	 * here serializes on the vnode; this is unfortunate, but
	 * likely not to overly impact performance, as the underlying
	 * device (CDROM drive) is quite slow.
	 */
	rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
	mutex_enter(&hp->hs_contents_lock);
	mutex_enter(&vp->v_lock);

	if (vp->v_count < 1) {
		panic("hsfs_inactive: v_count < 1");
		/*NOTREACHED*/
	}

	if (vp->v_count > 1 || (hp->hs_flags & HREF) == 0) {
		vp->v_count--;	/* release hold from vn_rele */
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
		rw_exit(&fsp->hsfs_hash_lock);
		return;
	}
	vp->v_count--;	/* release hold from vn_rele */
	if (vp->v_count == 0) {
		/*
		 * Free the hsnode.
		 * If there are no pages associated with the
		 * hsnode, give it back to the kmem_cache,
		 * else put at the end of this file system's
		 * internal free list.
		 */
		nopage = !vn_has_cached_data(vp);
		hp->hs_flags = 0;
		/*
		 * exit these locks now, since hs_freenode may
		 * kmem_free the hsnode and embedded vnode
		 */
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
		hs_freenode(vp, fsp, nopage);
	} else {
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
	}
	rw_exit(&fsp->hsfs_hash_lock);
}


/*ARGSUSED*/
static int
hsfs_lookup(
	struct vnode *dvp,
	char *nm,
	struct vnode **vpp,
	struct pathname *pnp,
	int flags,
	struct vnode *rdir,
	struct cred *cred)
{
	int error;
	int namelen = (int)strlen(nm);

	if (*nm == '\0') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/*
	 * If we're looking for ourself, life is simple.
	 */
	if (namelen == 1 && *nm == '.') {
		if (error = hs_access(dvp, (mode_t)VEXEC, cred))
			return (error);
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	return (hs_dirlook(dvp, nm, namelen, vpp, cred));
}


/*ARGSUSED*/
static int
hsfs_readdir(
	struct vnode	*vp,
	struct uio	*uiop,
	struct cred	*cred,
	int		*eofp)
{
	struct hsnode	*dhp;
	struct hsfs	*fsp;
	struct hs_direntry hd;
	struct dirent64	*nd;
	int		error;
	uint_t		offset;		/* real offset in directory */
	uint_t		dirsiz;		/* real size of directory */
	uchar_t		*blkp;
	int		hdlen;		/* length of hs directory entry */
	long		ndlen;		/* length of dirent entry */
	int		bytes_wanted;
	size_t		bufsize;	/* size of dirent buffer */
	char		*outbuf;	/* ptr to dirent buffer */
	char		*dname;
	int		dnamelen;
	size_t		dname_size;
	struct fbuf	*fbp;
	uint_t		last_offset;	/* last index into current dir block */
	ulong_t		dir_lbn;	/* lbn of directory */
	ino64_t		dirino;	/* temporary storage before storing in dirent */
	off_t		diroff;

	dhp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	if (dhp->hs_dirent.ext_size == 0)
		hs_filldirent(vp, &dhp->hs_dirent);
	dirsiz = dhp->hs_dirent.ext_size;
	dir_lbn = dhp->hs_dirent.ext_lbn;
	if (uiop->uio_loffset >= dirsiz) {	/* at or beyond EOF */
		if (eofp)
			*eofp = 1;
		return (0);
	}
	ASSERT(uiop->uio_loffset <= MAXOFF_T);
	offset = (uint_t)uiop->uio_offset;

	dname_size = fsp->hsfs_namemax + 1;	/* 1 for the ending NUL */
	dname = kmem_alloc(dname_size, KM_SLEEP);
	bufsize = uiop->uio_resid + sizeof (struct dirent64);

	outbuf = kmem_alloc(bufsize, KM_SLEEP);
	nd = (struct dirent64 *)outbuf;

	while (offset < dirsiz) {
		bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));

		error = fbread(vp, (offset_t)(offset & MAXBMASK),
			(unsigned int)bytes_wanted, S_READ, &fbp);
		if (error)
			goto done;

		blkp = (uchar_t *)fbp->fb_addr;
		last_offset = (offset & MAXBMASK) + fbp->fb_count;

#define	rel_offset(offset) ((offset) & MAXBOFFSET)	/* index into blkp */

		while (offset < last_offset) {
			/*
			 * Very similar validation code is found in
			 * process_dirblock(), hsfs_node.c.
			 * For an explanation, see there.
			 * It may make sense for the future to
			 * "consolidate" the code in hs_parsedir(),
			 * process_dirblock() and hsfs_readdir() into
			 * a single utility function.
			 */
			hdlen = (int)((uchar_t)
				HDE_DIR_LEN(&blkp[rel_offset(offset)]));
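			/*
			 * If the record length is bogus, the code
			 * below skips to the next sector.  For example
			 * (offsets illustrative): a corrupt length
			 * byte at offset 2051 in a directory with
			 * 2048-byte sectors makes us resume scanning
			 * at roundup(2052, HS_SECTOR_SIZE), i.e. 4096.
			 */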
			if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
			    offset + hdlen > last_offset) {
				/*
				 * advance to next sector boundary
				 */
				offset = roundup(offset + 1, HS_SECTOR_SIZE);
				if (hdlen)
					hs_log_bogus_disk_warning(fsp,
					    HSFS_ERR_TRAILING_JUNK, 0);

				continue;
			}

			bzero(&hd, sizeof (hd));

			/*
			 * Just ignore invalid directory entries.
			 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
			 */
			if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
				&hd, dname, &dnamelen)) {
				/*
				 * Determine if there is enough room
				 */
				ndlen = (long)DIRENT64_RECLEN((dnamelen));

				if ((ndlen + ((char *)nd - outbuf)) >
				    uiop->uio_resid) {
					fbrelse(fbp, S_READ);
					goto done; /* output buffer full */
				}

				diroff = offset + hdlen;
				/*
				 * Generate nodeid.
				 * If a directory, nodeid points to the
				 * canonical dirent describing the directory:
				 * the dirent of the "." entry for the
				 * directory, which is pointed to by all
				 * dirents for that directory.
				 * Otherwise, nodeid points to dirent of file.
				 */
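				/*
				 * An illustrative example (values
				 * assumed): with lbn_shift == 11, a
				 * plain file's dirent at byte offset
				 * 5000 of a directory whose extent
				 * starts at lbn 100 normalizes to
				 * lbn 100 + (5000 >> 11) == 102 and
				 * off 5000 & 2047 == 904 before being
				 * packed into the nodeid.
				 */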
				if (hd.type == VDIR) {
					dirino = (ino64_t)
					    MAKE_NODEID(hd.ext_lbn, 0,
					    vp->v_vfsp);
				} else {
					struct hs_volume *hvp;
					offset_t lbn, off;

					/*
					 * Normalize lbn and off
					 */
					hvp = &fsp->hsfs_vol;
					lbn = dir_lbn +
					    (offset >> hvp->lbn_shift);
					off = offset & hvp->lbn_maxoffset;
					dirino = (ino64_t)MAKE_NODEID(lbn,
					    off, vp->v_vfsp);
				}


				/* strncpy(9f) will zero uninitialized bytes */

				ASSERT(strlen(dname) + 1 <=
				    DIRENT64_NAMELEN(ndlen));
				(void) strncpy(nd->d_name, dname,
				    DIRENT64_NAMELEN(ndlen));
				nd->d_reclen = (ushort_t)ndlen;
				nd->d_off = (offset_t)diroff;
				nd->d_ino = dirino;
				nd = (struct dirent64 *)((char *)nd + ndlen);

				/*
				 * free up space allocated for symlink
				 */
				if (hd.sym_link != (char *)NULL) {
					kmem_free(hd.sym_link,
					    (size_t)(hd.ext_size+1));
					hd.sym_link = (char *)NULL;
				}
			}
			offset += hdlen;
		}
		fbrelse(fbp, S_READ);
	}

	/*
	 * Got here for one of the following reasons:
	 *	1) outbuf is full (error == 0)
	 *	2) end of directory reached (error == 0)
	 *	3) error reading directory sector (error != 0)
	 *	4) directory entry crosses sector boundary (error == 0)
	 *
	 * If any directory entries have been copied, don't report
	 * case 4.  Instead, return the valid directory entries.
	 *
	 * If no entries have been copied, report the error.
	 * If case 4, this will be indistinguishable from EOF.
	 */
done:
	ndlen = ((char *)nd - outbuf);
	if (ndlen != 0) {
		error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
		uiop->uio_offset = offset;
	}
	kmem_free(dname, dname_size);
	kmem_free(outbuf, bufsize);
	if (eofp && error == 0)
		*eofp = (uiop->uio_offset >= dirsiz);
	return (error);
}

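/*
 * Encode a file identifier.  The hsfid packs the on-disc location of
 * the node's directory entry (hf_dir_lbn, hf_dir_off), which is enough
 * to find the node again later, e.g. from an NFS file handle.
 */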
static int
hsfs_fid(struct vnode *vp, struct fid *fidp)
{
	struct hsnode *hp;
	struct hsfid *fid;

	if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
		fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
		return (ENOSPC);
	}

	fid = (struct hsfid *)fidp;
	fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	fid->hf_dir_lbn = hp->hs_dir_lbn;
	fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}

/*ARGSUSED*/
static int
hsfs_open(struct vnode **vpp, int flag, struct cred *cred)
{
	return (0);
}

/*ARGSUSED*/
static int
hsfs_close(
	struct vnode *vp,
	int flag,
	int count,
	offset_t offset,
	struct cred *cred)
{
	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);
	return (0);
}

/*ARGSUSED2*/
static int
hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred)
{
	return (hs_access(vp, (mode_t)mode, cred));
}

/*
 * The seek time of a CD-ROM is very long, and the data transfer
 * rate is even worse (max. 150K per sec).  The design decision
 * is to reduce access to the CD-ROM as much as possible, and to
 * transfer a sizable block (read-ahead) of data at a time.
 * UFS-style read-ahead of one block at a time is not appropriate
 * here and is not supported.
 */

/*
 * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
 */
#define	KLUSTSIZE	(56 * 1024)
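/*
 * 56K is a convenient choice: it is a common multiple of the 4K and
 * 8K page sizes in use, and it stays within MAXPHYS, which has
 * historically been as small as 56K on some platforms.
 */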
/* we don't support read ahead */
int hsfs_lostpage;	/* no. of times we lost original page */

/*
 * Used to prevent biodone() from releasing buf resources that
 * we didn't allocate in quite the usual way.
 */
/*ARGSUSED*/
int
hsfs_iodone(struct buf *bp)
{
	sema_v(&bp->b_io);
	return (0);
}

/*
 * Each file may have a different interleaving on disk.  This makes
 * things somewhat interesting.  The gist is that there are some
 * number of contiguous data sectors, followed by some other number
 * of contiguous skip sectors.  The sum of those two sets of sectors
 * defines the interleave size.  Unfortunately, it means that we generally
 * can't simply read N sectors starting at a given offset to satisfy
 * any given request.
 *
 * What we do is get the relevant memory pages via pvn_read_kluster(),
 * then stride through the interleaves, setting up a buf for each
 * sector that needs to be brought in.  Instead of kmem_alloc'ing
 * space for the sectors, though, we just point at the appropriate
 * spot in the relevant page for each of them.  This saves us a bunch
 * of copying.
 */
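/*
 * A worked example (illustrative values): with 2048-byte logical
 * blocks, intlf_sz == 2 and intlf_sk == 1 describe 4096 data bytes
 * followed by one 2048-byte skip sector, so chunk_lbn_count == 3 and
 * chunk_data_bytes == 4096.  File byte 5000 then maps to
 * which_chunk_lbn == 1, offset_lbn == 3, offset_bytes == 6144 and
 * offset_extra == 904 in the loop below.
 */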
/*ARGSUSED*/
static int
hsfs_getapage(
	struct vnode *vp,
	u_offset_t off,
	size_t len,
	uint_t *protp,
	struct page *pl[],
	size_t plsz,
	struct seg *seg,
	caddr_t addr,
	enum seg_rw rw,
	struct cred *cred)
{
	struct hsnode *hp;
	struct hsfs *fsp;
	int	err;
	struct buf *bufs;
	caddr_t *vas;
	caddr_t va;
	struct page *pp, *searchp, *lastp;
	page_t	*pagefound;
	offset_t	bof;
	struct vnode *devvp;
	ulong_t	byte_offset;
	size_t	io_len_tmp;
	uint_t	io_off, io_len;
	uint_t	xlen;
	uint_t	filsiz;
	uint_t	secsize;
	uint_t	bufcnt;
	uint_t	bufsused;
	uint_t	count;
	uint_t	io_end;
	uint_t	which_chunk_lbn;
	uint_t	offset_lbn;
	uint_t	offset_extra;
	offset_t	offset_bytes;
	uint_t	remaining_bytes;
	uint_t	extension;
	int	remainder;	/* must be signed */
	int	chunk_lbn_count;
	int	chunk_data_bytes;
	int	xarsiz;
	diskaddr_t driver_block;
	u_offset_t io_off_tmp;

	/*
	 * We don't support asynchronous operation at the moment, so
	 * just pretend we did it.  If the pages are ever actually
	 * needed, they'll get brought in then.
	 */
	if (pl == NULL)
		return (0);

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	devvp = fsp->hsfs_devvp;
	secsize = fsp->hsfs_vol.lbn_size;  /* bytes per logical block */

	/* file data size */
	filsiz = hp->hs_dirent.ext_size;

	/* disk addr for start of file */
	bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);

	/* xarsiz byte must be skipped for data */
	xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;

	/* how many logical blocks in an interleave (data+skip) */
	chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;

	if (chunk_lbn_count == 0) {
		chunk_lbn_count = 1;
	}

	/*
	 * Convert interleaving size into bytes.  The zero case
	 * (no interleaving) optimization is handled as a side-
	 * effect of the read-ahead logic.
	 */
	if (hp->hs_dirent.intlf_sz == 0) {
		chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
	} else {
		chunk_data_bytes = LBN_TO_BYTE(hp->hs_dirent.intlf_sz,
			vp->v_vfsp);
	}

reread:
	err = 0;
	pagefound = 0;

	/*
	 * Do some read-ahead.  This mostly saves us a bit of
	 * system cpu time more than anything else when doing
	 * sequential reads.  At some point, could do the
	 * read-ahead asynchronously which might gain us something
	 * on wall time, but it seems unlikely....
	 *
	 * We do the easy case here, which is to read through
	 * the end of the chunk, minus whatever's at the end that
	 * won't exactly fill a page.
	 */
	which_chunk_lbn = (off + len) / chunk_data_bytes;
	extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
	extension -= (extension % PAGESIZE);
	if (extension != 0 && extension < filsiz - off) {
		len = extension;
	} else {
		len = PAGESIZE;
	}
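	/*
	 * For example (illustrative, assuming 4K pages and
	 * chunk_data_bytes == 4096): a one-page request at off == 0
	 * yields which_chunk_lbn == 1 and extension == 8192, so we
	 * read through the end of the next chunk instead of fetching
	 * a single page.
	 */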
	/*
	 * Some CD writers don't write sectors that aren't used.  Also,
	 * there's no point in reading sectors we'll never look at.  So,
	 * if we're asked to go beyond the end of a file, truncate to the
	 * length of that file.
	 *
	 * Additionally, this behaviour is required by section 6.4.5 of
	 * ISO 9660:1988(E).
	 */
	if (len > (filsiz - off)) {
		len = filsiz - off;
	}

	/* A little paranoia. */
	ASSERT(len > 0);
761206Speterte 
7620Sstevel@tonic-gate 	/*
7630Sstevel@tonic-gate 	 * After all that, make sure we're asking for things in units
7640Sstevel@tonic-gate 	 * that bdev_strategy() will understand (see bug 4202551).
7650Sstevel@tonic-gate 	 */
7660Sstevel@tonic-gate 	len = roundup(len, DEV_BSIZE);
7670Sstevel@tonic-gate 
7680Sstevel@tonic-gate 	pp = NULL;
7690Sstevel@tonic-gate again:
7700Sstevel@tonic-gate 	/* search for page in buffer */
7710Sstevel@tonic-gate 	if ((pagefound = page_exists(vp, off)) == 0) {
7720Sstevel@tonic-gate 		/*
7730Sstevel@tonic-gate 		 * Need to really do disk IO to get the page.
7740Sstevel@tonic-gate 		 */
7750Sstevel@tonic-gate 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
7760Sstevel@tonic-gate 		    &io_len_tmp, off, len, 0);
7770Sstevel@tonic-gate 
7780Sstevel@tonic-gate 		if (pp == NULL)
7790Sstevel@tonic-gate 			goto again;
7800Sstevel@tonic-gate 
7810Sstevel@tonic-gate 		io_off = (uint_t)io_off_tmp;
7820Sstevel@tonic-gate 		io_len = (uint_t)io_len_tmp;
7830Sstevel@tonic-gate 
7840Sstevel@tonic-gate 		/* check for truncation */
7850Sstevel@tonic-gate 		/*
7860Sstevel@tonic-gate 		 * xxx Clean up and return EIO instead?
7870Sstevel@tonic-gate 		 * xxx Ought to go to u_offset_t for everything, but we
7880Sstevel@tonic-gate 		 * xxx call lots of things that want uint_t arguments.
7890Sstevel@tonic-gate 		 */
7900Sstevel@tonic-gate 		ASSERT(io_off == io_off_tmp);
7910Sstevel@tonic-gate 
7920Sstevel@tonic-gate 		/*
7930Sstevel@tonic-gate 		 * get enough buffers for worst-case scenario
7940Sstevel@tonic-gate 		 * (i.e., no coalescing possible).
7950Sstevel@tonic-gate 		 */
7960Sstevel@tonic-gate 		bufcnt = (len + secsize - 1) / secsize;
7970Sstevel@tonic-gate 		bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
7980Sstevel@tonic-gate 		vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
7990Sstevel@tonic-gate 		for (count = 0; count < bufcnt; count++) {
8000Sstevel@tonic-gate 			bufs[count].b_edev = devvp->v_rdev;
8010Sstevel@tonic-gate 			bufs[count].b_dev = cmpdev(devvp->v_rdev);
8020Sstevel@tonic-gate 			bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
8030Sstevel@tonic-gate 			bufs[count].b_iodone = hsfs_iodone;
8040Sstevel@tonic-gate 			bufs[count].b_vp = vp;
8050Sstevel@tonic-gate 			bufs[count].b_file = vp;
8060Sstevel@tonic-gate 			sema_init(&bufs[count].b_io, 0, NULL,
8070Sstevel@tonic-gate 			    SEMA_DEFAULT, NULL);
8080Sstevel@tonic-gate 			sema_init(&bufs[count].b_sem, 0, NULL,
8090Sstevel@tonic-gate 			    SEMA_DEFAULT, NULL);
8100Sstevel@tonic-gate 		}
8110Sstevel@tonic-gate 
812206Speterte 		/*
813206Speterte 		 * If our filesize is not an integer multiple of PAGESIZE,
814206Speterte 		 * we zero that part of the last page that's between EOF and
815206Speterte 		 * the PAGESIZE boundary.
816206Speterte 		 */
8170Sstevel@tonic-gate 		xlen = io_len & PAGEOFFSET;
8180Sstevel@tonic-gate 		if (xlen != 0)
8190Sstevel@tonic-gate 			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
8200Sstevel@tonic-gate 
8210Sstevel@tonic-gate 		va = NULL;
8220Sstevel@tonic-gate 		lastp = NULL;
8230Sstevel@tonic-gate 		searchp = pp;
8240Sstevel@tonic-gate 		io_end = io_off + io_len;
8250Sstevel@tonic-gate 		for (count = 0, byte_offset = io_off;
8260Sstevel@tonic-gate 			byte_offset < io_end;
8270Sstevel@tonic-gate 			count++) {
8280Sstevel@tonic-gate 			ASSERT(count < bufcnt);
8290Sstevel@tonic-gate 
8300Sstevel@tonic-gate 			/* Compute disk address for interleaving. */
8310Sstevel@tonic-gate 
8320Sstevel@tonic-gate 			/* considered without skips */
8330Sstevel@tonic-gate 			which_chunk_lbn = byte_offset / chunk_data_bytes;
8340Sstevel@tonic-gate 
8350Sstevel@tonic-gate 			/* factor in skips */
8360Sstevel@tonic-gate 			offset_lbn = which_chunk_lbn * chunk_lbn_count;
8370Sstevel@tonic-gate 
8380Sstevel@tonic-gate 			/* convert to physical byte offset for lbn */
8390Sstevel@tonic-gate 			offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
8400Sstevel@tonic-gate 
8410Sstevel@tonic-gate 			/* don't forget offset into lbn */
8420Sstevel@tonic-gate 			offset_extra = byte_offset % chunk_data_bytes;
8430Sstevel@tonic-gate 
8440Sstevel@tonic-gate 			/* get virtual block number for driver */
8450Sstevel@tonic-gate 			driver_block = lbtodb(bof + xarsiz
8460Sstevel@tonic-gate 				+ offset_bytes + offset_extra);
8470Sstevel@tonic-gate 
8480Sstevel@tonic-gate 			if (lastp != searchp) {
8490Sstevel@tonic-gate 				/* this branch taken first time through loop */
8500Sstevel@tonic-gate 				va = vas[count]
8510Sstevel@tonic-gate 					= ppmapin(searchp, PROT_WRITE,
8520Sstevel@tonic-gate 						(caddr_t)-1);
8530Sstevel@tonic-gate 				/* ppmapin() guarantees not to return NULL */
8540Sstevel@tonic-gate 			} else {
8550Sstevel@tonic-gate 				vas[count] = NULL;
8560Sstevel@tonic-gate 			}
8570Sstevel@tonic-gate 
8580Sstevel@tonic-gate 			bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
8590Sstevel@tonic-gate 			bufs[count].b_offset =
8600Sstevel@tonic-gate 			    (offset_t)(byte_offset - io_off + off);
8610Sstevel@tonic-gate 
8620Sstevel@tonic-gate 			/*
8630Sstevel@tonic-gate 			 * We specifically use the b_lblkno member here
8640Sstevel@tonic-gate 			 * as even in the 32 bit world driver_block can
8650Sstevel@tonic-gate 			 * get very large in line with the ISO9660 spec.
8660Sstevel@tonic-gate 			 */
8670Sstevel@tonic-gate 
8680Sstevel@tonic-gate 			bufs[count].b_lblkno = driver_block;
8690Sstevel@tonic-gate 
8700Sstevel@tonic-gate 			remaining_bytes = ((which_chunk_lbn + 1)
8710Sstevel@tonic-gate 				* chunk_data_bytes)
8720Sstevel@tonic-gate 				- byte_offset;
8730Sstevel@tonic-gate 
8740Sstevel@tonic-gate 			/*
8750Sstevel@tonic-gate 			 * remaining_bytes can't be zero, as we derived
8760Sstevel@tonic-gate 			 * which_chunk_lbn directly from byte_offset.
8770Sstevel@tonic-gate 			 */
8780Sstevel@tonic-gate 			if ((remaining_bytes+byte_offset) < (off+len)) {
8790Sstevel@tonic-gate 				/* coalesce-read the rest of the chunk */
8800Sstevel@tonic-gate 				bufs[count].b_bcount = remaining_bytes;
8810Sstevel@tonic-gate 			} else {
8820Sstevel@tonic-gate 				/* get the final bits */
8830Sstevel@tonic-gate 				bufs[count].b_bcount = off + len - byte_offset;
8840Sstevel@tonic-gate 			}
8850Sstevel@tonic-gate 
8860Sstevel@tonic-gate 			/*
8870Sstevel@tonic-gate 			 * It would be nice to do multiple pages'
8880Sstevel@tonic-gate 			 * worth at once here when the opportunity
8890Sstevel@tonic-gate 			 * arises, as that has been shown to improve
8900Sstevel@tonic-gate 			 * our wall time.  However, to do that
8910Sstevel@tonic-gate 			 * requires that we use the pageio subsystem,
8920Sstevel@tonic-gate 			 * which doesn't mix well with what we're
8930Sstevel@tonic-gate 			 * already using here.  We can't use pageio
8940Sstevel@tonic-gate 			 * all the time, because that subsystem
8950Sstevel@tonic-gate 			 * assumes that a page is stored in N
8960Sstevel@tonic-gate 			 * contiguous blocks on the device.
8970Sstevel@tonic-gate 			 * Interleaving violates that assumption.
8980Sstevel@tonic-gate 			 */
8990Sstevel@tonic-gate 
9000Sstevel@tonic-gate 			remainder = PAGESIZE - (byte_offset % PAGESIZE);
9010Sstevel@tonic-gate 			if (bufs[count].b_bcount > remainder) {
9020Sstevel@tonic-gate 				bufs[count].b_bcount = remainder;
9030Sstevel@tonic-gate 			}
9040Sstevel@tonic-gate 
9050Sstevel@tonic-gate 			bufs[count].b_bufsize = bufs[count].b_bcount;
9060Sstevel@tonic-gate 			byte_offset += bufs[count].b_bcount;
9070Sstevel@tonic-gate 
9080Sstevel@tonic-gate 			(void) bdev_strategy(&bufs[count]);
9090Sstevel@tonic-gate 
9100Sstevel@tonic-gate 			lwp_stat_update(LWP_STAT_INBLK, 1);
9110Sstevel@tonic-gate 			lastp = searchp;
9120Sstevel@tonic-gate 			if ((remainder - bufs[count].b_bcount) < 1) {
9130Sstevel@tonic-gate 				searchp = searchp->p_next;
9140Sstevel@tonic-gate 			}
9150Sstevel@tonic-gate 		}
9160Sstevel@tonic-gate 
9170Sstevel@tonic-gate 		bufsused = count;
9180Sstevel@tonic-gate 		/* Now wait for everything to come in */
9190Sstevel@tonic-gate 		for (count = 0; count < bufsused; count++) {
9200Sstevel@tonic-gate 			if (err == 0) {
9210Sstevel@tonic-gate 				err = biowait(&bufs[count]);
9220Sstevel@tonic-gate 			} else
9230Sstevel@tonic-gate 				(void) biowait(&bufs[count]);
9240Sstevel@tonic-gate 		}
9250Sstevel@tonic-gate 
9260Sstevel@tonic-gate 		/* Don't leak resources */
9270Sstevel@tonic-gate 		for (count = 0; count < bufcnt; count++) {
9280Sstevel@tonic-gate 			sema_destroy(&bufs[count].b_io);
9290Sstevel@tonic-gate 			sema_destroy(&bufs[count].b_sem);
9300Sstevel@tonic-gate 			if (count < bufsused && vas[count] != NULL) {
9310Sstevel@tonic-gate 				ppmapout(vas[count]);
9320Sstevel@tonic-gate 			}
9330Sstevel@tonic-gate 		}
9340Sstevel@tonic-gate 
9350Sstevel@tonic-gate 		kmem_free(vas, bufcnt * sizeof (caddr_t));
9360Sstevel@tonic-gate 		kmem_free(bufs, bufcnt * sizeof (struct buf));
9370Sstevel@tonic-gate 	}
9380Sstevel@tonic-gate 
9390Sstevel@tonic-gate 	if (err) {
9400Sstevel@tonic-gate 		pvn_read_done(pp, B_ERROR);
9410Sstevel@tonic-gate 		return (err);
9420Sstevel@tonic-gate 	}
9430Sstevel@tonic-gate 
9440Sstevel@tonic-gate 	/*
9450Sstevel@tonic-gate 	 * Lock the requested page, and the one after it if possible.
9460Sstevel@tonic-gate 	 * Don't bother if our caller hasn't given us a place to stash
9470Sstevel@tonic-gate 	 * the page pointers, since otherwise we'd lock pages that would
9480Sstevel@tonic-gate 	 * never get unlocked.
9490Sstevel@tonic-gate 	 */
9500Sstevel@tonic-gate 	if (pagefound) {
9510Sstevel@tonic-gate 		int index;
9520Sstevel@tonic-gate 		ulong_t soff;
9530Sstevel@tonic-gate 
9540Sstevel@tonic-gate 		/*
9550Sstevel@tonic-gate 		 * Make sure it's in memory before we say it's here.
9560Sstevel@tonic-gate 		 */
9570Sstevel@tonic-gate 		if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
9580Sstevel@tonic-gate 			hsfs_lostpage++;
9590Sstevel@tonic-gate 			goto reread;
9600Sstevel@tonic-gate 		}
9610Sstevel@tonic-gate 
9620Sstevel@tonic-gate 		pl[0] = pp;
9630Sstevel@tonic-gate 		index = 1;
9640Sstevel@tonic-gate 
9650Sstevel@tonic-gate 		/*
9660Sstevel@tonic-gate 		 * Try to lock the next page, if it exists, without
9670Sstevel@tonic-gate 		 * blocking.
9680Sstevel@tonic-gate 		 */
9690Sstevel@tonic-gate 		plsz -= PAGESIZE;
9700Sstevel@tonic-gate 		/* LINTED (plsz is unsigned) */
9710Sstevel@tonic-gate 		for (soff = off + PAGESIZE; plsz > 0;
9720Sstevel@tonic-gate 		    soff += PAGESIZE, plsz -= PAGESIZE) {
9730Sstevel@tonic-gate 			pp = page_lookup_nowait(vp, (u_offset_t)soff,
9740Sstevel@tonic-gate 					SE_SHARED);
9750Sstevel@tonic-gate 			if (pp == NULL)
9760Sstevel@tonic-gate 				break;
9770Sstevel@tonic-gate 			pl[index++] = pp;
9780Sstevel@tonic-gate 		}
9790Sstevel@tonic-gate 		pl[index] = NULL;
9800Sstevel@tonic-gate 		return (0);
9810Sstevel@tonic-gate 	}
9820Sstevel@tonic-gate 
9830Sstevel@tonic-gate 	if (pp != NULL) {
9840Sstevel@tonic-gate 		pvn_plist_init(pp, pl, plsz, off, io_len, rw);
9850Sstevel@tonic-gate 	}
9860Sstevel@tonic-gate 
9870Sstevel@tonic-gate 	return (err);
9880Sstevel@tonic-gate }
9890Sstevel@tonic-gate 
9900Sstevel@tonic-gate static int
9910Sstevel@tonic-gate hsfs_getpage(
9920Sstevel@tonic-gate 	struct vnode *vp,
9930Sstevel@tonic-gate 	offset_t off,
9940Sstevel@tonic-gate 	size_t len,
9950Sstevel@tonic-gate 	uint_t *protp,
9960Sstevel@tonic-gate 	struct page *pl[],
9970Sstevel@tonic-gate 	size_t plsz,
9980Sstevel@tonic-gate 	struct seg *seg,
9990Sstevel@tonic-gate 	caddr_t addr,
10000Sstevel@tonic-gate 	enum seg_rw rw,
10010Sstevel@tonic-gate 	struct cred *cred)
10020Sstevel@tonic-gate {
10030Sstevel@tonic-gate 	int err;
10040Sstevel@tonic-gate 	uint_t filsiz;
10050Sstevel@tonic-gate 	struct hsnode *hp = VTOH(vp);
10060Sstevel@tonic-gate 
10070Sstevel@tonic-gate 	/* does not support write */
10080Sstevel@tonic-gate 	if (rw == S_WRITE) {
10090Sstevel@tonic-gate 		panic("write attempt on READ ONLY HSFS");
10100Sstevel@tonic-gate 		/*NOTREACHED*/
10110Sstevel@tonic-gate 	}
10120Sstevel@tonic-gate 
10130Sstevel@tonic-gate 	if (vp->v_flag & VNOMAP) {
10140Sstevel@tonic-gate 		return (ENOSYS);
10150Sstevel@tonic-gate 	}
10160Sstevel@tonic-gate 
10170Sstevel@tonic-gate 	ASSERT(off <= MAXOFF_T);
10180Sstevel@tonic-gate 
10190Sstevel@tonic-gate 	/*
10200Sstevel@tonic-gate 	 * Determine file data size for EOF check.
10210Sstevel@tonic-gate 	 */
10220Sstevel@tonic-gate 	filsiz = hp->hs_dirent.ext_size;
10230Sstevel@tonic-gate 	if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
10240Sstevel@tonic-gate 		return (EFAULT);	/* beyond EOF */
10250Sstevel@tonic-gate 
10260Sstevel@tonic-gate 	if (protp != NULL)
10270Sstevel@tonic-gate 		*protp = PROT_ALL;
10280Sstevel@tonic-gate 
10290Sstevel@tonic-gate 	if (len <= PAGESIZE)
10300Sstevel@tonic-gate 		err = hsfs_getapage(vp, (u_offset_t)off, len, protp, pl, plsz,
10310Sstevel@tonic-gate 		    seg, addr, rw, cred);
10320Sstevel@tonic-gate 	else
10330Sstevel@tonic-gate 		err = pvn_getpages(hsfs_getapage, vp, off, len, protp,
10340Sstevel@tonic-gate 		    pl, plsz, seg, addr, rw, cred);
10350Sstevel@tonic-gate 
10360Sstevel@tonic-gate 	return (err);
10370Sstevel@tonic-gate }
10380Sstevel@tonic-gate 
10390Sstevel@tonic-gate 
10400Sstevel@tonic-gate 
10410Sstevel@tonic-gate /*
10420Sstevel@tonic-gate  * This function should never be called. We need to have it to pass
10430Sstevel@tonic-gate  * it as an argument to other functions.
10440Sstevel@tonic-gate  */
10450Sstevel@tonic-gate /*ARGSUSED*/
10460Sstevel@tonic-gate int
10470Sstevel@tonic-gate hsfs_putapage(
10480Sstevel@tonic-gate 	vnode_t		*vp,
10490Sstevel@tonic-gate 	page_t		*pp,
10500Sstevel@tonic-gate 	u_offset_t	*offp,
10510Sstevel@tonic-gate 	size_t		*lenp,
10520Sstevel@tonic-gate 	int		flags,
10530Sstevel@tonic-gate 	cred_t		*cr)
10540Sstevel@tonic-gate {
10550Sstevel@tonic-gate 	/* should never happen - just destroy it */
10560Sstevel@tonic-gate 	cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
10570Sstevel@tonic-gate 	pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
10580Sstevel@tonic-gate 	return (0);
10590Sstevel@tonic-gate }
10600Sstevel@tonic-gate 
10610Sstevel@tonic-gate 
10620Sstevel@tonic-gate /*
10630Sstevel@tonic-gate  * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
10640Sstevel@tonic-gate  * B_INVAL is set by:
10650Sstevel@tonic-gate  *
10660Sstevel@tonic-gate  *	1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
10670Sstevel@tonic-gate  *	2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
10680Sstevel@tonic-gate  *	   which translates to an MC_SYNC with the MS_INVALIDATE flag.
10690Sstevel@tonic-gate  *
10700Sstevel@tonic-gate  * The B_FREE (as well as the B_DONTNEED) flag is set when the
10710Sstevel@tonic-gate  * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
10720Sstevel@tonic-gate  * from SEGVN to release pages behind a pagefault.
10730Sstevel@tonic-gate  */
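/*
 * For example, msync(addr, len, MS_INVALIDATE) on a mapping of an
 * HSFS file reaches this function with B_INVAL set, and the pages
 * covering the range are destroyed rather than written back.
 */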
/*ARGSUSED*/
static int
hsfs_putpage(
	struct vnode	*vp,
	offset_t	off,
	size_t		len,
	int		flags,
	struct cred	*cr)
{
	int error = 0;

	if (vp->v_count == 0) {
		panic("hsfs_putpage: bad v_count");
		/*NOTREACHED*/
	}

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	ASSERT(off <= MAXOFF_T);

	if (!vn_has_cached_data(vp))	/* no pages mapped */
		return (0);

	if (len == 0)		/* from 'off' to EOF */
		error = pvn_vplist_dirty(vp, off,
					hsfs_putapage, flags, cr);
	else {
		offset_t end_off = off + len;
		offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
		offset_t io_off;

		file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
		if (end_off > file_size)
			end_off = file_size;

		for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
			page_t *pp;

			/*
			 * We insist on getting the page only if we are
			 * about to invalidate, free or write it and
			 * the B_ASYNC flag is not set.
			 */
			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
				pp = page_lookup(vp, io_off,
					(flags & (B_INVAL | B_FREE)) ?
					    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
					(flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL)
				continue;
			/*
			 * Normally pvn_getdirty() should return 0, which
			 * implies that it has done the job for us.
			 * The shouldn't-happen scenario is when it returns 1.
			 * This means that the page has been modified and
			 * needs to be put back.
			 * Since we can't write on a CD, we fake a failed
			 * I/O and force pvn_write_done() to destroy the page.
			 */
			if (pvn_getdirty(pp, flags) == 1) {
				cmn_err(CE_NOTE,
					"hsfs_putpage: dirty HSFS page");
				pvn_write_done(pp, flags |
				    B_ERROR | B_WRITE | B_INVAL | B_FORCE);
			}
		}
	}
	return (error);
}


/*ARGSUSED*/
static int
hsfs_map(
	struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t *addrp,
	size_t len,
	uchar_t prot,
	uchar_t maxprot,
	uint_t flags,
	struct cred *cred)
{
	struct segvn_crargs vn_a;
	int error;

	/* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off > MAXOFF_T || off < 0 ||
	    (off + len) < 0 || (off + len) > MAXOFF_T)
		return (ENXIO);

	if (vp->v_type != VREG) {
		return (ENODEV);
	}

	/*
	 * If file is being locked, disallow mapping.
	 */
	if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
		return (EAGAIN);

	as_rangelock(as);

	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, off, 1, flags);
		if (*addrp == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		/*
		 * User specified address - blow away any previous mappings
		 */
		(void) as_unmap(as, *addrp, len);
	}

	vn_a.vp = vp;
	vn_a.offset = off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.cred = cred;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);
	return (error);
}

/* ARGSUSED */
static int
hsfs_addmap(
	struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t addr,
	size_t len,
	uchar_t prot,
	uchar_t maxprot,
	uint_t flags,
	struct cred *cr)
{
	struct hsnode *hp;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	hp->hs_mapcnt += btopr(len);
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}

/*ARGSUSED*/
static int
hsfs_delmap(
	struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t addr,
	size_t len,
	uint_t prot,
	uint_t maxprot,
	uint_t flags,
	struct cred *cr)
{
	struct hsnode *hp;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	hp->hs_mapcnt -= btopr(len);	/* Count released mappings */
	ASSERT(hp->hs_mapcnt >= 0);
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}

/* ARGSUSED */
static int
hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}

/* ARGSUSED */
static int
hsfs_frlock(
	struct vnode *vp,
	int cmd,
	struct flock64 *bfp,
	int flag,
	offset_t offset,
	struct flk_callback *flk_cbp,
	cred_t *cr)
{
	struct hsnode *hp = VTOH(vp);

	/*
	 * If the file is being mapped, disallow fs_frlock.
	 * We are not holding the hs_contents_lock while checking
	 * hs_mapcnt because the current locking strategy drops all
	 * locks before calling fs_frlock.
	 * So, hs_mapcnt could change before we enter fs_frlock making
	 * it meaningless to have held hs_contents_lock in the first place.
	 */
	if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
		return (EAGAIN);

	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr));
}

const fs_operation_def_t hsfs_vnodeops_template[] = {
	VOPNAME_OPEN, hsfs_open,
	VOPNAME_CLOSE, hsfs_close,
	VOPNAME_READ, hsfs_read,
	VOPNAME_GETATTR, hsfs_getattr,
	VOPNAME_ACCESS, hsfs_access,
	VOPNAME_LOOKUP, hsfs_lookup,
	VOPNAME_READDIR, hsfs_readdir,
	VOPNAME_READLINK, hsfs_readlink,
	VOPNAME_FSYNC, hsfs_fsync,
	VOPNAME_INACTIVE, (fs_generic_func_p) hsfs_inactive,
	VOPNAME_FID, hsfs_fid,
	VOPNAME_SEEK, hsfs_seek,
	VOPNAME_FRLOCK, hsfs_frlock,
	VOPNAME_GETPAGE, hsfs_getpage,
	VOPNAME_PUTPAGE, hsfs_putpage,
	VOPNAME_MAP, (fs_generic_func_p) hsfs_map,
	VOPNAME_ADDMAP, (fs_generic_func_p) hsfs_addmap,
	VOPNAME_DELMAP, hsfs_delmap,
	NULL, NULL
};

struct vnodeops *hsfs_vnodeops;