/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Vnode operations for the High Sierra filesystem
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/fbuf.h>
#include <sys/dirent.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>

#include <sys/fs/hsfs_spec.h>
#include <sys/fs/hsfs_node.h>
#include <sys/fs/hsfs_impl.h>
#include <sys/fs/hsfs_susp.h>
#include <sys/fs/hsfs_rrip.h>

#include <fs/fs_subr.h>

/* ARGSUSED */
static int
hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred)
{
	return (0);
}


/*ARGSUSED*/
static int
hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
	struct caller_context *ct)
{
	struct hsnode *hp;
	ulong_t off;
	long mapon, on;
	caddr_t base;
	uint_t	filesize;
	long nbytes, n;
	uint_t flags;
	int error;

	hp = VTOH(vp);
	/*
	 * if vp is of type VDIR, make sure dirent
	 * is filled up with all info (because of ptbl)
	 */
	if (vp->v_type == VDIR) {
		if (hp->hs_dirent.ext_size == 0)
			hs_filldirent(vp, &hp->hs_dirent);
	}
	filesize = hp->hs_dirent.ext_size;

	if (uiop->uio_loffset >= MAXOFF_T) {
		error = 0;
		goto out;
	}

	if (uiop->uio_offset >= filesize) {
		error = 0;
		goto out;
	}

	do {
		/* map file to correct page boundary */
		off = uiop->uio_offset & MAXBMASK;
		mapon = uiop->uio_offset & MAXBOFFSET;
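		/*
		 * Illustrative example (not from the original source,
		 * and assuming MAXBSIZE is 8K): for uio_offset 0x2345,
		 * off = 0x2000 (the 8K-aligned map window) and
		 * mapon = 0x345 (the offset within that window).
		 */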

		/* compute how much to read from the current page */
		on = (uiop->uio_offset) & PAGEOFFSET;
		nbytes = MIN(PAGESIZE - on, uiop->uio_resid);
		/* adjust down if > EOF */
		n = MIN((filesize - uiop->uio_offset), nbytes);
		if (n == 0) {
			error = 0;
			goto out;
		}

		/* map the file into memory */
		base = segmap_getmapflt(segkmap, vp, (u_offset_t)off,
					MAXBSIZE, 1, S_READ);

		error = uiomove(base+mapon, (size_t)n, UIO_READ, uiop);
		if (error == 0) {
			/*
			 * if we read a whole block, or read to EOF,
			 * we won't need this buffer again soon.
			 */
			if (n + on == PAGESIZE ||
			    uiop->uio_offset == filesize)
				flags = SM_DONTNEED;
			else
				flags = 0;
			error = segmap_release(segkmap, base, flags);
		} else
			(void) segmap_release(segkmap, base, 0);

	} while (error == 0 && uiop->uio_resid > 0);

out:
	return (error);
}

/*ARGSUSED2*/
static int
hsfs_getattr(
	struct vnode *vp,
	struct vattr *vap,
	int flags,
	struct cred *cred)
{
	struct hsnode *hp;
	struct vfs *vfsp;
	struct hsfs *fsp;

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	vfsp = vp->v_vfsp;

	if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
		hs_filldirent(vp, &hp->hs_dirent);
	}
	vap->va_type = IFTOVT(hp->hs_dirent.mode);
	vap->va_mode = hp->hs_dirent.mode;
	vap->va_uid = hp->hs_dirent.uid;
	vap->va_gid = hp->hs_dirent.gid;

	vap->va_fsid = vfsp->vfs_dev;
	vap->va_nodeid = (ino64_t)hp->hs_nodeid;
	vap->va_nlink = hp->hs_dirent.nlink;
	vap->va_size = (offset_t)hp->hs_dirent.ext_size;

	vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
	vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
	vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
	vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
	vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
	vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		vap->va_rdev = hp->hs_dirent.r_dev;
	else
		vap->va_rdev = 0;
	vap->va_blksize = vfsp->vfs_bsize;
	/* no. of blocks = no. of data blocks + no. of xar blocks */
	vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
	    (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
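	/*
	 * Illustrative example (not from the original source): with
	 * 2048-byte logical blocks (lbn_shift == 11), xar_len == 1 and
	 * ext_size == 3000, va_nblocks = howmany(3000 + 2048, DEV_BSIZE)
	 * = 10 blocks for a 512-byte DEV_BSIZE.
	 */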
	vap->va_seq = hp->hs_seq;
	return (0);
}

/*ARGSUSED*/
static int
hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred)
{
	struct hsnode *hp;

	if (vp->v_type != VLNK)
		return (EINVAL);

	hp = VTOH(vp);

	if (hp->hs_dirent.sym_link == (char *)NULL)
		return (ENOENT);

	return (uiomove(hp->hs_dirent.sym_link,
	    (size_t)MIN(hp->hs_dirent.ext_size,
	    uiop->uio_resid), UIO_READ, uiop));
}

/*ARGSUSED*/
static void
hsfs_inactive(struct vnode *vp, struct cred *cred)
{
	struct hsnode *hp;
	struct hsfs *fsp;

	int nopage;

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	/*
	 * Note: acquiring and holding v_lock for quite a while
	 * here serializes on the vnode; this is unfortunate, but
	 * likely not to overly impact performance, as the underlying
	 * device (CDROM drive) is quite slow.
	 */
	rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
	mutex_enter(&hp->hs_contents_lock);
	mutex_enter(&vp->v_lock);

	if (vp->v_count < 1) {
		panic("hsfs_inactive: v_count < 1");
		/*NOTREACHED*/
	}

	if (vp->v_count > 1 || (hp->hs_flags & HREF) == 0) {
		vp->v_count--;	/* release hold from vn_rele */
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
		rw_exit(&fsp->hsfs_hash_lock);
		return;
	}
	vp->v_count--;	/* release hold from vn_rele */
	if (vp->v_count == 0) {
		/*
		 * Free the hsnode.
		 * If there are no pages associated with the
		 * hsnode, give it back to the kmem_cache,
		 * else put at the end of this file system's
		 * internal free list.
		 */
		nopage = !vn_has_cached_data(vp);
		hp->hs_flags = 0;
		/*
		 * exit these locks now, since hs_freenode may
		 * kmem_free the hsnode and embedded vnode
		 */
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
		hs_freenode(vp, fsp, nopage);
	} else {
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
	}
	rw_exit(&fsp->hsfs_hash_lock);
}


/*ARGSUSED*/
static int
hsfs_lookup(
	struct vnode *dvp,
	char *nm,
	struct vnode **vpp,
	struct pathname *pnp,
	int flags,
	struct vnode *rdir,
	struct cred *cred)
{
	int error;
	int namelen = (int)strlen(nm);

	if (*nm == '\0') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/*
	 * If we're looking for ourself, life is simple.
	 */
	if (namelen == 1 && *nm == '.') {
		if (error = hs_access(dvp, (mode_t)VEXEC, cred))
			return (error);
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	return (hs_dirlook(dvp, nm, namelen, vpp, cred));
}


/*ARGSUSED*/
static int
hsfs_readdir(
	struct vnode	*vp,
	struct uio	*uiop,
	struct cred	*cred,
	int		*eofp)
{
	struct hsnode	*dhp;
	struct hsfs	*fsp;
	struct hs_direntry hd;
	struct dirent64	*nd;
	int		error;
	uint_t		offset;		/* real offset in directory */
	uint_t		dirsiz;		/* real size of directory */
	uchar_t		*blkp;
	int		hdlen;		/* length of hs directory entry */
	long		ndlen;		/* length of dirent entry */
	int		bytes_wanted;
	size_t		bufsize;	/* size of dirent buffer */
	char		*outbuf;	/* ptr to dirent buffer */
	char		*dname;
	int		dnamelen;
	size_t		dname_size;
	struct fbuf	*fbp;
	uint_t		last_offset;	/* last index into current dir block */
	ulong_t		dir_lbn;	/* lbn of directory */
	ino64_t		dirino;	/* temporary storage before storing in dirent */
	off_t		diroff;

	dhp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	if (dhp->hs_dirent.ext_size == 0)
		hs_filldirent(vp, &dhp->hs_dirent);
	dirsiz = dhp->hs_dirent.ext_size;
	dir_lbn = dhp->hs_dirent.ext_lbn;
	if (uiop->uio_loffset >= dirsiz) {	/* at or beyond EOF */
		if (eofp)
			*eofp = 1;
		return (0);
	}
	ASSERT(uiop->uio_loffset <= MAXOFF_T);
	offset = (uint_t)uiop->uio_offset;

	dname_size = fsp->hsfs_namemax + 1;	/* 1 for the ending NUL */
	dname = kmem_alloc(dname_size, KM_SLEEP);
	bufsize = uiop->uio_resid + sizeof (struct dirent64);

	outbuf = kmem_alloc(bufsize, KM_SLEEP);
	nd = (struct dirent64 *)outbuf;

	while (offset < dirsiz) {
		if ((offset & MAXBMASK) + MAXBSIZE > dirsiz)
			bytes_wanted = dirsiz - (offset & MAXBMASK);
		else
			bytes_wanted = MAXBSIZE;

		error = fbread(vp, (offset_t)(offset & MAXBMASK),
			(unsigned int)bytes_wanted, S_READ, &fbp);
		if (error)
			goto done;

		blkp = (uchar_t *)fbp->fb_addr;
		last_offset = (offset & MAXBMASK) + fbp->fb_count - 1;

#define	rel_offset(offset) ((offset) & MAXBOFFSET)	/* index into blkp */

		while (offset < last_offset) {
			/*
			 * Directory Entries cannot span sectors.
			 * Unused bytes at the end of each sector are zeroed.
			 * Therefore, detect this condition when the size
			 * field of the directory entry is zero.
			 */
			hdlen = (int)((uchar_t)
				HDE_DIR_LEN(&blkp[rel_offset(offset)]));
			if (hdlen == 0) {
				/* advance to next sector boundary */
				offset = (offset & MAXHSMASK) + HS_SECTOR_SIZE;

				/*
				 * Have we reached the end of current block?
				 */
				if (offset > last_offset)
					break;
				else
					continue;
			}
			/* make sure this is zeroed before reading it */
			bzero(&hd, sizeof (hd));

			/*
			 * Just ignore invalid directory entries.
			 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
			 */
			if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
				&hd, dname, &dnamelen)) {
				/*
				 * Determine if there is enough room
				 */
				ndlen = (long)DIRENT64_RECLEN((dnamelen));

				if ((ndlen + ((char *)nd - outbuf)) >
				    uiop->uio_resid) {
					fbrelse(fbp, S_READ);
					goto done; /* output buffer full */
				}

				diroff = offset + hdlen;
				/*
				 * Generate nodeid.
				 * If a directory, nodeid points to the
				 * canonical dirent describing the directory:
				 * the dirent of the "." entry for the
				 * directory, which is pointed to by all
				 * dirents for that directory.
				 * Otherwise, nodeid points to dirent of file.
				 */
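				/*
				 * Illustrative example (not from the
				 * original source): with 2048-byte
				 * logical blocks (lbn_shift == 11,
				 * lbn_maxoffset == 2047), a file entry
				 * 5000 bytes into a directory at
				 * dir_lbn 100 normalizes below to
				 * lbn = 100 + (5000 >> 11) = 102 and
				 * off = 5000 & 2047 = 904.
				 */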
				if (hd.type == VDIR) {
					dirino = (ino64_t)
					    MAKE_NODEID(hd.ext_lbn, 0,
					    vp->v_vfsp);
				} else {
					struct hs_volume *hvp;
					offset_t lbn, off;

					/*
					 * Normalize lbn and off
					 */
					hvp = &fsp->hsfs_vol;
					lbn = dir_lbn +
					    (offset >> hvp->lbn_shift);
					off = offset & hvp->lbn_maxoffset;
					dirino = (ino64_t)MAKE_NODEID(lbn,
					    off, vp->v_vfsp);
				}


				/* strncpy(9f) will zero uninitialized bytes */

				ASSERT(strlen(dname) + 1 <=
				    DIRENT64_NAMELEN(ndlen));
				(void) strncpy(nd->d_name, dname,
				    DIRENT64_NAMELEN(ndlen));
				nd->d_reclen = (ushort_t)ndlen;
				nd->d_off = (offset_t)diroff;
				nd->d_ino = dirino;
				nd = (struct dirent64 *)((char *)nd + ndlen);

				/*
				 * free up space allocated for symlink
				 */
				if (hd.sym_link != (char *)NULL) {
					kmem_free(hd.sym_link,
					    (size_t)(hd.ext_size+1));
					hd.sym_link = (char *)NULL;
				}
			}

			offset += hdlen;
		}
		fbrelse(fbp, S_READ);
	}

	/*
	 * Got here for one of the following reasons:
	 *	1) outbuf is full (error == 0)
	 *	2) end of directory reached (error == 0)
	 *	3) error reading directory sector (error != 0)
	 *	4) directory entry crosses sector boundary (error == 0)
	 *
	 * If any directory entries have been copied, don't report
	 * case 4.  Instead, return the valid directory entries.
	 *
	 * If no entries have been copied, report the error.
	 * If case 4, this will be indistinguishable from EOF.
	 */
done:
	ndlen = ((char *)nd - outbuf);
	if (ndlen != 0) {
		error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
		uiop->uio_offset = offset;
	}
	kmem_free(dname, dname_size);
	kmem_free(outbuf, bufsize);
	if (eofp && error == 0)
		*eofp = (uiop->uio_offset >= dirsiz);
	return (error);
}

static int
hsfs_fid(struct vnode *vp, struct fid *fidp)
{
	struct hsnode *hp;
	struct hsfid *fid;

	if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
		fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
		return (ENOSPC);
	}

	fid = (struct hsfid *)fidp;
	fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	fid->hf_dir_lbn = hp->hs_dir_lbn;
	fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}

/*ARGSUSED*/
static int
hsfs_open(struct vnode **vpp, int flag, struct cred *cred)
{
	return (0);
}

/*ARGSUSED*/
static int
hsfs_close(
	struct vnode *vp,
	int flag,
	int count,
	offset_t offset,
	struct cred *cred)
{
	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);
	return (0);
}

/*ARGSUSED2*/
static int
hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred)
{
	return (hs_access(vp, (mode_t)mode, cred));
}

/*
 * The seek time of a CD-ROM is very long, and the data transfer
 * rate is even worse (max. 150K per sec).  The design
 * decision is to reduce access to the CD-ROM as much as possible,
 * and to transfer a sizable block (read-ahead) of data at a time.
 * The UFS style of reading ahead one block at a time is not
 * appropriate here, and is not supported.
 */

/*
 * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
 */
#define	KLUSTSIZE	(56 * 1024)
/* we don't support read ahead */
int hsfs_lostpage;	/* no. of times we lost original page */

/*
 * Used to prevent biodone() from releasing buf resources that
 * we didn't allocate in quite the usual way.
 */
/*ARGSUSED*/
int
hsfs_iodone(struct buf *bp)
{
	sema_v(&bp->b_io);
	return (0);
}

/*
 * Each file may have a different interleaving on disk.  This makes
 * things somewhat interesting.  The gist is that there are some
 * number of contiguous data sectors, followed by some other number
 * of contiguous skip sectors.  The sum of those two sets of sectors
 * defines the interleave size.  Unfortunately, it means that we generally
 * can't simply read N sectors starting at a given offset to satisfy
 * any given request.
 *
 * What we do is get the relevant memory pages via pvn_read_kluster(),
 * then stride through the interleaves, setting up a buf for each
 * sector that needs to be brought in.  Instead of kmem_alloc'ing
 * space for the sectors, though, we just point at the appropriate
 * spot in the relevant page for each of them.  This saves us a bunch
 * of copying.
 */
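
/*
 * Illustrative example (not from the original source): assume
 * 2048-byte logical blocks with intlf_sz = 2 (data) and
 * intlf_sk = 1 (skip), so chunk_lbn_count = 3 and
 * chunk_data_bytes = 4096.  File byte offset 5000 then lies in
 * data chunk 1 (which_chunk_lbn = 5000 / 4096), whose first
 * logical block is offset_lbn = 1 * 3 = 3, i.e. offset_bytes =
 * 6144, with offset_extra = 5000 % 4096 = 904 bytes left over;
 * the read starts at device byte bof + xarsiz + 6144 + 904.
 */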
/*ARGSUSED*/
static int
hsfs_getapage(
	struct vnode *vp,
	u_offset_t off,
	size_t len,
	uint_t *protp,
	struct page *pl[],
	size_t plsz,
	struct seg *seg,
	caddr_t addr,
	enum seg_rw rw,
	struct cred *cred)
{
	struct hsnode *hp;
	struct hsfs *fsp;
	int	err;
	struct buf *bufs;
	caddr_t *vas;
	caddr_t va;
	struct page *pp, *searchp, *lastp;
	page_t	*pagefound;
	offset_t	bof;
	struct vnode *devvp;
	ulong_t	byte_offset;
	size_t	io_len_tmp;
	uint_t	io_off, io_len;
	uint_t	xlen;
	uint_t	filsiz;
	uint_t	secsize;
	uint_t	bufcnt;
	uint_t	bufsused;
	uint_t	count;
	uint_t	io_end;
	uint_t	which_chunk_lbn;
	uint_t	offset_lbn;
	uint_t	offset_extra;
	offset_t	offset_bytes;
	uint_t	remaining_bytes;
	uint_t	extension;
	int	remainder;	/* must be signed */
	int	chunk_lbn_count;
	int	chunk_data_bytes;
	int	xarsiz;
	diskaddr_t driver_block;
	u_offset_t io_off_tmp;

	/*
	 * We don't support asynchronous operation at the moment, so
	 * just pretend we did it.  If the pages are ever actually
	 * needed, they'll get brought in then.
	 */
	if (pl == NULL)
		return (0);

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	devvp = fsp->hsfs_devvp;
	secsize = fsp->hsfs_vol.lbn_size;  /* bytes per logical block */

	/* file data size */
	filsiz = hp->hs_dirent.ext_size;

	/* disk addr for start of file */
	bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);

	/* xarsiz bytes must be skipped to reach the file data */
	xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;

	/* how many logical blocks in an interleave (data+skip) */
	chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;

	if (chunk_lbn_count == 0) {
		chunk_lbn_count = 1;
	}

	/*
	 * Convert interleaving size into bytes.  The zero case
	 * (no interleaving) optimization is handled as a side-
	 * effect of the read-ahead logic.
	 */
	if (hp->hs_dirent.intlf_sz == 0) {
		chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
	} else {
		chunk_data_bytes = LBN_TO_BYTE(hp->hs_dirent.intlf_sz,
			vp->v_vfsp);
	}

reread:
	err = 0;
	pagefound = 0;

	/*
	 * Do some read-ahead.  This mostly saves us a bit of
	 * system cpu time more than anything else when doing
	 * sequential reads.  At some point, could do the
	 * read-ahead asynchronously which might gain us something
	 * on wall time, but it seems unlikely....
	 *
	 * We do the easy case here, which is to read through
	 * the end of the chunk, minus whatever's at the end that
	 * won't exactly fill a page.
	 */
	which_chunk_lbn = (off + len) / chunk_data_bytes;
	extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
	extension -= (extension % PAGESIZE);
	if (extension != 0 && extension < filsiz - off) {
		len = extension;
	}
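	/*
	 * Illustrative example (not from the original source, assuming
	 * PAGESIZE is 4K and chunk_data_bytes is 4096): for off = 0 and
	 * len = 1024, which_chunk_lbn = 0 and extension = 4096, already
	 * page-aligned; if the file extends that far, len grows to 4096
	 * so the read covers the rest of the data chunk.
	 */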
	/*
	 * Some cd writers don't write sectors that aren't used.  Also,
	 * there's no point in reading sectors we'll never look at.  So,
	 * if we're asked to go beyond the end of a file, truncate to the
	 * length of that file.
	 *
	 * Additionally, this behaviour is required by section 6.4.5 of
	 * ISO 9660:1988(E).
	 */
	if (len > (filsiz - off)) {
		len = filsiz - off;
	}

	/*
	 * After all that, make sure we're asking for things in units
	 * that bdev_strategy() will understand (see bug 4202551).
	 */
	len = roundup(len, DEV_BSIZE);

	pp = NULL;
again:
	/* search for page in buffer */
	if ((pagefound = page_exists(vp, off)) == 0) {
		/*
		 * Need to really do disk IO to get the page.
		 */
		pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
		    &io_len_tmp, off, len, 0);

		if (pp == NULL)
			goto again;

		io_off = (uint_t)io_off_tmp;
		io_len = (uint_t)io_len_tmp;

		/* check for truncation */
		/*
		 * xxx Clean up and return EIO instead?
		 * xxx Ought to go to u_offset_t for everything, but we
		 * xxx call lots of things that want uint_t arguments.
		 */
		ASSERT(io_off == io_off_tmp);

		/*
		 * get enough buffers for worst-case scenario
		 * (i.e., no coalescing possible).
		 */
		bufcnt = (len + secsize - 1) / secsize;
		bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
		vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
		for (count = 0; count < bufcnt; count++) {
			bufs[count].b_edev = devvp->v_rdev;
			bufs[count].b_dev = cmpdev(devvp->v_rdev);
			bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
			bufs[count].b_iodone = hsfs_iodone;
			bufs[count].b_vp = vp;
			bufs[count].b_file = vp;
			sema_init(&bufs[count].b_io, 0, NULL,
			    SEMA_DEFAULT, NULL);
			sema_init(&bufs[count].b_sem, 0, NULL,
			    SEMA_DEFAULT, NULL);
		}

		/* zero the tail of the last page that won't be read into */
		xlen = io_len & PAGEOFFSET;
		if (xlen != 0)
			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

		va = NULL;
		lastp = NULL;
		searchp = pp;
		io_end = io_off + io_len;
		for (count = 0, byte_offset = io_off;
			byte_offset < io_end;
			count++) {
			ASSERT(count < bufcnt);

			/* Compute disk address for interleaving. */

			/* considered without skips */
			which_chunk_lbn = byte_offset / chunk_data_bytes;

			/* factor in skips */
			offset_lbn = which_chunk_lbn * chunk_lbn_count;

			/* convert to physical byte offset for lbn */
			offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);

			/* don't forget offset into lbn */
			offset_extra = byte_offset % chunk_data_bytes;

			/* get virtual block number for driver */
			driver_block = lbtodb(bof + xarsiz
				+ offset_bytes + offset_extra);

			if (lastp != searchp) {
				/* this branch taken first time through loop */
				va = vas[count]
					= ppmapin(searchp, PROT_WRITE,
						(caddr_t)-1);
				/* ppmapin() guarantees not to return NULL */
			} else {
				vas[count] = NULL;
			}

			bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
			bufs[count].b_offset =
			    (offset_t)(byte_offset - io_off + off);

			/*
			 * We specifically use the b_lblkno member here
			 * as even in the 32 bit world driver_block can
			 * get very large in line with the ISO9660 spec.
			 */

			bufs[count].b_lblkno = driver_block;

			remaining_bytes = ((which_chunk_lbn + 1)
				* chunk_data_bytes)
				- byte_offset;

			/*
			 * remaining_bytes can't be zero, as we derived
			 * which_chunk_lbn directly from byte_offset.
			 */
			if ((remaining_bytes+byte_offset) < (off+len)) {
				/* coalesce-read the rest of the chunk */
				bufs[count].b_bcount = remaining_bytes;
			} else {
				/* get the final bits */
				bufs[count].b_bcount = off + len - byte_offset;
			}

			/*
			 * It would be nice to do multiple pages'
			 * worth at once here when the opportunity
			 * arises, as that has been shown to improve
			 * our wall time.  However, to do that
			 * requires that we use the pageio subsystem,
			 * which doesn't mix well with what we're
			 * already using here.  We can't use pageio
			 * all the time, because that subsystem
			 * assumes that a page is stored in N
			 * contiguous blocks on the device.
			 * Interleaving violates that assumption.
			 */

			remainder = PAGESIZE - (byte_offset % PAGESIZE);
			if (bufs[count].b_bcount > remainder) {
				bufs[count].b_bcount = remainder;
			}

			bufs[count].b_bufsize = bufs[count].b_bcount;
			byte_offset += bufs[count].b_bcount;

			(void) bdev_strategy(&bufs[count]);

			lwp_stat_update(LWP_STAT_INBLK, 1);
			lastp = searchp;
			if ((remainder - bufs[count].b_bcount) < 1) {
				searchp = searchp->p_next;
			}
		}

		bufsused = count;
		/* Now wait for everything to come in */
		for (count = 0; count < bufsused; count++) {
			if (err == 0) {
				err = biowait(&bufs[count]);
			} else
				(void) biowait(&bufs[count]);
		}

		/* Don't leak resources */
		for (count = 0; count < bufcnt; count++) {
			sema_destroy(&bufs[count].b_io);
			sema_destroy(&bufs[count].b_sem);
			if (count < bufsused && vas[count] != NULL) {
				ppmapout(vas[count]);
			}
		}

		kmem_free(vas, bufcnt * sizeof (caddr_t));
		kmem_free(bufs, bufcnt * sizeof (struct buf));
	}

	if (err) {
		pvn_read_done(pp, B_ERROR);
		return (err);
	}

	/*
	 * Lock the requested page, and the one after it if possible.
	 * Don't bother if our caller hasn't given us a place to stash
	 * the page pointers, since otherwise we'd lock pages that would
	 * never get unlocked.
	 */
	if (pagefound) {
		int index;
		ulong_t soff;

		/*
		 * Make sure it's in memory before we say it's here.
		 */
		if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
			hsfs_lostpage++;
			goto reread;
		}

		pl[0] = pp;
		index = 1;

		/*
		 * Try to lock the next page, if it exists, without
		 * blocking.
		 */
		plsz -= PAGESIZE;
		/* LINTED (plsz is unsigned) */
		for (soff = off + PAGESIZE; plsz > 0;
		    soff += PAGESIZE, plsz -= PAGESIZE) {
			pp = page_lookup_nowait(vp, (u_offset_t)soff,
					SE_SHARED);
			if (pp == NULL)
				break;
			pl[index++] = pp;
		}
		pl[index] = NULL;
		return (0);
	}

	if (pp != NULL) {
		pvn_plist_init(pp, pl, plsz, off, io_len, rw);
	}

	return (err);
}

static int
hsfs_getpage(
	struct vnode *vp,
	offset_t off,
	size_t len,
	uint_t *protp,
	struct page *pl[],
	size_t plsz,
	struct seg *seg,
	caddr_t addr,
	enum seg_rw rw,
	struct cred *cred)
{
	int err;
	uint_t filsiz;
	struct hsnode *hp = VTOH(vp);

	/* does not support write */
	if (rw == S_WRITE) {
		panic("write attempt on READ ONLY HSFS");
		/*NOTREACHED*/
	}

	if (vp->v_flag & VNOMAP) {
		return (ENOSYS);
	}

	ASSERT(off <= MAXOFF_T);

	/*
	 * Determine file data size for EOF check.
	 */
	filsiz = hp->hs_dirent.ext_size;
	if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
		return (EFAULT);	/* beyond EOF */

	if (protp != NULL)
		*protp = PROT_ALL;

	if (len <= PAGESIZE)
		err = hsfs_getapage(vp, (u_offset_t)off, len, protp, pl, plsz,
		    seg, addr, rw, cred);
	else
		err = pvn_getpages(hsfs_getapage, vp, off, len, protp,
		    pl, plsz, seg, addr, rw, cred);

	return (err);
}



/*
 * This function should never be called.  It exists only because a
 * putpage routine must be passed as an argument to other interfaces.
 */
/*ARGSUSED*/
int
hsfs_putapage(
	vnode_t		*vp,
	page_t		*pp,
	u_offset_t	*offp,
	size_t		*lenp,
	int		flags,
	cred_t		*cr)
{
	/* should never happen - just destroy it */
	cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
	pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
	return (0);
}


/*
 * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
 * B_INVAL is set by:
 *
 *	1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
 *	2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
 *	   which translates to an MC_SYNC with the MS_INVALIDATE flag.
 *
 * The B_FREE (as well as the B_DONTNEED) flag is set when the
 * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
 * from SEGVN to release pages behind a pagefault.
 */
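/*
 * Illustrative user-level trigger (not part of this file): pages of a
 * mapped file can be invalidated via memcntl(2), which reaches this
 * routine with B_INVAL set, e.g.:
 *
 *	(void) memcntl(addr, len, MC_SYNC, (caddr_t)MS_INVALIDATE, 0, 0);
 */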
/*ARGSUSED*/
static int
hsfs_putpage(
	struct vnode	*vp,
	offset_t	off,
	size_t		len,
	int		flags,
	struct cred	*cr)
{
	int error = 0;

	if (vp->v_count == 0) {
		panic("hsfs_putpage: bad v_count");
		/*NOTREACHED*/
	}

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	ASSERT(off <= MAXOFF_T);

	if (!vn_has_cached_data(vp))	/* no pages mapped */
		return (0);

	if (len == 0)		/* from 'off' to EOF */
		error = pvn_vplist_dirty(vp, off,
					hsfs_putapage, flags, cr);
	else {
		offset_t end_off = off + len;
		offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
		offset_t io_off;

		file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
		if (end_off > file_size)
			end_off = file_size;

		for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
			page_t *pp;

			/*
			 * We insist on getting the page only if we are
			 * about to invalidate, free or write it and
			 * the B_ASYNC flag is not set.
			 */
			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
				pp = page_lookup(vp, io_off,
					(flags & (B_INVAL | B_FREE)) ?
					    SE_EXCL : SE_SHARED);
			} else {
				pp = page_lookup_nowait(vp, io_off,
					(flags & B_FREE) ? SE_EXCL : SE_SHARED);
			}

			if (pp == NULL)
				continue;
			/*
			 * Normally pvn_getdirty() should return 0, which
			 * implies that it has done the job for us.
			 * The shouldn't-happen scenario is when it returns 1.
			 * This means that the page has been modified and
			 * needs to be put back.
			 * Since we can't write on a CD, we fake a failed
			 * I/O and force pvn_write_done() to destroy the page.
			 */
			if (pvn_getdirty(pp, flags) == 1) {
				cmn_err(CE_NOTE,
					"hsfs_putpage: dirty HSFS page");
				pvn_write_done(pp, flags |
				    B_ERROR | B_WRITE | B_INVAL | B_FORCE);
			}
		}
	}
	return (error);
}


/*ARGSUSED*/
static int
hsfs_map(
	struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t *addrp,
	size_t len,
	uchar_t prot,
	uchar_t maxprot,
	uint_t flags,
	struct cred *cred)
{
	struct segvn_crargs vn_a;
	int error;

	/* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (off > MAXOFF_T)
		return (EFBIG);

	if (off < 0 || (offset_t)(off + len) < 0)
		return (EINVAL);

	if (vp->v_type != VREG) {
		return (ENODEV);
	}

	/*
	 * If file is being locked, disallow mapping.
	 */
	if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
		return (EAGAIN);

	as_rangelock(as);

	if ((flags & MAP_FIXED) == 0) {
		map_addr(addrp, len, off, 1, flags);
		if (*addrp == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		/*
		 * User specified address - blow away any previous mappings
		 */
		(void) as_unmap(as, *addrp, len);
	}

	vn_a.vp = vp;
	vn_a.offset = off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.cred = cred;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);
	return (error);
}

/* ARGSUSED */
static int
hsfs_addmap(
	struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t addr,
	size_t len,
	uchar_t prot,
	uchar_t maxprot,
	uint_t flags,
	struct cred *cr)
{
	struct hsnode *hp;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	hp->hs_mapcnt += btopr(len);
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}

/*ARGSUSED*/
static int
hsfs_delmap(
	struct vnode *vp,
	offset_t off,
	struct as *as,
	caddr_t addr,
	size_t len,
	uint_t prot,
	uint_t maxprot,
	uint_t flags,
	struct cred *cr)
{
	struct hsnode *hp;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	hp->hs_mapcnt -= btopr(len);	/* Count released mappings */
	ASSERT(hp->hs_mapcnt >= 0);
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}

/* ARGSUSED */
static int
hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp)
{
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}

/* ARGSUSED */
static int
hsfs_frlock(
	struct vnode *vp,
	int cmd,
	struct flock64 *bfp,
	int flag,
	offset_t offset,
	struct flk_callback *flk_cbp,
	cred_t *cr)
{
	struct hsnode *hp = VTOH(vp);

	/*
	 * If the file is being mapped, disallow fs_frlock.
	 * We are not holding the hs_contents_lock while checking
	 * hs_mapcnt because the current locking strategy drops all
	 * locks before calling fs_frlock.
	 * So, hs_mapcnt could change before we enter fs_frlock making
	 * it meaningless to have held hs_contents_lock in the first place.
	 */
	if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
		return (EAGAIN);

	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr));
}

const fs_operation_def_t hsfs_vnodeops_template[] = {
	VOPNAME_OPEN, hsfs_open,
	VOPNAME_CLOSE, hsfs_close,
	VOPNAME_READ, hsfs_read,
	VOPNAME_GETATTR, hsfs_getattr,
	VOPNAME_ACCESS, hsfs_access,
	VOPNAME_LOOKUP, hsfs_lookup,
	VOPNAME_READDIR, hsfs_readdir,
	VOPNAME_READLINK, hsfs_readlink,
	VOPNAME_FSYNC, hsfs_fsync,
	VOPNAME_INACTIVE, (fs_generic_func_p) hsfs_inactive,
	VOPNAME_FID, hsfs_fid,
	VOPNAME_SEEK, hsfs_seek,
	VOPNAME_FRLOCK, hsfs_frlock,
	VOPNAME_GETPAGE, hsfs_getpage,
	VOPNAME_PUTPAGE, hsfs_putpage,
	VOPNAME_MAP, (fs_generic_func_p) hsfs_map,
	VOPNAME_ADDMAP, (fs_generic_func_p) hsfs_addmap,
	VOPNAME_DELMAP, hsfs_delmap,
	NULL, NULL
};

struct vnodeops *hsfs_vnodeops;