/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>
#include <sys/policy.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <vm/page.h>
#include <sys/swap.h>


#include <fs/fs_subr.h>


#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>

extern struct vnodeops *udf_vnodeops;

kmutex_t ud_sync_busy;
/*
 * udf_vfs list manipulation routines
 */
kmutex_t udf_vfs_mutex;
struct udf_vfs *udf_vfs_instances;
#ifndef	__lint
_NOTE(MUTEX_PROTECTS_DATA(udf_vfs_mutex, udf_vfs_instances))
#endif

union ihead ud_ihead[UD_HASH_SZ];
kmutex_t ud_icache_lock;

#define	UD_BEGIN	0x0
#define	UD_END		0x1
#define	UD_UNKN		0x2
struct ud_inode *udf_ifreeh, *udf_ifreet;
kmutex_t udf_ifree_lock;
#ifndef	__lint
_NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreeh))
_NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreet))
#endif

kmutex_t ud_nino_lock;
int32_t ud_max_inodes = 512;
int32_t ud_cur_inodes = 0;
#ifndef	__lint
_NOTE(MUTEX_PROTECTS_DATA(ud_nino_lock, ud_cur_inodes))
#endif

uid_t ud_default_uid = 0;
gid_t ud_default_gid = 3;

int32_t ud_updat_ext4(struct ud_inode *, struct file_entry *);
int32_t ud_updat_ext4096(struct ud_inode *, struct file_entry *);
void ud_make_sad(struct icb_ext *, struct short_ad *, int32_t);
void ud_make_lad(struct icb_ext *, struct long_ad *, int32_t);
void ud_trunc_ext4(struct ud_inode *, u_offset_t);
void ud_trunc_ext4096(struct ud_inode *, u_offset_t);
void ud_add_to_free_list(struct ud_inode *, uint32_t);
void ud_remove_from_free_list(struct ud_inode *, uint32_t);

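/*
 * DEBUG-only helper: scan the in-core inode hash for an inode matching
 * (prn, ploc) on the given vfs and return it, or NULL if it is not
 * cached.  No hold is placed on the vnode.
 */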
#ifdef	DEBUG
struct ud_inode *
ud_search_icache(struct vfs *vfsp, uint16_t prn, uint32_t ploc)
{
	int32_t hno;
	union ihead *ih;
	struct ud_inode *ip;
	struct udf_vfs *udf_vfsp;
	uint32_t loc, dummy;

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);

	mutex_enter(&ud_icache_lock);
	hno = UD_INOHASH(vfsp->vfs_dev, loc);
	ih = &ud_ihead[hno];
	for (ip = ih->ih_chain[0];
	    ip != (struct ud_inode *)ih;
	    ip = ip->i_forw) {
		if ((prn == ip->i_icb_prn) && (ploc == ip->i_icb_block) &&
		    (vfsp->vfs_dev == ip->i_dev)) {
			mutex_exit(&ud_icache_lock);
			return (ip);
		}
	}
	mutex_exit(&ud_icache_lock);
	return (0);
}
#endif

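/*
 * Look up, and if necessary read in, the inode whose ICB lives at
 * (prn, ploc) on the file system described by vfsp.  On success a held,
 * IREF'd inode is returned through ipp.  If the caller already read the
 * file_entry block it may be passed in as pbp; the buffer is always
 * released before this routine returns.
 */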
/* ARGSUSED */
int
ud_iget(struct vfs *vfsp, uint16_t prn, uint32_t ploc,
	struct ud_inode **ipp, struct buf *pbp, struct cred *cred)
{
	int32_t hno, nomem = 0, icb_tag_flags;
	union ihead *ih;
	struct ud_inode *ip;
	struct vnode *vp;
	struct buf *bp = NULL;
	struct file_entry *fe;
	struct udf_vfs *udf_vfsp;
	struct ext_attr_hdr *eah;
	struct attr_hdr *ah;
	int32_t ea_len, ea_off;
	daddr_t loc;
	uint64_t offset = 0;
	struct icb_ext *iext, *con;
	uint32_t length, dummy;
	int32_t ndesc, ftype;
	uint16_t old_prn;
	uint32_t old_block, old_lbano;

	ud_printf("ud_iget\n");
	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	old_prn = 0;
	old_block = old_lbano = 0;
	ftype = 0;
	loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
loop:
	mutex_enter(&ud_icache_lock);
	hno = UD_INOHASH(vfsp->vfs_dev, loc);

	ih = &ud_ihead[hno];
	for (ip = ih->ih_chain[0];
	    ip != (struct ud_inode *)ih;
	    ip = ip->i_forw) {

		if ((prn == ip->i_icb_prn) &&
		    (ploc == ip->i_icb_block) &&
		    (vfsp->vfs_dev == ip->i_dev)) {

			vp = ITOV(ip);
			VN_HOLD(vp);
			mutex_exit(&ud_icache_lock);

			rw_enter(&ip->i_contents, RW_READER);
			mutex_enter(&ip->i_tlock);
			if ((ip->i_flag & IREF) == 0) {
				mutex_enter(&udf_ifree_lock);
				ud_remove_from_free_list(ip, UD_UNKN);
				mutex_exit(&udf_ifree_lock);
			}
			ip->i_flag |= IREF;
			mutex_exit(&ip->i_tlock);
			rw_exit(&ip->i_contents);

			*ipp = ip;

			if (pbp != NULL) {
				brelse(pbp);
			}

			return (0);
		}
	}

	/*
	 * We don't have it in the cache
	 * Allocate a new entry
	 */
tryagain:
	mutex_enter(&udf_ifree_lock);
	mutex_enter(&ud_nino_lock);
	if (ud_cur_inodes > ud_max_inodes) {
		int32_t purged;

		mutex_exit(&ud_nino_lock);
		while (udf_ifreeh == NULL ||
		    vn_has_cached_data(ITOV(udf_ifreeh))) {
			/*
			 * Try to put an inode on the freelist that's
			 * sitting in the dnlc.
			 */
			mutex_exit(&udf_ifree_lock);
			purged = dnlc_fs_purge1(udf_vnodeops);
			mutex_enter(&udf_ifree_lock);
			if (!purged) {
				break;
			}
		}
		mutex_enter(&ud_nino_lock);
	}

	/*
	 * If there's a free one available and it has no pages attached
	 * take it. If we're over the high water mark, take it even if
	 * it has attached pages. Otherwise, make a new one.
	 */
	if (udf_ifreeh &&
	    (nomem || !vn_has_cached_data(ITOV(udf_ifreeh)) ||
	    ud_cur_inodes >= ud_max_inodes)) {

		mutex_exit(&ud_nino_lock);
		ip = udf_ifreeh;
		vp = ITOV(ip);

		ud_remove_from_free_list(ip, UD_BEGIN);

		mutex_exit(&udf_ifree_lock);
		if (ip->i_flag & IREF) {
			cmn_err(CE_WARN, "ud_iget: bad i_flag\n");
			mutex_exit(&ud_icache_lock);
			if (pbp != NULL) {
				brelse(pbp);
			}
			return (EINVAL);
		}
		rw_enter(&ip->i_contents, RW_WRITER);

		/*
		 * We call ud_syncip() to synchronously destroy all pages
		 * associated with the vnode before re-using it. The pageout
		 * thread may have beaten us to this page so our v_count can
		 * be > 0 at this point even though we are on the freelist.
		 */
		mutex_enter(&ip->i_tlock);
		ip->i_flag = (ip->i_flag & IMODTIME) | IREF;
		mutex_exit(&ip->i_tlock);

		VN_HOLD(vp);
		if (ud_syncip(ip, B_INVAL, I_SYNC) != 0) {
			ud_idrop(ip);
			rw_exit(&ip->i_contents);
			mutex_exit(&ud_icache_lock);
			goto loop;
		}

		mutex_enter(&ip->i_tlock);
		ip->i_flag &= ~IMODTIME;
		mutex_exit(&ip->i_tlock);

		if (ip->i_ext) {
			kmem_free(ip->i_ext,
			    sizeof (struct icb_ext) * ip->i_ext_count);
			ip->i_ext = 0;
			ip->i_ext_count = ip->i_ext_used = 0;
		}

		if (ip->i_con) {
			kmem_free(ip->i_con,
			    sizeof (struct icb_ext) * ip->i_con_count);
			ip->i_con = 0;
			ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
		}

		/*
		 * The pageout thread may not have had a chance to release
		 * its hold on the vnode (if it was active with this vp),
		 * but the pages should all be invalidated.
		 */
	} else {
		mutex_exit(&ud_nino_lock);
		mutex_exit(&udf_ifree_lock);
		/*
		 * Try to get memory for this inode without blocking.
		 * If we can't and there is something on the freelist,
		 * go ahead and use it, otherwise block waiting for
		 * memory holding the hash_lock. We expose a potential
		 * deadlock if all users of memory have to do a ud_iget()
		 * before releasing memory.
		 */
		ip = (struct ud_inode *)kmem_zalloc(sizeof (struct ud_inode),
		    KM_NOSLEEP);
		vp = vn_alloc(KM_NOSLEEP);
		if ((ip == NULL) || (vp == NULL)) {
			mutex_enter(&udf_ifree_lock);
			if (udf_ifreeh) {
				mutex_exit(&udf_ifree_lock);
				if (ip != NULL)
					kmem_free(ip, sizeof (struct ud_inode));
				if (vp != NULL)
					vn_free(vp);
				nomem = 1;
				goto tryagain;
			} else {
				mutex_exit(&udf_ifree_lock);
				if (ip == NULL)
					ip = (struct ud_inode *)
					    kmem_zalloc(
					    sizeof (struct ud_inode),
					    KM_SLEEP);
				if (vp == NULL)
					vp = vn_alloc(KM_SLEEP);
			}
		}
		ip->i_vnode = vp;

		ip->i_marker1 = (uint32_t)0xAAAAAAAA;
		ip->i_marker2 = (uint32_t)0xBBBBBBBB;
		ip->i_marker3 = (uint32_t)0xCCCCCCCC;

		rw_init(&ip->i_rwlock, NULL, RW_DEFAULT, NULL);
		rw_init(&ip->i_contents, NULL, RW_DEFAULT, NULL);
		mutex_init(&ip->i_tlock, NULL, MUTEX_DEFAULT, NULL);

		ip->i_forw = ip;
		ip->i_back = ip;
		vp->v_data = (caddr_t)ip;
		vn_setops(vp, udf_vnodeops);
		ip->i_flag = IREF;
		cv_init(&ip->i_wrcv, NULL, CV_DRIVER, NULL);
		mutex_enter(&ud_nino_lock);
		ud_cur_inodes++;
		mutex_exit(&ud_nino_lock);

		rw_enter(&ip->i_contents, RW_WRITER);
	}

	if (vp->v_count < 1) {
		cmn_err(CE_WARN, "ud_iget: v_count < 1\n");
		mutex_exit(&ud_icache_lock);
		rw_exit(&ip->i_contents);
		if (pbp != NULL) {
			brelse(pbp);
		}
		return (EINVAL);
	}
	if (vn_has_cached_data(vp)) {
		cmn_err(CE_WARN, "ud_iget: v_pages not NULL\n");
		mutex_exit(&ud_icache_lock);
		rw_exit(&ip->i_contents);
		if (pbp != NULL) {
			brelse(pbp);
		}
		return (EINVAL);
	}

	/*
	 * Move the inode on the chain for its new (ino, dev) pair
	 */
	remque(ip);
	ip->i_forw = ip;
	ip->i_back = ip;
	insque(ip, ih);

	ip->i_dev = vfsp->vfs_dev;
	ip->i_udf = udf_vfsp;
	ip->i_diroff = 0;
	ip->i_devvp = ip->i_udf->udf_devvp;
	ip->i_icb_prn = prn;
	ip->i_icb_block = ploc;
	ip->i_icb_lbano = loc;
	ip->i_nextr = 0;
	ip->i_seq = 0;
	mutex_exit(&ud_icache_lock);

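	/*
	 * Read the file_entry for this inode, either from the buffer
	 * supplied by the caller (pbp) or directly from the device.
	 * For strategy-4096 files we may loop back here after following
	 * an indirect entry to a newer copy of the file_entry.
	 */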
read_de:
	if (pbp != NULL) {
		/*
		 * The assumption is that we will not create a
		 * strategy-4096 file.
		 */
		bp = pbp;
	} else {
		bp = ud_bread(ip->i_dev,
		    ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
		    udf_vfsp->udf_lbsize);
	}

	/*
	 * Check I/O errors
	 */
	fe = (struct file_entry *)bp->b_un.b_addr;
	if ((bp->b_flags & B_ERROR) ||
	    (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
	    ip->i_icb_block, 1, udf_vfsp->udf_lbsize) != 0)) {

		if (((bp->b_flags & B_ERROR) == 0) &&
		    (ftype == STRAT_TYPE4096)) {
			if (ud_check_te_unrec(udf_vfsp,
			    bp->b_un.b_addr, ip->i_icb_block) == 0) {

				brelse(bp);

				/*
				 * restore old file entry location
				 */
				ip->i_icb_prn = old_prn;
				ip->i_icb_block = old_block;
				ip->i_icb_lbano = old_lbano;

				/*
				 * reread old file entry
				 */
				bp = ud_bread(ip->i_dev,
				    old_lbano << udf_vfsp->udf_l2d_shift,
				    udf_vfsp->udf_lbsize);
				if ((bp->b_flags & B_ERROR) == 0) {
					fe = (struct file_entry *)
					    bp->b_un.b_addr;
					if (ud_verify_tag_and_desc(&fe->fe_tag,
					    UD_FILE_ENTRY, ip->i_icb_block, 1,
					    udf_vfsp->udf_lbsize) == 0) {
						goto end_4096;
					}
				}
			}
		}
error_ret:
		brelse(bp);
		/*
		 * The inode may not contain anything useful. Mark it as
		 * having an error and let anyone else who was waiting for
		 * this know there was an error. Callers waiting for
		 * access to this inode in ud_iget will find
		 * the i_icb_lbano == 0, so there won't be a match.
		 * It remains in the cache. Put it back on the freelist.
		 */
		mutex_enter(&vp->v_lock);
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		ip->i_icb_lbano = 0;

		/*
		 * The following two lines make it impossible for anyone
		 * to do a VN_HOLD and then a VN_RELE on this inode,
		 * so we avoid a call to ud_iinactive.
		 */
		ip->i_icb_prn = 0xffff;
		ip->i_icb_block = 0;

		/*
		 * remove the bad inode from hash chains
		 * so that during unmount we will not
		 * go through this inode
		 */
		mutex_enter(&ud_icache_lock);
		remque(ip);
		ip->i_forw = ip;
		ip->i_back = ip;
		mutex_exit(&ud_icache_lock);

		/* Put the inode at the front of the freelist */
		mutex_enter(&ip->i_tlock);
		mutex_enter(&udf_ifree_lock);
		ud_add_to_free_list(ip, UD_BEGIN);
		mutex_exit(&udf_ifree_lock);
		ip->i_flag = 0;
		mutex_exit(&ip->i_tlock);
		rw_exit(&ip->i_contents);
		return (EIO);
	}

	if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
		struct buf *ibp = NULL;
		struct indirect_entry *ie;

		/*
		 * save old file_entry location
		 */
		old_prn = ip->i_icb_prn;
		old_block = ip->i_icb_block;
		old_lbano = ip->i_icb_lbano;

		ftype = STRAT_TYPE4096;

		/*
		 * If the allocation strategy is 4096, several versions
		 * of the file may exist on the media; we are supposed
		 * to locate the latest version of the file.
		 */

		/*
		 * The IE is supposed to be in the block
		 * immediately following the DE.
		 */
		ibp = ud_bread(ip->i_dev,
		    (ip->i_icb_lbano + 1) << udf_vfsp->udf_l2d_shift,
		    udf_vfsp->udf_lbsize);
		if (ibp->b_flags & B_ERROR) {
			/*
			 * Get rid of current ibp and
			 * then goto error on DE's bp
			 */
ie_error:
			brelse(ibp);
			goto error_ret;
		}

		ie = (struct indirect_entry *)ibp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&ie->ie_tag,
		    UD_INDIRECT_ENT, ip->i_icb_block + 1,
		    1, udf_vfsp->udf_lbsize) == 0) {
			struct long_ad *lad;

			lad = &ie->ie_indirecticb;
			ip->i_icb_prn = SWAP_16(lad->lad_ext_prn);
			ip->i_icb_block = SWAP_32(lad->lad_ext_loc);
			ip->i_icb_lbano = ud_xlate_to_daddr(udf_vfsp,
			    ip->i_icb_prn, ip->i_icb_block,
			    1, &dummy);
			brelse(ibp);
			brelse(bp);
			goto read_de;
		}

		/*
		 * If this block is a TE or unrecorded we
		 * are at the last entry.
		 */
		if (ud_check_te_unrec(udf_vfsp, ibp->b_un.b_addr,
		    ip->i_icb_block + 1) != 0) {
			/*
			 * This block is neither a TE nor unrecorded,
			 * and it did not verify as a valid IE above,
			 * so treat it as an IE error.
			 */
			goto ie_error;
		}
		/*
		 * ud_check_te_unrec() returned 0, so this is the last
		 * entry in the chain - the latest file_entry.
		 */
		brelse(ibp);
	}

end_4096:

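	/*
	 * Copy the interesting fields from the on-disk file_entry into
	 * the in-core inode, byte-swapping them as needed.
	 */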
	ip->i_uid = SWAP_32(fe->fe_uid);
	if (ip->i_uid == -1) {
		ip->i_uid = ud_default_uid;
	}
	ip->i_gid = SWAP_32(fe->fe_gid);
	if (ip->i_gid == -1) {
		ip->i_gid = ud_default_gid;
	}
	ip->i_perm = SWAP_32(fe->fe_perms) & 0xFFFF;
	if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
		ip->i_perm &= ~(IWRITE | (IWRITE >> 5) | (IWRITE >> 10));
	}

	ip->i_nlink = SWAP_16(fe->fe_lcount);
	ip->i_size = SWAP_64(fe->fe_info_len);
	ip->i_lbr = SWAP_64(fe->fe_lbr);

	ud_dtime2utime(&ip->i_atime, &fe->fe_acc_time);
	ud_dtime2utime(&ip->i_mtime, &fe->fe_mod_time);
	ud_dtime2utime(&ip->i_ctime, &fe->fe_attr_time);


	ip->i_uniqid = SWAP_64(fe->fe_uniq_id);
	icb_tag_flags = SWAP_16(fe->fe_icb_tag.itag_flags);

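	/*
	 * For device special files the major/minor numbers are stored in
	 * a Device Specification extended attribute, so walk the embedded
	 * extended attribute area to pick them up.
	 */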
	if ((fe->fe_icb_tag.itag_ftype == FTYPE_CHAR_DEV) ||
	    (fe->fe_icb_tag.itag_ftype == FTYPE_BLOCK_DEV)) {

		eah = (struct ext_attr_hdr *)fe->fe_spec;
		ea_off = GET_32(&eah->eah_ial);
		ea_len = GET_32(&fe->fe_len_ear);
		if (ea_len && (ud_verify_tag_and_desc(&eah->eah_tag,
		    UD_EXT_ATTR_HDR, ip->i_icb_block, 1,
		    sizeof (struct file_entry) -
		    offsetof(struct file_entry, fe_spec)) == 0)) {

			while (ea_off < ea_len) {
				/*
				 * We now check the validity of ea_off.
				 * (ea_len - ea_off) should be large enough to
				 * hold at least the attribute header.
				 */
				if ((ea_len - ea_off) <
				    sizeof (struct attr_hdr)) {
					cmn_err(CE_NOTE,
					    "ea_len(0x%x) - ea_off(0x%x) is "
					    "too small to hold attr. info. "
					    "blockno 0x%x\n",
					    ea_len, ea_off, ip->i_icb_block);
					goto error_ret;
				}
				ah = (struct attr_hdr *)&fe->fe_spec[ea_off];

				/*
				 * Device Specification EA
				 */
				if ((GET_32(&ah->ahdr_atype) == 12) &&
				    (ah->ahdr_astype == 1)) {
					struct dev_spec_ear *ds;

					if ((ea_len - ea_off) <
					    sizeof (struct dev_spec_ear)) {
						cmn_err(CE_NOTE,
						    "ea_len(0x%x) - "
						    "ea_off(0x%x) is too small "
						    "to hold dev_spec_ear."
						    " blockno 0x%x\n",
						    ea_len, ea_off,
						    ip->i_icb_block);
						goto error_ret;
					}
					ds = (struct dev_spec_ear *)ah;
					ip->i_major = GET_32(&ds->ds_major_id);
					ip->i_minor = GET_32(&ds->ds_minor_id);
				}

				/*
				 * Impl Use EA
				 */
				if ((GET_32(&ah->ahdr_atype) == 2048) &&
				    (ah->ahdr_astype == 1)) {
					struct iu_ea *iuea;
					struct copy_mgt_info *cmi;

					if ((ea_len - ea_off) <
					    sizeof (struct iu_ea)) {
						cmn_err(CE_NOTE,
						    "ea_len(0x%x) - "
						    "ea_off(0x%x) is too small "
						    "to hold iu_ea. "
						    "blockno 0x%x\n",
						    ea_len, ea_off,
						    ip->i_icb_block);
						goto error_ret;
					}
					iuea = (struct iu_ea *)ah;
					if (strncmp(iuea->iuea_ii.reg_id,
					    UDF_FREEEASPACE,
					    sizeof (iuea->iuea_ii.reg_id))
					    == 0) {
						/* skip it */
						iuea = iuea;
					} else if (strncmp(iuea->iuea_ii.reg_id,
					    UDF_CGMS_INFO,
					    sizeof (iuea->iuea_ii.reg_id))
					    == 0) {
						cmi = (struct copy_mgt_info *)
						    iuea->iuea_iu;
						cmi = cmi;
					}
				}
				/* ??? PARANOIA */
				if (GET_32(&ah->ahdr_length) == 0) {
					break;
				}
				ea_off += GET_32(&ah->ahdr_length);
			}
		}
	}

	ip->i_nextr = 0;

	ip->i_maxent = SWAP_16(fe->fe_icb_tag.itag_max_ent);
	ip->i_astrat = SWAP_16(fe->fe_icb_tag.itag_strategy);

	ip->i_desc_type = icb_tag_flags & 0x7;

	/* Strictly Paranoia */
	ip->i_ext = NULL;
	ip->i_ext_count = ip->i_ext_used = 0;
	ip->i_con = 0;
	ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;

	ip->i_data_off = 0xB0 + SWAP_32(fe->fe_len_ear);
	ip->i_max_emb = udf_vfsp->udf_lbsize - ip->i_data_off;
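	/*
	 * Build the in-core extent list.  The allocation descriptors are
	 * either short_ad's, long_ad's or a single embedded data area
	 * (ICB_FLAG_ONE_AD); any other type is invalid for UDF 1.50.
	 */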
	if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
		/* Short allocation desc */
		struct short_ad *sad;

		ip->i_ext_used = 0;
		ip->i_ext_count = ndesc =
		    SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
		ip->i_ext_count =
		    ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
		ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
		    sizeof (struct icb_ext), KM_SLEEP);
		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
		ip->i_cur_max_ext--;

		if ((ip->i_astrat != STRAT_TYPE4) &&
		    (ip->i_astrat != STRAT_TYPE4096)) {
			goto error_ret;
		}

		sad = (struct short_ad *)
		    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
		iext = ip->i_ext;
		while (ndesc--) {
			length = SWAP_32(sad->sad_ext_len);
			if ((length & 0x3FFFFFFF) == 0) {
				break;
			}
			if (((length >> 30) & IB_MASK) == IB_CON) {
				if (ip->i_con == NULL) {
					ip->i_con_count = EXT_PER_MALLOC;
					ip->i_con_used = 0;
					ip->i_con_read = 0;
					ip->i_con = kmem_zalloc(
					    ip->i_con_count *
					    sizeof (struct icb_ext),
					    KM_SLEEP);
				}
				con = &ip->i_con[ip->i_con_used];
				con->ib_prn = 0;
				con->ib_block = SWAP_32(sad->sad_ext_loc);
				con->ib_count = length & 0x3FFFFFFF;
				con->ib_flags = (length >> 30) & IB_MASK;
				ip->i_con_used++;
				sad++;
				break;
			}
			iext->ib_prn = 0;
			iext->ib_block = SWAP_32(sad->sad_ext_loc);
			length = SWAP_32(sad->sad_ext_len);
			iext->ib_count = length & 0x3FFFFFFF;
			iext->ib_offset = offset;
			iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
			iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
			offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
			    (~udf_vfsp->udf_lbmask);

			iext->ib_flags = (length >> 30) & IB_MASK;

			ip->i_ext_used++;
			iext++;
			sad++;
		}
	} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
		/* Long allocation desc */
		struct long_ad *lad;

		ip->i_ext_used = 0;
		ip->i_ext_count = ndesc =
		    SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
		ip->i_ext_count =
		    ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
		ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
		    sizeof (struct icb_ext), KM_SLEEP);

		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct long_ad);
		ip->i_cur_max_ext--;

		if ((ip->i_astrat != STRAT_TYPE4) &&
		    (ip->i_astrat != STRAT_TYPE4096)) {
			goto error_ret;
		}

		lad = (struct long_ad *)
		    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
		iext = ip->i_ext;
		while (ndesc--) {
			length = SWAP_32(lad->lad_ext_len);
			if ((length & 0x3FFFFFFF) == 0) {
				break;
			}
			if (((length >> 30) & IB_MASK) == IB_CON) {
				if (ip->i_con == NULL) {
					ip->i_con_count = EXT_PER_MALLOC;
					ip->i_con_used = 0;
					ip->i_con_read = 0;
					ip->i_con = kmem_zalloc(
					    ip->i_con_count *
					    sizeof (struct icb_ext),
					    KM_SLEEP);
				}
				con = &ip->i_con[ip->i_con_used];
				con->ib_prn = SWAP_16(lad->lad_ext_prn);
				con->ib_block = SWAP_32(lad->lad_ext_loc);
				con->ib_count = length & 0x3FFFFFFF;
				con->ib_flags = (length >> 30) & IB_MASK;
				ip->i_con_used++;
				lad++;
				break;
			}
			iext->ib_prn = SWAP_16(lad->lad_ext_prn);
			iext->ib_block = SWAP_32(lad->lad_ext_loc);
			iext->ib_count = length & 0x3FFFFFFF;
			iext->ib_offset = offset;
			iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
			iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
			offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
			    (~udf_vfsp->udf_lbmask);

			iext->ib_flags = (length >> 30) & IB_MASK;

			ip->i_ext_used++;
			iext++;
			lad++;
		}
	} else if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
		ASSERT(SWAP_32(fe->fe_len_ear) < udf_vfsp->udf_lbsize);

		if (SWAP_32(fe->fe_len_ear) > udf_vfsp->udf_lbsize) {
			goto error_ret;
		}
	} else {
		/* Not to be used in UDF 1.50 */
		cmn_err(CE_NOTE, "Invalid Allocation Descriptor type %x\n",
		    ip->i_desc_type);
		goto error_ret;
	}


	if (icb_tag_flags & ICB_FLAG_SETUID) {
		ip->i_char = ISUID;
	} else {
		ip->i_char = 0;
	}
	if (icb_tag_flags & ICB_FLAG_SETGID) {
		ip->i_char |= ISGID;
	}
	if (icb_tag_flags & ICB_FLAG_STICKY) {
		ip->i_char |= ISVTX;
	}
	switch (fe->fe_icb_tag.itag_ftype) {
		case FTYPE_DIRECTORY :
			ip->i_type = VDIR;
			break;
		case FTYPE_FILE :
			ip->i_type = VREG;
			break;
		case FTYPE_BLOCK_DEV :
			ip->i_type = VBLK;
			break;
		case FTYPE_CHAR_DEV :
			ip->i_type = VCHR;
			break;
		case FTYPE_FIFO :
			ip->i_type = VFIFO;
			break;
		case FTYPE_C_ISSOCK :
			ip->i_type = VSOCK;
			break;
		case FTYPE_SYMLINK :
			ip->i_type = VLNK;
			break;
		default :
			ip->i_type = VNON;
			break;
	}

	if (ip->i_type == VBLK || ip->i_type == VCHR) {
		ip->i_rdev = makedevice(ip->i_major, ip->i_minor);
	}

	/*
	 * Fill in the rest.  Don't bother with the vnode lock because nobody
	 * should be looking at this vnode.  We have already invalidated the
	 * pages if it had any so pageout shouldn't be referencing this vnode
	 * and we are holding the write contents lock so a look up can't use
	 * the vnode.
	 */
	vp->v_vfsp = vfsp;
	vp->v_type = ip->i_type;
	vp->v_rdev = ip->i_rdev;
	if (ip->i_udf->udf_root_blkno == loc) {
		vp->v_flag = VROOT;
	} else {
		vp->v_flag = 0;
	}

	brelse(bp);
	*ipp = ip;
	rw_exit(&ip->i_contents);
	vn_exists(vp);
	return (0);
}

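/*
 * Called when the last reference to a vnode is released.  Depending on
 * the state of the inode it is either written back and placed on the
 * free list, or (when the in-core inode limit has been exceeded and it
 * is safe to do so) destroyed outright.
 */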
void
ud_iinactive(struct ud_inode *ip, struct cred *cr)
{
	int32_t busy = 0;
	struct vnode *vp;
	vtype_t type;
	caddr_t addr, addr1;
	size_t size, size1;


	ud_printf("ud_iinactive\n");

	/*
	 * Get exclusive access to inode data.
	 */
	rw_enter(&ip->i_contents, RW_WRITER);

	/*
	 * Make sure no one reclaimed the inode before we put
	 * it on the freelist or destroy it. We keep our 'hold'
	 * on the vnode from vn_rele until we are ready to
	 * do something with the inode (freelist/destroy).
	 *
	 * Pageout may put a VN_HOLD/VN_RELE at anytime during this
	 * operation via an async putpage, so we must make sure
	 * we don't free/destroy the inode more than once. ud_iget
	 * may also put a VN_HOLD on the inode before it grabs
	 * the i_contents lock. This is done so we don't kmem_free
	 * an inode that a thread is waiting on.
	 */
	vp = ITOV(ip);

	mutex_enter(&vp->v_lock);
	if (vp->v_count < 1) {
		cmn_err(CE_WARN, "ud_iinactive: v_count < 1\n");
		mutex_exit(&vp->v_lock);
		rw_exit(&ip->i_contents);
		return;
	}
	if ((vp->v_count > 1) || ((ip->i_flag & IREF) == 0)) {
		vp->v_count--;		/* release our hold from vn_rele */
		mutex_exit(&vp->v_lock);
		rw_exit(&ip->i_contents);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * For forced umount case: if i_udf is NULL, the contents of
	 * the inode and all the pages have already been pushed back
	 * to disk. It can be safely destroyed.
	 */
	if (ip->i_udf == NULL) {
		addr = (caddr_t)ip->i_ext;
		size = sizeof (struct icb_ext) * ip->i_ext_count;
		ip->i_ext = 0;
		ip->i_ext_count = ip->i_ext_used = 0;
		addr1 = (caddr_t)ip->i_con;
		size1 = sizeof (struct icb_ext) * ip->i_con_count;
		ip->i_con = 0;
		ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
		vn_invalid(vp);

		mutex_enter(&ud_nino_lock);
		ud_cur_inodes--;
		mutex_exit(&ud_nino_lock);

		cv_destroy(&ip->i_wrcv);  /* throttling */
		rw_destroy(&ip->i_rwlock);
		rw_exit(&ip->i_contents);
		rw_destroy(&ip->i_contents);
		kmem_free(addr, size);
		kmem_free(addr1, size1);
		vn_free(vp);
		kmem_free(ip, sizeof (struct ud_inode));
		return;
	}

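	/*
	 * For writable file systems: if the link count has dropped to
	 * zero, truncate the file and free the inode; otherwise push any
	 * dirty data and inode state back to disk.
	 */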
	if ((ip->i_udf->udf_flags & UDF_FL_RDONLY) == 0) {
		if (ip->i_nlink <= 0) {
			ip->i_marker3 = (uint32_t)0xDDDD0000;
			ip->i_nlink = 1;	/* prevent free-ing twice */
			(void) ud_itrunc(ip, 0, 0, cr);
			type = ip->i_type;
			ip->i_perm = 0;
			ip->i_uid = 0;
			ip->i_gid = 0;
			ip->i_rdev = 0;	/* Zero in core version of rdev */
			mutex_enter(&ip->i_tlock);
			ip->i_flag |= IUPD|ICHG;
			mutex_exit(&ip->i_tlock);
			ud_ifree(ip, type);
			ip->i_icb_prn = 0xFFFF;
		} else if (!IS_SWAPVP(vp)) {
			/*
			 * Write the inode out if dirty. Pages are
			 * written back and put on the freelist.
			 */
			(void) ud_syncip(ip, B_FREE | B_ASYNC, 0);
			/*
			 * Do nothing if inode is now busy -- inode may
			 * have gone busy because ud_syncip
			 * releases/reacquires the i_contents lock
			 */
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&ip->i_contents);
				return;
			}
			mutex_exit(&vp->v_lock);
		} else {
			ud_iupdat(ip, 0);
		}
	}


	/*
	 * Put the inode on the end of the free list.
	 * Possibly in some cases it would be better to
	 * put the inode at the head of the free list,
	 * (e.g.: where i_perm == 0 || i_number == 0)
	 * but I will think about that later.
	 * (i_number is rarely 0 - only after an i/o error in ud_iget,
	 * where i_perm == 0, the inode will probably be wanted
	 * again soon for an ialloc, so possibly we should keep it)
	 */
	/*
	 * If inode is invalid or there is no page associated with
	 * this inode, put the inode in the front of the free list.
	 * Since we have a VN_HOLD on the vnode, and checked that it
	 * wasn't already on the freelist when we entered, we can safely
	 * put it on the freelist even if another thread puts a VN_HOLD
	 * on it (pageout/ud_iget).
	 */
tryagain:
	mutex_enter(&ud_nino_lock);
	if (vn_has_cached_data(vp)) {
		mutex_exit(&ud_nino_lock);
		mutex_enter(&vp->v_lock);
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		mutex_enter(&ip->i_tlock);
		mutex_enter(&udf_ifree_lock);
		ud_add_to_free_list(ip, UD_END);
		mutex_exit(&udf_ifree_lock);
		ip->i_flag &= IMODTIME;
		mutex_exit(&ip->i_tlock);
		rw_exit(&ip->i_contents);
	} else if (busy || ud_cur_inodes < ud_max_inodes) {
		mutex_exit(&ud_nino_lock);
		/*
		 * We're not over our high water mark, or it's
		 * not safe to kmem_free the inode, so put it
		 * on the freelist.
		 */
		mutex_enter(&vp->v_lock);
		if (vn_has_cached_data(vp)) {
			cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
		}
		vp->v_count--;
		mutex_exit(&vp->v_lock);

		mutex_enter(&ip->i_tlock);
		mutex_enter(&udf_ifree_lock);
		ud_add_to_free_list(ip, UD_BEGIN);
		mutex_exit(&udf_ifree_lock);
		ip->i_flag &= IMODTIME;
		mutex_exit(&ip->i_tlock);
		rw_exit(&ip->i_contents);
	} else {
		mutex_exit(&ud_nino_lock);
		if (vn_has_cached_data(vp)) {
			cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
		}
		/*
		 * Try to free the inode. We must make sure
		 * it's o.k. to destroy this inode. We can't destroy
		 * if a thread is waiting for this inode. If we can't get the
		 * cache now, put it back on the freelist.
		 */
		if (!mutex_tryenter(&ud_icache_lock)) {
			busy = 1;
			goto tryagain;
		}
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			/* inode is wanted in ud_iget */
			busy = 1;
			mutex_exit(&vp->v_lock);
			mutex_exit(&ud_icache_lock);
			goto tryagain;
		}
		mutex_exit(&vp->v_lock);
		remque(ip);
		ip->i_forw = ip;
		ip->i_back = ip;
		mutex_enter(&ud_nino_lock);
		ud_cur_inodes--;
		mutex_exit(&ud_nino_lock);
		mutex_exit(&ud_icache_lock);
		if (ip->i_icb_prn != 0xFFFF) {
			ud_iupdat(ip, 0);
		}
		addr = (caddr_t)ip->i_ext;
		size = sizeof (struct icb_ext) * ip->i_ext_count;
		ip->i_ext = 0;
		ip->i_ext_count = ip->i_ext_used = 0;
		addr1 = (caddr_t)ip->i_con;
		size1 = sizeof (struct icb_ext) * ip->i_con_count;
		ip->i_con = 0;
		ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
		cv_destroy(&ip->i_wrcv);  /* throttling */
		rw_destroy(&ip->i_rwlock);
		rw_exit(&ip->i_contents);
		rw_destroy(&ip->i_contents);
		kmem_free(addr, size);
		kmem_free(addr1, size1);
		ip->i_marker3 = (uint32_t)0xDDDDDDDD;
		vn_free(vp);
		kmem_free(ip, sizeof (struct ud_inode));
	}
}

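/*
 * Write the in-core inode back to its on-disk file_entry.  Nothing is
 * written unless one of the IUPD/IACC/ICHG/IMOD/IMODACC flags is set,
 * and updates are silently dropped on read-only mounts.
 */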
void
ud_iupdat(struct ud_inode *ip, int32_t waitfor)
{
	uint16_t flag, tag_flags;
	int32_t error, crc_len = 0;
	struct buf *bp;
	struct udf_vfs *udf_vfsp;
	struct file_entry *fe;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	ud_printf("ud_iupdat\n");
	/*
	 * Return if file system has been forcibly umounted.
	 */
	if (ip->i_udf == NULL) {
		return;
	}

	udf_vfsp = ip->i_udf;
	flag = ip->i_flag;	/* Atomic read */
	if ((flag & (IUPD|IACC|ICHG|IMOD|IMODACC)) != 0) {
		if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
			ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC|IATTCHG);
			return;
		}

		bp = ud_bread(ip->i_dev,
		    ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
		    ip->i_udf->udf_lbsize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return;
		}
		fe = (struct file_entry *)bp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
		    ip->i_icb_block,
		    1, ip->i_udf->udf_lbsize) != 0) {
			brelse(bp);
			return;
		}

		mutex_enter(&ip->i_tlock);
		if (ip->i_flag & (IUPD|IACC|ICHG)) {
			IMARK(ip);
		}
		ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC);
		mutex_exit(&ip->i_tlock);

12010Sstevel@tonic-gate 		fe->fe_uid = SWAP_32(ip->i_uid);
12020Sstevel@tonic-gate 		fe->fe_gid = SWAP_32(ip->i_gid);
12030Sstevel@tonic-gate 
12040Sstevel@tonic-gate 		fe->fe_perms = SWAP_32(ip->i_perm);
12050Sstevel@tonic-gate 
12060Sstevel@tonic-gate 		fe->fe_lcount = SWAP_16(ip->i_nlink);
12070Sstevel@tonic-gate 		fe->fe_info_len = SWAP_64(ip->i_size);
12080Sstevel@tonic-gate 		fe->fe_lbr = SWAP_64(ip->i_lbr);
12090Sstevel@tonic-gate 
12100Sstevel@tonic-gate 		ud_utime2dtime(&ip->i_atime, &fe->fe_acc_time);
12110Sstevel@tonic-gate 		ud_utime2dtime(&ip->i_mtime, &fe->fe_mod_time);
12120Sstevel@tonic-gate 		ud_utime2dtime(&ip->i_ctime, &fe->fe_attr_time);
12130Sstevel@tonic-gate 
12140Sstevel@tonic-gate 		if (ip->i_char & ISUID) {
12150Sstevel@tonic-gate 			tag_flags = ICB_FLAG_SETUID;
12160Sstevel@tonic-gate 		} else {
12170Sstevel@tonic-gate 			tag_flags = 0;
12180Sstevel@tonic-gate 		}
12190Sstevel@tonic-gate 		if (ip->i_char & ISGID) {
12200Sstevel@tonic-gate 			tag_flags |= ICB_FLAG_SETGID;
12210Sstevel@tonic-gate 		}
12220Sstevel@tonic-gate 		if (ip->i_char & ISVTX) {
12230Sstevel@tonic-gate 			tag_flags |= ICB_FLAG_STICKY;
12240Sstevel@tonic-gate 		}
12250Sstevel@tonic-gate 		tag_flags |= ip->i_desc_type;
12260Sstevel@tonic-gate 
12270Sstevel@tonic-gate 		/*
12280Sstevel@tonic-gate 		 * Remove the following it is no longer contig
12290Sstevel@tonic-gate 		 * Removed the following; it is no longer contiguous:
12300Sstevel@tonic-gate 		 *	tag_flags |= ICB_FLAG_CONTIG;
12310Sstevel@tonic-gate 		 * }
12320Sstevel@tonic-gate 		 */
12330Sstevel@tonic-gate 
12340Sstevel@tonic-gate 		fe->fe_icb_tag.itag_flags &= ~SWAP_16((uint16_t)0x3C3);
12350Sstevel@tonic-gate 		fe->fe_icb_tag.itag_strategy = SWAP_16(ip->i_astrat);
12360Sstevel@tonic-gate 		fe->fe_icb_tag.itag_flags |= SWAP_16(tag_flags);
12370Sstevel@tonic-gate 
12380Sstevel@tonic-gate 		ud_update_regid(&fe->fe_impl_id);
12390Sstevel@tonic-gate 
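		/*
		 * ((uint32_t)&((struct file_entry *)0)->fe_spec) is the
		 * classic offsetof() idiom: the byte offset of fe_spec
		 * within the file entry.  crc_len therefore covers the
		 * fixed file entry header plus the extended attribute area.
		 */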
12400Sstevel@tonic-gate 		crc_len = ((uint32_t)&((struct file_entry *)0)->fe_spec) +
124112196SMilan.Cermak@Sun.COM 		    SWAP_32(fe->fe_len_ear);
12420Sstevel@tonic-gate 		if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
12430Sstevel@tonic-gate 			crc_len += ip->i_size;
12440Sstevel@tonic-gate 			fe->fe_len_adesc = SWAP_32(((uint32_t)ip->i_size));
124512196SMilan.Cermak@Sun.COM 		} else if ((ip->i_size != 0) && (ip->i_ext != NULL) &&
124612196SMilan.Cermak@Sun.COM 		    (ip->i_ext_used != 0)) {
12470Sstevel@tonic-gate 
12480Sstevel@tonic-gate 			if ((error = ud_read_icb_till_off(ip,
124912196SMilan.Cermak@Sun.COM 			    ip->i_size)) == 0) {
12500Sstevel@tonic-gate 				if (ip->i_astrat == STRAT_TYPE4) {
12510Sstevel@tonic-gate 					error = ud_updat_ext4(ip, fe);
12520Sstevel@tonic-gate 				} else if (ip->i_astrat == STRAT_TYPE4096) {
12530Sstevel@tonic-gate 					error = ud_updat_ext4096(ip, fe);
12540Sstevel@tonic-gate 				}
12550Sstevel@tonic-gate 				if (error) {
12560Sstevel@tonic-gate 					udf_vfsp->udf_mark_bad = 1;
12570Sstevel@tonic-gate 				}
12580Sstevel@tonic-gate 			}
12590Sstevel@tonic-gate 			crc_len += SWAP_32(fe->fe_len_adesc);
12600Sstevel@tonic-gate 		} else {
12610Sstevel@tonic-gate 			fe->fe_len_adesc = 0;
12620Sstevel@tonic-gate 		}
12630Sstevel@tonic-gate 
12640Sstevel@tonic-gate 		/*
12650Sstevel@tonic-gate 		 * Zero out the rest of the block
12660Sstevel@tonic-gate 		 */
12670Sstevel@tonic-gate 		bzero(bp->b_un.b_addr + crc_len,
126812196SMilan.Cermak@Sun.COM 		    ip->i_udf->udf_lbsize - crc_len);
12690Sstevel@tonic-gate 
12700Sstevel@tonic-gate 		ud_make_tag(ip->i_udf, &fe->fe_tag,
127112196SMilan.Cermak@Sun.COM 		    UD_FILE_ENTRY, ip->i_icb_block, crc_len);
12720Sstevel@tonic-gate 
12730Sstevel@tonic-gate 
12740Sstevel@tonic-gate 		if (waitfor) {
12750Sstevel@tonic-gate 			BWRITE(bp);
12760Sstevel@tonic-gate 
12770Sstevel@tonic-gate 			/*
12780Sstevel@tonic-gate 			 * Synchronous write has guaranteed that inode
12790Sstevel@tonic-gate 			 * has been written on disk so clear the flag
12800Sstevel@tonic-gate 			 */
12810Sstevel@tonic-gate 			ip->i_flag &= ~(IBDWRITE);
12820Sstevel@tonic-gate 		} else {
12830Sstevel@tonic-gate 			bdwrite(bp);
12840Sstevel@tonic-gate 
12850Sstevel@tonic-gate 			/*
12860Sstevel@tonic-gate 			 * This write does not guarantee that the inode has
12870Sstevel@tonic-gate 			 * been written to the disk.
12880Sstevel@tonic-gate 			 * Since all update flags on the inode are cleared, we
12890Sstevel@tonic-gate 			 * must remember the condition in case the inode is to be
12900Sstevel@tonic-gate 			 * updated synchronously later (e.g. fsync()/fdatasync())
12910Sstevel@tonic-gate 			 * and the inode has not been modified yet.
12920Sstevel@tonic-gate 			 */
12930Sstevel@tonic-gate 			ip->i_flag |= (IBDWRITE);
12940Sstevel@tonic-gate 		}
12950Sstevel@tonic-gate 	} else {
12960Sstevel@tonic-gate 		/*
12970Sstevel@tonic-gate 		 * In case previous inode update was done asynchronously
12980Sstevel@tonic-gate 		 * (IBDWRITE) and this inode update request wants guaranteed
12990Sstevel@tonic-gate 		 * (synchronous) disk update, flush the inode.
13000Sstevel@tonic-gate 		 */
13010Sstevel@tonic-gate 		if (waitfor && (flag & IBDWRITE)) {
130212196SMilan.Cermak@Sun.COM 			blkflush(ip->i_dev,
130312196SMilan.Cermak@Sun.COM 			    (daddr_t)fsbtodb(udf_vfsp, ip->i_icb_lbano));
13040Sstevel@tonic-gate 			ip->i_flag &= ~(IBDWRITE);
13050Sstevel@tonic-gate 		}
13060Sstevel@tonic-gate 	}
13070Sstevel@tonic-gate }
13080Sstevel@tonic-gate 
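/*
 * ud_updat_ext4() copies the in-core extent list (ip->i_ext) back into
 * on-disk allocation descriptors: as many as fit go into the embedded
 * area of the file entry, and the remainder go into the chained
 * allocation extent descriptor (continuation) blocks tracked by
 * ip->i_con.  Continuation blocks that are no longer needed are freed.
 */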
13090Sstevel@tonic-gate int32_t
13100Sstevel@tonic-gate ud_updat_ext4(struct ud_inode *ip, struct file_entry *fe)
13110Sstevel@tonic-gate {
13120Sstevel@tonic-gate 	uint32_t dummy;
13130Sstevel@tonic-gate 	int32_t elen, ndent, index, count, con_index;
13140Sstevel@tonic-gate 	daddr_t bno;
13150Sstevel@tonic-gate 	struct buf *bp;
13160Sstevel@tonic-gate 	struct short_ad *sad;
13170Sstevel@tonic-gate 	struct long_ad *lad;
13180Sstevel@tonic-gate 	struct icb_ext *iext, *icon;
13190Sstevel@tonic-gate 
13200Sstevel@tonic-gate 
13210Sstevel@tonic-gate 	ASSERT(ip);
13220Sstevel@tonic-gate 	ASSERT(fe);
13230Sstevel@tonic-gate 	ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
132412196SMilan.Cermak@Sun.COM 	    (ip->i_desc_type == ICB_FLAG_LONG_AD));
13250Sstevel@tonic-gate 
13260Sstevel@tonic-gate 	if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
13270Sstevel@tonic-gate 		elen = sizeof (struct short_ad);
13280Sstevel@tonic-gate 		sad = (struct short_ad *)
132912196SMilan.Cermak@Sun.COM 		    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
13300Sstevel@tonic-gate 	} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
13310Sstevel@tonic-gate 		elen = sizeof (struct long_ad);
13320Sstevel@tonic-gate 		lad = (struct long_ad *)
133312196SMilan.Cermak@Sun.COM 		    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
13340Sstevel@tonic-gate 	} else {
13350Sstevel@tonic-gate 		/* This cannot happen; return an error */
13360Sstevel@tonic-gate 		return (EINVAL);
13370Sstevel@tonic-gate 	}
13380Sstevel@tonic-gate 
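	/* Number of allocation descriptors that fit in the embedded area */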
13390Sstevel@tonic-gate 	ndent = ip->i_max_emb / elen;
13400Sstevel@tonic-gate 
13410Sstevel@tonic-gate 	if (ip->i_ext_used < ndent) {
13420Sstevel@tonic-gate 
13430Sstevel@tonic-gate 		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
13440Sstevel@tonic-gate 			ud_make_sad(ip->i_ext, sad, ip->i_ext_used);
13450Sstevel@tonic-gate 		} else {
13460Sstevel@tonic-gate 			ud_make_lad(ip->i_ext, lad, ip->i_ext_used);
13470Sstevel@tonic-gate 		}
13480Sstevel@tonic-gate 		fe->fe_len_adesc = SWAP_32(ip->i_ext_used * elen);
13490Sstevel@tonic-gate 		con_index = 0;
13500Sstevel@tonic-gate 	} else {
13510Sstevel@tonic-gate 
13520Sstevel@tonic-gate 		con_index = index = 0;
13530Sstevel@tonic-gate 
13540Sstevel@tonic-gate 		while (index < ip->i_ext_used) {
13550Sstevel@tonic-gate 			if (index == 0) {
13560Sstevel@tonic-gate 				/*
13570Sstevel@tonic-gate 				 * bp is already read.
13580Sstevel@tonic-gate 				 * The first few extents go
13590Sstevel@tonic-gate 				 * into the file_entry itself.
13600Sstevel@tonic-gate 				 */
13610Sstevel@tonic-gate 				count = ndent - 1;
136212196SMilan.Cermak@Sun.COM 				fe->fe_len_adesc = SWAP_32(ndent * elen);
13630Sstevel@tonic-gate 				bp = NULL;
13640Sstevel@tonic-gate 
13650Sstevel@tonic-gate 				/*
13660Sstevel@tonic-gate 				 * The last entry is reserved for the continuation extent
13670Sstevel@tonic-gate 				 */
13680Sstevel@tonic-gate 				icon = &ip->i_con[con_index];
13690Sstevel@tonic-gate 			} else {
13700Sstevel@tonic-gate 				/*
13710Sstevel@tonic-gate 				 * Read the buffer
13720Sstevel@tonic-gate 				 */
13730Sstevel@tonic-gate 				icon = &ip->i_con[con_index];
13740Sstevel@tonic-gate 
13750Sstevel@tonic-gate 				bno = ud_xlate_to_daddr(ip->i_udf,
137612196SMilan.Cermak@Sun.COM 				    icon->ib_prn, icon->ib_block,
137712196SMilan.Cermak@Sun.COM 				    icon->ib_count >> ip->i_udf->udf_l2d_shift,
137812196SMilan.Cermak@Sun.COM 				    &dummy);
137912196SMilan.Cermak@Sun.COM 				bp = ud_bread(ip->i_dev,
138012196SMilan.Cermak@Sun.COM 				    bno << ip->i_udf->udf_l2d_shift,
138112196SMilan.Cermak@Sun.COM 				    ip->i_udf->udf_lbsize);
13820Sstevel@tonic-gate 				if (bp->b_flags & B_ERROR) {
13830Sstevel@tonic-gate 					brelse(bp);
13840Sstevel@tonic-gate 					return (EIO);
13850Sstevel@tonic-gate 				}
13860Sstevel@tonic-gate 
13870Sstevel@tonic-gate 				/*
13880Sstevel@tonic-gate 				 * Figure out how many extents fit
13890Sstevel@tonic-gate 				 * in this block
13900Sstevel@tonic-gate 				 */
13910Sstevel@tonic-gate 				count = (bp->b_bcount -
139212196SMilan.Cermak@Sun.COM 				    sizeof (struct alloc_ext_desc)) / elen;
13930Sstevel@tonic-gate 				if (count > (ip->i_ext_used - index)) {
13940Sstevel@tonic-gate 					count = ip->i_ext_used - index;
13950Sstevel@tonic-gate 				} else {
13960Sstevel@tonic-gate 					count--;
13970Sstevel@tonic-gate 				}
13980Sstevel@tonic-gate 				con_index++;
13990Sstevel@tonic-gate 				if (con_index >= ip->i_con_used) {
14000Sstevel@tonic-gate 					icon = NULL;
14010Sstevel@tonic-gate 				} else {
14020Sstevel@tonic-gate 					icon = &ip->i_con[con_index];
14030Sstevel@tonic-gate 				}
14040Sstevel@tonic-gate 			}
14050Sstevel@tonic-gate 
14060Sstevel@tonic-gate 
14070Sstevel@tonic-gate 
14080Sstevel@tonic-gate 			/*
14090Sstevel@tonic-gate 			 * convert to on disk form and
14100Sstevel@tonic-gate 			 * update
14110Sstevel@tonic-gate 			 */
14120Sstevel@tonic-gate 			iext = &ip->i_ext[index];
14130Sstevel@tonic-gate 			if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
14140Sstevel@tonic-gate 				if (index != 0) {
14150Sstevel@tonic-gate 					sad = (struct short_ad *)
141612196SMilan.Cermak@Sun.COM 					    (bp->b_un.b_addr +
141712196SMilan.Cermak@Sun.COM 					    sizeof (struct alloc_ext_desc));
14180Sstevel@tonic-gate 				}
14190Sstevel@tonic-gate 				ud_make_sad(iext, sad, count);
14200Sstevel@tonic-gate 				sad += count;
14210Sstevel@tonic-gate 				if (icon != NULL) {
14220Sstevel@tonic-gate 					ud_make_sad(icon, sad, 1);
14230Sstevel@tonic-gate 				}
14240Sstevel@tonic-gate 			} else {
14250Sstevel@tonic-gate 				if (index != 0) {
14260Sstevel@tonic-gate 					lad = (struct long_ad *)
142712196SMilan.Cermak@Sun.COM 					    (bp->b_un.b_addr +
142812196SMilan.Cermak@Sun.COM 					    sizeof (struct alloc_ext_desc));
14290Sstevel@tonic-gate 				}
14300Sstevel@tonic-gate 				ud_make_lad(iext, lad, count);
14310Sstevel@tonic-gate 				lad += count;
14320Sstevel@tonic-gate 				if (icon != NULL) {
14330Sstevel@tonic-gate 					ud_make_lad(icon, lad, 1);
14340Sstevel@tonic-gate 				}
14350Sstevel@tonic-gate 			}
14360Sstevel@tonic-gate 
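			/*
			 * A continuation block was just filled in: record
			 * how many descriptor bytes it holds (aed_len_aed),
			 * fill in the aed_rev_ael field, and stamp a fresh
			 * descriptor tag for the block before it is written.
			 */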
14370Sstevel@tonic-gate 			if (con_index != 0) {
14380Sstevel@tonic-gate 				struct alloc_ext_desc *aed;
14390Sstevel@tonic-gate 				int32_t sz;
14400Sstevel@tonic-gate 				struct icb_ext *oicon;
14410Sstevel@tonic-gate 
14420Sstevel@tonic-gate 				oicon = &ip->i_con[con_index - 1];
14430Sstevel@tonic-gate 				sz = count * elen;
14440Sstevel@tonic-gate 				if (icon != NULL) {
14450Sstevel@tonic-gate 					sz += elen;
14460Sstevel@tonic-gate 				}
14470Sstevel@tonic-gate 				aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
14480Sstevel@tonic-gate 				aed->aed_len_aed = SWAP_32(sz);
14490Sstevel@tonic-gate 				if (con_index == 1) {
14500Sstevel@tonic-gate 					aed->aed_rev_ael =
145112196SMilan.Cermak@Sun.COM 					    SWAP_32(ip->i_icb_block);
14520Sstevel@tonic-gate 				} else {
14530Sstevel@tonic-gate 					aed->aed_rev_ael =
145412196SMilan.Cermak@Sun.COM 					    SWAP_32(oicon->ib_block);
14550Sstevel@tonic-gate 				}
14560Sstevel@tonic-gate 				sz += sizeof (struct alloc_ext_desc);
14570Sstevel@tonic-gate 				ud_make_tag(ip->i_udf, &aed->aed_tag,
145812196SMilan.Cermak@Sun.COM 				    UD_ALLOC_EXT_DESC, oicon->ib_block, sz);
14590Sstevel@tonic-gate 			}
14600Sstevel@tonic-gate 
14610Sstevel@tonic-gate 			/*
14620Sstevel@tonic-gate 			 * Write back to disk
14630Sstevel@tonic-gate 			 */
14640Sstevel@tonic-gate 			if (bp != NULL) {
14650Sstevel@tonic-gate 				BWRITE(bp);
14660Sstevel@tonic-gate 			}
14670Sstevel@tonic-gate 			index += count;
14680Sstevel@tonic-gate 		}
14690Sstevel@tonic-gate 
14700Sstevel@tonic-gate 	}
14710Sstevel@tonic-gate 
14720Sstevel@tonic-gate 	if (con_index != ip->i_con_used) {
14730Sstevel@tonic-gate 		int32_t lbmask, l2b, temp;
14740Sstevel@tonic-gate 
14750Sstevel@tonic-gate 		temp = con_index;
14760Sstevel@tonic-gate 		lbmask = ip->i_udf->udf_lbmask;
14770Sstevel@tonic-gate 		l2b = ip->i_udf->udf_l2b_shift;
14780Sstevel@tonic-gate 		/*
14790Sstevel@tonic-gate 		 * Free unused continuation extents
14800Sstevel@tonic-gate 		 */
14810Sstevel@tonic-gate 		for (; con_index < ip->i_con_used; con_index++) {
14820Sstevel@tonic-gate 			icon = &ip->i_con[con_index];
14830Sstevel@tonic-gate 			count = (icon->ib_count + lbmask) >> l2b;
14840Sstevel@tonic-gate 			ud_free_space(ip->i_udf->udf_vfs, icon->ib_prn,
148512196SMilan.Cermak@Sun.COM 			    icon->ib_block, count);
14860Sstevel@tonic-gate 			count = (count << l2b) - sizeof (struct alloc_ext_desc);
14870Sstevel@tonic-gate 			ip->i_cur_max_ext -= (count / elen) - 1;
14880Sstevel@tonic-gate 		}
14890Sstevel@tonic-gate 		ip->i_con_used = temp;
14900Sstevel@tonic-gate 	}
14910Sstevel@tonic-gate 	return (0);
14920Sstevel@tonic-gate }
14930Sstevel@tonic-gate 
14940Sstevel@tonic-gate /* ARGSUSED */
14950Sstevel@tonic-gate int32_t
14960Sstevel@tonic-gate ud_updat_ext4096(struct ud_inode *ip, struct file_entry *fe)
14970Sstevel@tonic-gate {
14980Sstevel@tonic-gate 	return (ENXIO);
14990Sstevel@tonic-gate }
15000Sstevel@tonic-gate 
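/*
 * ud_make_sad() converts "count" in-core extents to on-disk short_ad
 * form: the low 30 bits of the length word carry the byte count and the
 * top two bits carry the extent flags.
 */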
15010Sstevel@tonic-gate void
15020Sstevel@tonic-gate ud_make_sad(struct icb_ext *iext, struct short_ad *sad, int32_t count)
15030Sstevel@tonic-gate {
15040Sstevel@tonic-gate 	int32_t index = 0, scount;
15050Sstevel@tonic-gate 
15060Sstevel@tonic-gate 	ASSERT(iext);
15070Sstevel@tonic-gate 	ASSERT(sad);
15080Sstevel@tonic-gate 
15090Sstevel@tonic-gate 	if (count != 0) {
15100Sstevel@tonic-gate 		ASSERT(count > 0);
15110Sstevel@tonic-gate 		while (index < count) {
15120Sstevel@tonic-gate 			scount = (iext->ib_count & 0x3FFFFFFF) |
151312196SMilan.Cermak@Sun.COM 			    (iext->ib_flags << 30);
15140Sstevel@tonic-gate 			sad->sad_ext_len = SWAP_32(scount);
15150Sstevel@tonic-gate 			sad->sad_ext_loc = SWAP_32(iext->ib_block);
15160Sstevel@tonic-gate 			sad++;
15170Sstevel@tonic-gate 			iext++;
15180Sstevel@tonic-gate 			index++;
15190Sstevel@tonic-gate 		}
15200Sstevel@tonic-gate 	}
15210Sstevel@tonic-gate }
15220Sstevel@tonic-gate 
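/*
 * ud_make_lad() is the long_ad variant of ud_make_sad(): in addition to
 * the flagged length and block number it also records the partition
 * reference number of each extent.
 */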
15230Sstevel@tonic-gate void
15240Sstevel@tonic-gate ud_make_lad(struct icb_ext *iext, struct long_ad *lad, int32_t count)
15250Sstevel@tonic-gate {
15260Sstevel@tonic-gate 	int32_t index = 0, scount;
15270Sstevel@tonic-gate 
15280Sstevel@tonic-gate 	ASSERT(iext);
15290Sstevel@tonic-gate 	ASSERT(lad);
15300Sstevel@tonic-gate 
15310Sstevel@tonic-gate 	if (count != 0) {
15320Sstevel@tonic-gate 		ASSERT(count > 0);
15330Sstevel@tonic-gate 
15340Sstevel@tonic-gate 		while (index < count) {
15350Sstevel@tonic-gate 			lad->lad_ext_prn = SWAP_16(iext->ib_prn);
15360Sstevel@tonic-gate 			scount = (iext->ib_count & 0x3FFFFFFF) |
153712196SMilan.Cermak@Sun.COM 			    (iext->ib_flags << 30);
15380Sstevel@tonic-gate 			lad->lad_ext_len = SWAP_32(scount);
15390Sstevel@tonic-gate 			lad->lad_ext_loc = SWAP_32(iext->ib_block);
15400Sstevel@tonic-gate 			lad++;
15410Sstevel@tonic-gate 			iext++;
15420Sstevel@tonic-gate 			index++;
15430Sstevel@tonic-gate 		}
15440Sstevel@tonic-gate 	}
15450Sstevel@tonic-gate }
15460Sstevel@tonic-gate 
15470Sstevel@tonic-gate /*
15480Sstevel@tonic-gate  * Truncate the inode ip to at most length bytes.
15490Sstevel@tonic-gate  * Free affected disk blocks -- the blocks of the
15500Sstevel@tonic-gate  * file are removed in reverse order.
15510Sstevel@tonic-gate  */
15520Sstevel@tonic-gate /* ARGSUSED */
15530Sstevel@tonic-gate int
15540Sstevel@tonic-gate ud_itrunc(struct ud_inode *oip, u_offset_t length,
155512196SMilan.Cermak@Sun.COM     int32_t flags, struct cred *cr)
15560Sstevel@tonic-gate {
15570Sstevel@tonic-gate 	int32_t error, boff;
15580Sstevel@tonic-gate 	off_t bsize;
15590Sstevel@tonic-gate 	mode_t mode;
15600Sstevel@tonic-gate 	struct udf_vfs *udf_vfsp;
15610Sstevel@tonic-gate 
15620Sstevel@tonic-gate 	ud_printf("ud_itrunc\n");
15630Sstevel@tonic-gate 
15640Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&oip->i_contents));
15650Sstevel@tonic-gate 	udf_vfsp = oip->i_udf;
15660Sstevel@tonic-gate 	bsize = udf_vfsp->udf_lbsize;
15670Sstevel@tonic-gate 
15680Sstevel@tonic-gate 	/*
15690Sstevel@tonic-gate 	 * We only allow truncation of regular files and directories
15700Sstevel@tonic-gate 	 * to arbitrary lengths here.  In addition, we allow symbolic
15710Sstevel@tonic-gate 	 * links to be truncated only to zero length.  Other inode
15720Sstevel@tonic-gate 	 * types cannot have their length set here.
15730Sstevel@tonic-gate 	 */
15740Sstevel@tonic-gate 	mode = oip->i_type;
15750Sstevel@tonic-gate 	if (mode == VFIFO) {
15760Sstevel@tonic-gate 		return (0);
15770Sstevel@tonic-gate 	}
15780Sstevel@tonic-gate 	if ((mode != VREG) && (mode != VDIR) &&
157912196SMilan.Cermak@Sun.COM 	    (!(mode == VLNK && length == 0))) {
15800Sstevel@tonic-gate 		return (EINVAL);
15810Sstevel@tonic-gate 	}
15820Sstevel@tonic-gate 	if (length == oip->i_size) {
15830Sstevel@tonic-gate 		/* update ctime and mtime to please POSIX tests */
15840Sstevel@tonic-gate 		mutex_enter(&oip->i_tlock);
15850Sstevel@tonic-gate 		oip->i_flag |= ICHG |IUPD;
15860Sstevel@tonic-gate 		mutex_exit(&oip->i_tlock);
15870Sstevel@tonic-gate 		return (0);
15880Sstevel@tonic-gate 	}
15890Sstevel@tonic-gate 
15900Sstevel@tonic-gate 	boff = blkoff(udf_vfsp, length);
15910Sstevel@tonic-gate 
15920Sstevel@tonic-gate 	if (length > oip->i_size) {
15930Sstevel@tonic-gate 		/*
15940Sstevel@tonic-gate 		 * Trunc up case. ud_bmap_write will ensure that the right blocks
15950Sstevel@tonic-gate 		 * are allocated.  This includes doing any work needed for
15960Sstevel@tonic-gate 		 * allocating the last block.
15970Sstevel@tonic-gate 		 */
15980Sstevel@tonic-gate 		if (boff == 0) {
15990Sstevel@tonic-gate 			error = ud_bmap_write(oip, length - 1,
160012196SMilan.Cermak@Sun.COM 			    (int)bsize, 0, cr);
16010Sstevel@tonic-gate 		} else {
16020Sstevel@tonic-gate 			error = ud_bmap_write(oip, length - 1, boff, 0, cr);
16030Sstevel@tonic-gate 		}
16040Sstevel@tonic-gate 		if (error == 0) {
16050Sstevel@tonic-gate 			u_offset_t osize = oip->i_size;
16060Sstevel@tonic-gate 			oip->i_size  = length;
16070Sstevel@tonic-gate 
16080Sstevel@tonic-gate 			/*
16090Sstevel@tonic-gate 			 * Make sure we zero out the remaining bytes of
16100Sstevel@tonic-gate 			 * the page in case a mmap scribbled on it. We
16110Sstevel@tonic-gate 			 * can't prevent a mmap from writing beyond EOF
16120Sstevel@tonic-gate 			 * on the last page of a file.
16130Sstevel@tonic-gate 			 */
16140Sstevel@tonic-gate 			if ((boff = blkoff(udf_vfsp, osize)) != 0) {
16150Sstevel@tonic-gate 				pvn_vpzero(ITOV(oip), osize,
161612196SMilan.Cermak@Sun.COM 				    (uint32_t)(bsize - boff));
16170Sstevel@tonic-gate 			}
16180Sstevel@tonic-gate 			mutex_enter(&oip->i_tlock);
16190Sstevel@tonic-gate 			oip->i_flag |= ICHG;
16200Sstevel@tonic-gate 			ITIMES_NOLOCK(oip);
16210Sstevel@tonic-gate 			mutex_exit(&oip->i_tlock);
16220Sstevel@tonic-gate 		}
16230Sstevel@tonic-gate 		return (error);
16240Sstevel@tonic-gate 	}
16250Sstevel@tonic-gate 
16260Sstevel@tonic-gate 	/*
16270Sstevel@tonic-gate 	 * Update the pages of the file.  If the file is not being
16280Sstevel@tonic-gate 	 * truncated to a block boundary, the contents of the
16290Sstevel@tonic-gate 	 * pages following the end of the file must be zeroed
16300Sstevel@tonic-gate 	 * in case they ever become accessible again because
16310Sstevel@tonic-gate 	 * of subsequent file growth.
16320Sstevel@tonic-gate 	 */
16330Sstevel@tonic-gate 	if (boff == 0) {
16340Sstevel@tonic-gate 		(void) pvn_vplist_dirty(ITOV(oip), length,
163512196SMilan.Cermak@Sun.COM 		    ud_putapage, B_INVAL | B_TRUNC, CRED());
16360Sstevel@tonic-gate 	} else {
16370Sstevel@tonic-gate 		/*
16380Sstevel@tonic-gate 		 * Make sure that the last block is properly allocated.
16390Sstevel@tonic-gate 		 * We only really have to do this if the last block is
16400Sstevel@tonic-gate 		 * actually allocated.  Just to be sure, we do it now
16410Sstevel@tonic-gate 		 * independent of current allocation.
16420Sstevel@tonic-gate 		 */
16430Sstevel@tonic-gate 		error = ud_bmap_write(oip, length - 1, boff, 0, cr);
16440Sstevel@tonic-gate 		if (error) {
16450Sstevel@tonic-gate 			return (error);
16460Sstevel@tonic-gate 		}
16470Sstevel@tonic-gate 
16480Sstevel@tonic-gate 		pvn_vpzero(ITOV(oip), length, (uint32_t)(bsize - boff));
16490Sstevel@tonic-gate 
16500Sstevel@tonic-gate 		(void) pvn_vplist_dirty(ITOV(oip), length,
165112196SMilan.Cermak@Sun.COM 		    ud_putapage, B_INVAL | B_TRUNC, CRED());
16520Sstevel@tonic-gate 	}
16530Sstevel@tonic-gate 
16540Sstevel@tonic-gate 
16550Sstevel@tonic-gate 	/* Free the blocks */
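	/*
	 * ICB_FLAG_ONE_AD means the file data is embedded in the file
	 * entry itself, so the new length can never exceed the embedded
	 * area (i_max_emb).  Otherwise walk the extent list and let the
	 * strategy-specific routine free the blocks past the new EOF.
	 */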
16560Sstevel@tonic-gate 	if (oip->i_desc_type == ICB_FLAG_ONE_AD) {
16570Sstevel@tonic-gate 		if (length > oip->i_max_emb) {
16580Sstevel@tonic-gate 			return (EFBIG);
16590Sstevel@tonic-gate 		}
16600Sstevel@tonic-gate 		oip->i_size = length;
16610Sstevel@tonic-gate 		mutex_enter(&oip->i_tlock);
16620Sstevel@tonic-gate 		oip->i_flag |= ICHG|IUPD;
16630Sstevel@tonic-gate 		mutex_exit(&oip->i_tlock);
16640Sstevel@tonic-gate 		ud_iupdat(oip, 1);
16650Sstevel@tonic-gate 	} else {
16660Sstevel@tonic-gate 		if ((error = ud_read_icb_till_off(oip, oip->i_size)) != 0) {
16670Sstevel@tonic-gate 			return (error);
16680Sstevel@tonic-gate 		}
16690Sstevel@tonic-gate 
16700Sstevel@tonic-gate 		if (oip->i_astrat == STRAT_TYPE4) {
16710Sstevel@tonic-gate 			ud_trunc_ext4(oip, length);
16720Sstevel@tonic-gate 		} else if (oip->i_astrat == STRAT_TYPE4096) {
16730Sstevel@tonic-gate 			ud_trunc_ext4096(oip, length);
16740Sstevel@tonic-gate 		}
16750Sstevel@tonic-gate 	}
16760Sstevel@tonic-gate 
16770Sstevel@tonic-gate done:
16780Sstevel@tonic-gate 	return (0);
16790Sstevel@tonic-gate }
16800Sstevel@tonic-gate 
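/*
 * ud_trunc_ext4() shortens the in-core extent list to the new length:
 * the extent containing "length" is clipped, every later extent is
 * freed back to the partition, and continuation (allocation extent
 * descriptor) blocks that are no longer needed are released as well.
 */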
16810Sstevel@tonic-gate void
16820Sstevel@tonic-gate ud_trunc_ext4(struct ud_inode *ip, u_offset_t length)
16830Sstevel@tonic-gate {
16840Sstevel@tonic-gate 	int32_t index, l2b, count, ecount;
16850Sstevel@tonic-gate 	int32_t elen, ndent, nient;
16860Sstevel@tonic-gate 	u_offset_t ext_beg, ext_end;
16870Sstevel@tonic-gate 	struct icb_ext *iext, *icon;
16880Sstevel@tonic-gate 	int32_t lbmask, ext_used;
16890Sstevel@tonic-gate 	uint32_t loc;
16900Sstevel@tonic-gate 	struct icb_ext text;
16910Sstevel@tonic-gate 	uint32_t con_freed;
16920Sstevel@tonic-gate 
16930Sstevel@tonic-gate 	ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
169412196SMilan.Cermak@Sun.COM 	    (ip->i_desc_type == ICB_FLAG_LONG_AD));
16950Sstevel@tonic-gate 
16960Sstevel@tonic-gate 	if (ip->i_ext_used == 0) {
16970Sstevel@tonic-gate 		return;
16980Sstevel@tonic-gate 	}
16990Sstevel@tonic-gate 
17000Sstevel@tonic-gate 	ext_used = ip->i_ext_used;
17010Sstevel@tonic-gate 
17020Sstevel@tonic-gate 	lbmask = ip->i_udf->udf_lbmask;
17030Sstevel@tonic-gate 	l2b = ip->i_udf->udf_l2b_shift;
17040Sstevel@tonic-gate 
17050Sstevel@tonic-gate 	ASSERT(ip->i_ext);
17060Sstevel@tonic-gate 
17070Sstevel@tonic-gate 	ip->i_lbr = 0;
17080Sstevel@tonic-gate 	for (index = 0; index < ext_used; index++) {
17090Sstevel@tonic-gate 		iext = &ip->i_ext[index];
17100Sstevel@tonic-gate 
17110Sstevel@tonic-gate 		/*
17120Sstevel@tonic-gate 		 * Find the beginning and end
17130Sstevel@tonic-gate 		 * of the current extent
17140Sstevel@tonic-gate 		 */
17150Sstevel@tonic-gate 		ext_beg = iext->ib_offset;
17160Sstevel@tonic-gate 		ext_end = iext->ib_offset +
171712196SMilan.Cermak@Sun.COM 		    ((iext->ib_count + lbmask) & ~lbmask);
17180Sstevel@tonic-gate 
17190Sstevel@tonic-gate 		/*
17200Sstevel@tonic-gate 		 * This is the extent that contains offset "length".
17210Sstevel@tonic-gate 		 * Make a copy of this extent and
17220Sstevel@tonic-gate 		 * remember the index; we can use
17230Sstevel@tonic-gate 		 * it to free blocks.
17240Sstevel@tonic-gate 		 */
172512196SMilan.Cermak@Sun.COM 		if ((length <= ext_end) && (length >= ext_beg)) {
17260Sstevel@tonic-gate 			text = *iext;
17270Sstevel@tonic-gate 
17280Sstevel@tonic-gate 			iext->ib_count = length - ext_beg;
17290Sstevel@tonic-gate 			ip->i_ext_used = index + 1;
17300Sstevel@tonic-gate 			break;
17310Sstevel@tonic-gate 		}
17320Sstevel@tonic-gate 		if (iext->ib_flags != IB_UN_RE_AL) {
17330Sstevel@tonic-gate 			ip->i_lbr += iext->ib_count >> l2b;
17340Sstevel@tonic-gate 		}
17350Sstevel@tonic-gate 	}
17360Sstevel@tonic-gate 	if (ip->i_ext_used != index) {
17370Sstevel@tonic-gate 		if (iext->ib_flags != IB_UN_RE_AL) {
17380Sstevel@tonic-gate 			ip->i_lbr +=
173912196SMilan.Cermak@Sun.COM 			    ((iext->ib_count + lbmask) & ~lbmask) >> l2b;
17400Sstevel@tonic-gate 		}
17410Sstevel@tonic-gate 	}
17420Sstevel@tonic-gate 
17430Sstevel@tonic-gate 	ip->i_size = length;
17440Sstevel@tonic-gate 	mutex_enter(&ip->i_tlock);
17450Sstevel@tonic-gate 	ip->i_flag |= ICHG|IUPD;
17460Sstevel@tonic-gate 	mutex_exit(&ip->i_tlock);
17470Sstevel@tonic-gate 	ud_iupdat(ip, 1);
17480Sstevel@tonic-gate 
17490Sstevel@tonic-gate 	/*
17500Sstevel@tonic-gate 	 * Free the unused space
17510Sstevel@tonic-gate 	 */
17520Sstevel@tonic-gate 	if (text.ib_flags != IB_UN_RE_AL) {
17530Sstevel@tonic-gate 		count = (ext_end - length) >> l2b;
17540Sstevel@tonic-gate 		if (count) {
17550Sstevel@tonic-gate 			loc = text.ib_block +
175612196SMilan.Cermak@Sun.COM 			    (((length - text.ib_offset) + lbmask) >> l2b);
17570Sstevel@tonic-gate 			ud_free_space(ip->i_udf->udf_vfs, text.ib_prn,
175812196SMilan.Cermak@Sun.COM 			    loc, count);
17590Sstevel@tonic-gate 		}
17600Sstevel@tonic-gate 	}
17610Sstevel@tonic-gate 	for (index = ip->i_ext_used; index < ext_used; index++) {
17620Sstevel@tonic-gate 		iext = &ip->i_ext[index];
17630Sstevel@tonic-gate 		if (iext->ib_flags != IB_UN_RE_AL) {
17640Sstevel@tonic-gate 			count = (iext->ib_count + lbmask) >> l2b;
17650Sstevel@tonic-gate 			ud_free_space(ip->i_udf->udf_vfs, iext->ib_prn,
176612196SMilan.Cermak@Sun.COM 			    iext->ib_block, count);
17670Sstevel@tonic-gate 		}
17680Sstevel@tonic-gate 		bzero(iext, sizeof (struct icb_ext));
17690Sstevel@tonic-gate 		continue;
17700Sstevel@tonic-gate 	}
17710Sstevel@tonic-gate 
17720Sstevel@tonic-gate 	/*
17730Sstevel@tonic-gate 	 * release any continuation blocks
17740Sstevel@tonic-gate 	 */
17750Sstevel@tonic-gate 	if (ip->i_con) {
17760Sstevel@tonic-gate 
17770Sstevel@tonic-gate 		ASSERT(ip->i_con_count >= ip->i_con_used);
17780Sstevel@tonic-gate 
17790Sstevel@tonic-gate 		/*
17800Sstevel@tonic-gate 		 * Find out how many indirect blocks
17810Sstevel@tonic-gate 		 * are required and release the rest
17820Sstevel@tonic-gate 		 */
17830Sstevel@tonic-gate 		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
17840Sstevel@tonic-gate 			elen = sizeof (struct short_ad);
17850Sstevel@tonic-gate 		} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
17860Sstevel@tonic-gate 			elen = sizeof (struct long_ad);
17870Sstevel@tonic-gate 		}
17880Sstevel@tonic-gate 		ndent = ip->i_max_emb / elen;
17890Sstevel@tonic-gate 		if (ip->i_ext_used > ndent) {
17900Sstevel@tonic-gate 			ecount = ip->i_ext_used - ndent;
17910Sstevel@tonic-gate 		} else {
17920Sstevel@tonic-gate 			ecount = 0;
17930Sstevel@tonic-gate 		}
17940Sstevel@tonic-gate 		con_freed = 0;
17950Sstevel@tonic-gate 		for (index = 0; index < ip->i_con_used; index++) {
17960Sstevel@tonic-gate 			icon = &ip->i_con[index];
17970Sstevel@tonic-gate 			nient = icon->ib_count -
179812196SMilan.Cermak@Sun.COM 			    (sizeof (struct alloc_ext_desc) + elen);
179912196SMilan.Cermak@Sun.COM 			/* Header + 1 indirect extent */
18000Sstevel@tonic-gate 			nient /= elen;
18010Sstevel@tonic-gate 			if (ecount) {
18020Sstevel@tonic-gate 				if (ecount > nient) {
18030Sstevel@tonic-gate 					ecount -= nient;
18040Sstevel@tonic-gate 				} else {
18050Sstevel@tonic-gate 					ecount = 0;
18060Sstevel@tonic-gate 				}
18070Sstevel@tonic-gate 			} else {
18080Sstevel@tonic-gate 				count = ((icon->ib_count + lbmask) &
180912196SMilan.Cermak@Sun.COM 				    ~lbmask) >> l2b;
18100Sstevel@tonic-gate 				ud_free_space(ip->i_udf->udf_vfs,
181112196SMilan.Cermak@Sun.COM 				    icon->ib_prn, icon->ib_block, count);
18120Sstevel@tonic-gate 				con_freed++;
18130Sstevel@tonic-gate 				ip->i_cur_max_ext -= nient;
18140Sstevel@tonic-gate 			}
18150Sstevel@tonic-gate 		}
18160Sstevel@tonic-gate 		/*
18170Sstevel@tonic-gate 		 * set the continuation extents used (i_con_used) to the correct
18180Sstevel@tonic-gate 		 * value. It is possible for i_con_used to be zero,
18190Sstevel@tonic-gate 		 * if we free up all continuation extents. This happens
18200Sstevel@tonic-gate 		 * when ecount is 0 before entering the for loop above.
18210Sstevel@tonic-gate 		 */
18220Sstevel@tonic-gate 		ip->i_con_used -= con_freed;
18230Sstevel@tonic-gate 		if (ip->i_con_read > ip->i_con_used) {
18240Sstevel@tonic-gate 			ip->i_con_read = ip->i_con_used;
18250Sstevel@tonic-gate 		}
18260Sstevel@tonic-gate 	}
18270Sstevel@tonic-gate }
18280Sstevel@tonic-gate 
18290Sstevel@tonic-gate void
18300Sstevel@tonic-gate ud_trunc_ext4096(struct ud_inode *ip, u_offset_t length)
18310Sstevel@tonic-gate {
18320Sstevel@tonic-gate 	/*
18330Sstevel@tonic-gate 	 * The truncation code is the same for
18340Sstevel@tonic-gate 	 * both strategy type 4 and type 4096 files
18350Sstevel@tonic-gate 	 */
18360Sstevel@tonic-gate 	ud_trunc_ext4(ip, length);
18370Sstevel@tonic-gate }
18380Sstevel@tonic-gate 
18390Sstevel@tonic-gate /*
18400Sstevel@tonic-gate  * Remove any inodes in the inode cache belonging to dev
18410Sstevel@tonic-gate  *
18420Sstevel@tonic-gate  * There should not be any active ones, return error if any are found but
18430Sstevel@tonic-gate  * still invalidate others (N.B.: this is a user error, not a system error).
18440Sstevel@tonic-gate  *
18450Sstevel@tonic-gate  * Also, count the references to dev by block devices - this really
18460Sstevel@tonic-gate  * has nothing to do with the object of the procedure, but as we have
18470Sstevel@tonic-gate  * to scan the inode table here anyway, we might as well get the
18480Sstevel@tonic-gate  * extra benefit.
18490Sstevel@tonic-gate  */
18500Sstevel@tonic-gate int32_t
18510Sstevel@tonic-gate ud_iflush(struct vfs *vfsp)
18520Sstevel@tonic-gate {
18530Sstevel@tonic-gate 	int32_t index, busy = 0;
18540Sstevel@tonic-gate 	union ihead *ih;
18550Sstevel@tonic-gate 	struct udf_vfs *udf_vfsp;
18560Sstevel@tonic-gate 	dev_t dev;
18570Sstevel@tonic-gate 	struct vnode *rvp, *vp;
18580Sstevel@tonic-gate 	struct ud_inode *ip, *next;
18590Sstevel@tonic-gate 
18600Sstevel@tonic-gate 	ud_printf("ud_iflush\n");
18610Sstevel@tonic-gate 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
18620Sstevel@tonic-gate 	rvp = udf_vfsp->udf_root;
18630Sstevel@tonic-gate 	dev = vfsp->vfs_dev;
18640Sstevel@tonic-gate 
18650Sstevel@tonic-gate 	mutex_enter(&ud_icache_lock);
18660Sstevel@tonic-gate 	for (index = 0; index < UD_HASH_SZ; index++) {
18670Sstevel@tonic-gate 		ih = &ud_ihead[index];
18680Sstevel@tonic-gate 
18690Sstevel@tonic-gate 		next = ih->ih_chain[0];
18700Sstevel@tonic-gate 		while (next != (struct ud_inode *)ih) {
18710Sstevel@tonic-gate 			ip = next;
18720Sstevel@tonic-gate 			next = ip->i_forw;
18730Sstevel@tonic-gate 			if (ip->i_dev != dev) {
18740Sstevel@tonic-gate 				continue;
18750Sstevel@tonic-gate 			}
18760Sstevel@tonic-gate 			vp = ITOV(ip);
18770Sstevel@tonic-gate 			/*
18780Sstevel@tonic-gate 			 * root inode is processed by the caller
18790Sstevel@tonic-gate 			 */
18800Sstevel@tonic-gate 			if (vp == rvp) {
18810Sstevel@tonic-gate 				if (vp->v_count > 1) {
18820Sstevel@tonic-gate 					busy = -1;
18830Sstevel@tonic-gate 				}
18840Sstevel@tonic-gate 				continue;
18850Sstevel@tonic-gate 			}
18860Sstevel@tonic-gate 			if (ip->i_flag & IREF) {
18870Sstevel@tonic-gate 				/*
18880Sstevel@tonic-gate 				 * Set error indicator for return value,
18890Sstevel@tonic-gate 				 * but continue invalidating other
18900Sstevel@tonic-gate 				 * inodes.
18910Sstevel@tonic-gate 				 */
18920Sstevel@tonic-gate 				busy = -1;
18930Sstevel@tonic-gate 				continue;
18940Sstevel@tonic-gate 			}
18950Sstevel@tonic-gate 
18960Sstevel@tonic-gate 			rw_enter(&ip->i_contents, RW_WRITER);
18970Sstevel@tonic-gate 			remque(ip);
18980Sstevel@tonic-gate 			ip->i_forw = ip;
18990Sstevel@tonic-gate 			ip->i_back = ip;
19000Sstevel@tonic-gate 			/*
19010Sstevel@tonic-gate 			 * Hold the vnode since it's not done
19020Sstevel@tonic-gate 			 * in VOP_PUTPAGE anymore.
19030Sstevel@tonic-gate 			 */
19040Sstevel@tonic-gate 			VN_HOLD(vp);
19050Sstevel@tonic-gate 			/*
19060Sstevel@tonic-gate 			 * XXX Synchronous write holding
19070Sstevel@tonic-gate 			 * cache lock
19080Sstevel@tonic-gate 			 */
19090Sstevel@tonic-gate 			(void) ud_syncip(ip, B_INVAL, I_SYNC);
19100Sstevel@tonic-gate 			rw_exit(&ip->i_contents);
19110Sstevel@tonic-gate 			VN_RELE(vp);
19120Sstevel@tonic-gate 		}
19130Sstevel@tonic-gate 	}
19140Sstevel@tonic-gate 	mutex_exit(&ud_icache_lock);
19150Sstevel@tonic-gate 
19160Sstevel@tonic-gate 	return (busy);
19170Sstevel@tonic-gate }
19180Sstevel@tonic-gate 
19190Sstevel@tonic-gate 
19200Sstevel@tonic-gate /*
19210Sstevel@tonic-gate  * Check mode permission on inode.  Mode is READ, WRITE or EXEC.
19220Sstevel@tonic-gate  * In the case of WRITE, the read-only status of the file system
19230Sstevel@tonic-gate  * is checked.  The applicable mode bits are compared with the
19240Sstevel@tonic-gate  * requested form of access.  If bits are missing, the secpolicy
19250Sstevel@tonic-gate  * function will check for privileges.
19260Sstevel@tonic-gate  */
19270Sstevel@tonic-gate int
192812196SMilan.Cermak@Sun.COM ud_iaccess(struct ud_inode *ip, int32_t mode, struct cred *cr, int dolock)
19290Sstevel@tonic-gate {
19300Sstevel@tonic-gate 	int shift = 0;
193112196SMilan.Cermak@Sun.COM 	int ret = 0;
193212196SMilan.Cermak@Sun.COM 
193312196SMilan.Cermak@Sun.COM 	if (dolock)
193412196SMilan.Cermak@Sun.COM 		rw_enter(&ip->i_contents, RW_READER);
193512196SMilan.Cermak@Sun.COM 	ASSERT(RW_LOCK_HELD(&ip->i_contents));
19360Sstevel@tonic-gate 
19370Sstevel@tonic-gate 	ud_printf("ud_iaccess\n");
19380Sstevel@tonic-gate 	if (mode & IWRITE) {
19390Sstevel@tonic-gate 		/*
19400Sstevel@tonic-gate 		 * Disallow write attempts on read-only
19410Sstevel@tonic-gate 		 * file systems, unless the file is a block
19420Sstevel@tonic-gate 		 * or character device or a FIFO.
19430Sstevel@tonic-gate 		 */
19440Sstevel@tonic-gate 		if (ip->i_udf->udf_flags & UDF_FL_RDONLY) {
19450Sstevel@tonic-gate 			if ((ip->i_type != VCHR) &&
19460Sstevel@tonic-gate 			    (ip->i_type != VBLK) &&
19470Sstevel@tonic-gate 			    (ip->i_type != VFIFO)) {
194812196SMilan.Cermak@Sun.COM 				ret = EROFS;
194912196SMilan.Cermak@Sun.COM 				goto out;
19500Sstevel@tonic-gate 			}
19510Sstevel@tonic-gate 		}
19520Sstevel@tonic-gate 	}
19530Sstevel@tonic-gate 
19540Sstevel@tonic-gate 	/*
19550Sstevel@tonic-gate 	 * Access check is based on only
19560Sstevel@tonic-gate 	 * one of owner, group, public.
19570Sstevel@tonic-gate 	 * If not owner, then check group.
19580Sstevel@tonic-gate 	 * If not a member of the group, then
19590Sstevel@tonic-gate 	 * check public access.
19600Sstevel@tonic-gate 	 */
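	/*
	 * UDF file entries keep five permission bits per class, so
	 * stepping from owner to group to other shifts the permission
	 * word by five bits (not three as in the UNIX mode word);
	 * UD2VA_PERM converts the selected bits to the vnode access
	 * bits expected by secpolicy_vnode_access2().
	 */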
19610Sstevel@tonic-gate 	if (crgetuid(cr) != ip->i_uid) {
19620Sstevel@tonic-gate 		shift += 5;
19630Sstevel@tonic-gate 		if (!groupmember((uid_t)ip->i_gid, cr))
19640Sstevel@tonic-gate 			shift += 5;
19650Sstevel@tonic-gate 	}
19660Sstevel@tonic-gate 
1967*12273SCasper.Dik@Sun.COM 	ret = secpolicy_vnode_access2(cr, ITOV(ip), ip->i_uid,
1968*12273SCasper.Dik@Sun.COM 	    UD2VA_PERM(ip->i_perm << shift), UD2VA_PERM(mode));
19690Sstevel@tonic-gate 
197012196SMilan.Cermak@Sun.COM out:
197112196SMilan.Cermak@Sun.COM 	if (dolock)
197212196SMilan.Cermak@Sun.COM 		rw_exit(&ip->i_contents);
197312196SMilan.Cermak@Sun.COM 	return (ret);
19740Sstevel@tonic-gate }
19750Sstevel@tonic-gate 
19760Sstevel@tonic-gate void
19770Sstevel@tonic-gate ud_imark(struct ud_inode *ip)
19780Sstevel@tonic-gate {
19790Sstevel@tonic-gate 	timestruc_t	now;
19800Sstevel@tonic-gate 
19810Sstevel@tonic-gate 	gethrestime(&now);
19820Sstevel@tonic-gate 	ud_printf("ud_imark\n");
19830Sstevel@tonic-gate 	if (ip->i_flag & IACC) {
19840Sstevel@tonic-gate 		ip->i_atime.tv_sec = now.tv_sec;
19850Sstevel@tonic-gate 		ip->i_atime.tv_nsec = now.tv_nsec;
19860Sstevel@tonic-gate 	}
19870Sstevel@tonic-gate 	if (ip->i_flag & IUPD) {
19880Sstevel@tonic-gate 		ip->i_mtime.tv_sec = now.tv_sec;
19890Sstevel@tonic-gate 		ip->i_mtime.tv_nsec = now.tv_nsec;
19900Sstevel@tonic-gate 		ip->i_flag |= IMODTIME;
19910Sstevel@tonic-gate 	}
19920Sstevel@tonic-gate 	if (ip->i_flag & ICHG) {
19930Sstevel@tonic-gate 		ip->i_diroff = 0;
19940Sstevel@tonic-gate 		ip->i_ctime.tv_sec = now.tv_sec;
19950Sstevel@tonic-gate 		ip->i_ctime.tv_nsec = now.tv_nsec;
19960Sstevel@tonic-gate 	}
19970Sstevel@tonic-gate }
19980Sstevel@tonic-gate 
19990Sstevel@tonic-gate 
20000Sstevel@tonic-gate void
20010Sstevel@tonic-gate ud_itimes_nolock(struct ud_inode *ip)
20020Sstevel@tonic-gate {
20030Sstevel@tonic-gate 	ud_printf("ud_itimes_nolock\n");
20040Sstevel@tonic-gate 
20050Sstevel@tonic-gate 	if (ip->i_flag & (IUPD|IACC|ICHG)) {
20060Sstevel@tonic-gate 		if (ip->i_flag & ICHG) {
20070Sstevel@tonic-gate 			ip->i_flag |= IMOD;
20080Sstevel@tonic-gate 		} else {
20090Sstevel@tonic-gate 			ip->i_flag |= IMODACC;
20100Sstevel@tonic-gate 		}
20110Sstevel@tonic-gate 		ud_imark(ip);
20120Sstevel@tonic-gate 		ip->i_flag &= ~(IACC|IUPD|ICHG);
20130Sstevel@tonic-gate 	}
20140Sstevel@tonic-gate }
20150Sstevel@tonic-gate 
20160Sstevel@tonic-gate void
20170Sstevel@tonic-gate ud_delcache(struct ud_inode *ip)
20180Sstevel@tonic-gate {
20190Sstevel@tonic-gate 	ud_printf("ud_delcache\n");
20200Sstevel@tonic-gate 
20210Sstevel@tonic-gate 	mutex_enter(&ud_icache_lock);
20220Sstevel@tonic-gate 	remque(ip);
20230Sstevel@tonic-gate 	ip->i_forw = ip;
20240Sstevel@tonic-gate 	ip->i_back = ip;
20250Sstevel@tonic-gate 	mutex_exit(&ud_icache_lock);
20260Sstevel@tonic-gate }
20270Sstevel@tonic-gate 
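/*
 * ud_idrop() releases the caller's hold on the vnode.  If other holders
 * remain it simply drops the count; otherwise it puts the inode on the
 * free list (at the front if it is invalid or has no cached pages, at
 * the end otherwise) so it can be reclaimed later.
 */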
20280Sstevel@tonic-gate void
20290Sstevel@tonic-gate ud_idrop(struct ud_inode *ip)
20300Sstevel@tonic-gate {
20310Sstevel@tonic-gate 	struct vnode *vp = ITOV(ip);
20320Sstevel@tonic-gate 
20330Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
20340Sstevel@tonic-gate 
20350Sstevel@tonic-gate 	ud_printf("ud_idrop\n");
20360Sstevel@tonic-gate 
20370Sstevel@tonic-gate 	mutex_enter(&vp->v_lock);
20380Sstevel@tonic-gate 	if (vp->v_count > 1) {
20390Sstevel@tonic-gate 		vp->v_count--;
20400Sstevel@tonic-gate 		mutex_exit(&vp->v_lock);
20410Sstevel@tonic-gate 		return;
20420Sstevel@tonic-gate 	}
20430Sstevel@tonic-gate 	vp->v_count = 0;
20440Sstevel@tonic-gate 	mutex_exit(&vp->v_lock);
20450Sstevel@tonic-gate 
20460Sstevel@tonic-gate 
20470Sstevel@tonic-gate 	/*
20480Sstevel@tonic-gate 	 *  If the inode is invalid or there are no pages associated with
20490Sstevel@tonic-gate 	 *  this inode, put the inode at the front of the free list.
20500Sstevel@tonic-gate 	 */
20510Sstevel@tonic-gate 	mutex_enter(&ip->i_tlock);
20520Sstevel@tonic-gate 	mutex_enter(&udf_ifree_lock);
20530Sstevel@tonic-gate 	if (!vn_has_cached_data(vp) || ip->i_perm == 0) {
20540Sstevel@tonic-gate 		ud_add_to_free_list(ip, UD_BEGIN);
20550Sstevel@tonic-gate 	} else {
20560Sstevel@tonic-gate 		/*
20570Sstevel@tonic-gate 		 * Otherwise, put the inode back on the end of the free list.
20580Sstevel@tonic-gate 		 */
20590Sstevel@tonic-gate 		ud_add_to_free_list(ip, UD_END);
20600Sstevel@tonic-gate 	}
20610Sstevel@tonic-gate 	mutex_exit(&udf_ifree_lock);
20620Sstevel@tonic-gate 	ip->i_flag &= IMODTIME;
20630Sstevel@tonic-gate 	mutex_exit(&ip->i_tlock);
20640Sstevel@tonic-gate }
20650Sstevel@tonic-gate 
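/*
 * ud_add_to_free_list() links ip onto the global inode free list,
 * either at the head (UD_BEGIN) or at the tail (UD_END).  Callers must
 * hold udf_ifree_lock.
 */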
20660Sstevel@tonic-gate void
20670Sstevel@tonic-gate ud_add_to_free_list(struct ud_inode *ip, uint32_t at)
20680Sstevel@tonic-gate {
20690Sstevel@tonic-gate 	ASSERT(ip);
20700Sstevel@tonic-gate 	ASSERT(mutex_owned(&udf_ifree_lock));
20710Sstevel@tonic-gate 
20720Sstevel@tonic-gate #ifdef	DEBUG
20730Sstevel@tonic-gate 	/* Search if the element is already in the list */
20740Sstevel@tonic-gate 	if (udf_ifreeh != NULL) {
20750Sstevel@tonic-gate 		struct ud_inode *iq;
20760Sstevel@tonic-gate 
20770Sstevel@tonic-gate 		iq = udf_ifreeh;
20780Sstevel@tonic-gate 		while (iq) {
20790Sstevel@tonic-gate 			if (iq == ip) {
20800Sstevel@tonic-gate 				cmn_err(CE_WARN, "Duplicate %p\n", (void *)ip);
20810Sstevel@tonic-gate 			}
20820Sstevel@tonic-gate 			iq = iq->i_freef;
20830Sstevel@tonic-gate 		}
20840Sstevel@tonic-gate 	}
20850Sstevel@tonic-gate #endif
20860Sstevel@tonic-gate 
20870Sstevel@tonic-gate 	ip->i_freef = NULL;
20880Sstevel@tonic-gate 	ip->i_freeb = NULL;
20890Sstevel@tonic-gate 	if (udf_ifreeh == NULL) {
20900Sstevel@tonic-gate 		/*
20910Sstevel@tonic-gate 		 * Nothing on the list; just add it
20920Sstevel@tonic-gate 		 */
20930Sstevel@tonic-gate 		udf_ifreeh = ip;
20940Sstevel@tonic-gate 		udf_ifreet = ip;
20950Sstevel@tonic-gate 	} else {
20960Sstevel@tonic-gate 		if (at == UD_BEGIN) {
20970Sstevel@tonic-gate 			/*
20980Sstevel@tonic-gate 			 * Add at the beginning of the list
20990Sstevel@tonic-gate 			 */
21000Sstevel@tonic-gate 			ip->i_freef = udf_ifreeh;
21010Sstevel@tonic-gate 			udf_ifreeh->i_freeb = ip;
21020Sstevel@tonic-gate 			udf_ifreeh = ip;
21030Sstevel@tonic-gate 		} else {
21040Sstevel@tonic-gate 			/*
21050Sstevel@tonic-gate 			 * Add at the end of the list
21060Sstevel@tonic-gate 			 */
21070Sstevel@tonic-gate 			ip->i_freeb = udf_ifreet;
21080Sstevel@tonic-gate 			udf_ifreet->i_freef = ip;
21090Sstevel@tonic-gate 			udf_ifreet = ip;
21100Sstevel@tonic-gate 		}
21110Sstevel@tonic-gate 	}
21120Sstevel@tonic-gate }
21130Sstevel@tonic-gate 
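/*
 * ud_remove_from_free_list() unlinks ip from the doubly linked free
 * list headed by udf_ifreeh and tailed by udf_ifreet.  If the inode is
 * not actually on the list (both links NULL and it is not the head),
 * the call is a no-op.  Callers must hold udf_ifree_lock.
 */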
21140Sstevel@tonic-gate void
21150Sstevel@tonic-gate ud_remove_from_free_list(struct ud_inode *ip, uint32_t at)
21160Sstevel@tonic-gate {
21170Sstevel@tonic-gate 	ASSERT(ip);
21180Sstevel@tonic-gate 	ASSERT(mutex_owned(&udf_ifree_lock));
21190Sstevel@tonic-gate 
21200Sstevel@tonic-gate #ifdef	DEBUG
21210Sstevel@tonic-gate 	{
21220Sstevel@tonic-gate 		struct ud_inode *iq;
21230Sstevel@tonic-gate 		uint32_t found = 0;
21240Sstevel@tonic-gate 
21250Sstevel@tonic-gate 		iq = udf_ifreeh;
21260Sstevel@tonic-gate 		while (iq) {
21270Sstevel@tonic-gate 			if (iq == ip) {
21280Sstevel@tonic-gate 				found++;
21290Sstevel@tonic-gate 			}
21300Sstevel@tonic-gate 			iq = iq->i_freef;
21310Sstevel@tonic-gate 		}
21320Sstevel@tonic-gate 		if (found != 1) {
21330Sstevel@tonic-gate 			cmn_err(CE_WARN, "ip %p is found %x times\n",
213412196SMilan.Cermak@Sun.COM 			    (void *)ip,  found);
21350Sstevel@tonic-gate 		}
21360Sstevel@tonic-gate 	}
21370Sstevel@tonic-gate #endif
21380Sstevel@tonic-gate 
213912196SMilan.Cermak@Sun.COM 	if ((ip->i_freef == NULL) && (ip->i_freeb == NULL)) {
21400Sstevel@tonic-gate 		if (ip != udf_ifreeh) {
21410Sstevel@tonic-gate 			return;
21420Sstevel@tonic-gate 		}
21430Sstevel@tonic-gate 	}
21440Sstevel@tonic-gate 
214512196SMilan.Cermak@Sun.COM 	if ((at == UD_BEGIN) || (ip == udf_ifreeh)) {
21460Sstevel@tonic-gate 		udf_ifreeh = ip->i_freef;
21470Sstevel@tonic-gate 		if (ip->i_freef == NULL) {
21480Sstevel@tonic-gate 			udf_ifreet = NULL;
21490Sstevel@tonic-gate 		} else {
21500Sstevel@tonic-gate 			udf_ifreeh->i_freeb = NULL;
21510Sstevel@tonic-gate 		}
21520Sstevel@tonic-gate 	} else {
21530Sstevel@tonic-gate 		ip->i_freeb->i_freef = ip->i_freef;
21540Sstevel@tonic-gate 		if (ip->i_freef) {
21550Sstevel@tonic-gate 			ip->i_freef->i_freeb = ip->i_freeb;
21560Sstevel@tonic-gate 		} else {
21570Sstevel@tonic-gate 			udf_ifreet = ip->i_freeb;
21580Sstevel@tonic-gate 		}
21590Sstevel@tonic-gate 	}
21600Sstevel@tonic-gate 	ip->i_freef = NULL;
21610Sstevel@tonic-gate 	ip->i_freeb = NULL;
21620Sstevel@tonic-gate }
21630Sstevel@tonic-gate 
21640Sstevel@tonic-gate void
21650Sstevel@tonic-gate ud_init_inodes(void)
21660Sstevel@tonic-gate {
21670Sstevel@tonic-gate 	union ihead *ih = ud_ihead;
21680Sstevel@tonic-gate 	int index;
21690Sstevel@tonic-gate 
21700Sstevel@tonic-gate #ifndef	__lint
21710Sstevel@tonic-gate 	_NOTE(NO_COMPETING_THREADS_NOW);
21720Sstevel@tonic-gate #endif
21730Sstevel@tonic-gate 	for (index = 0; index < UD_HASH_SZ; index++, ih++) {
21740Sstevel@tonic-gate 		ih->ih_head[0] = ih;
21750Sstevel@tonic-gate 		ih->ih_head[1] = ih;
21760Sstevel@tonic-gate 	}
21770Sstevel@tonic-gate 	mutex_init(&ud_icache_lock, NULL, MUTEX_DEFAULT, NULL);
21780Sstevel@tonic-gate 	mutex_init(&ud_nino_lock, NULL, MUTEX_DEFAULT, NULL);
21790Sstevel@tonic-gate 
21800Sstevel@tonic-gate 	udf_ifreeh = NULL;
21810Sstevel@tonic-gate 	udf_ifreet = NULL;
21820Sstevel@tonic-gate 	mutex_init(&udf_ifree_lock, NULL, MUTEX_DEFAULT, NULL);
21830Sstevel@tonic-gate 
21840Sstevel@tonic-gate 	mutex_init(&ud_sync_busy, NULL, MUTEX_DEFAULT, NULL);
21850Sstevel@tonic-gate 	udf_vfs_instances = NULL;
21860Sstevel@tonic-gate 	mutex_init(&udf_vfs_mutex, NULL, MUTEX_DEFAULT, NULL);
21870Sstevel@tonic-gate 
21880Sstevel@tonic-gate #ifndef	__lint
21890Sstevel@tonic-gate 	_NOTE(COMPETING_THREADS_NOW);
21900Sstevel@tonic-gate #endif
21910Sstevel@tonic-gate }