xref: /onnv-gate/usr/src/uts/common/fs/udfs/udf_bmap.c (revision 0:68f95e015346)
1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24*0Sstevel@tonic-gate  * Use is subject to license terms.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate 
27*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*0Sstevel@tonic-gate 
29*0Sstevel@tonic-gate #include <sys/types.h>
30*0Sstevel@tonic-gate #include <sys/t_lock.h>
31*0Sstevel@tonic-gate #include <sys/param.h>
32*0Sstevel@tonic-gate #include <sys/time.h>
33*0Sstevel@tonic-gate #include <sys/systm.h>
34*0Sstevel@tonic-gate #include <sys/sysmacros.h>
35*0Sstevel@tonic-gate #include <sys/resource.h>
36*0Sstevel@tonic-gate #include <sys/signal.h>
37*0Sstevel@tonic-gate #include <sys/cred.h>
38*0Sstevel@tonic-gate #include <sys/user.h>
39*0Sstevel@tonic-gate #include <sys/buf.h>
40*0Sstevel@tonic-gate #include <sys/vfs.h>
41*0Sstevel@tonic-gate #include <sys/stat.h>
42*0Sstevel@tonic-gate #include <sys/vnode.h>
43*0Sstevel@tonic-gate #include <sys/mode.h>
44*0Sstevel@tonic-gate #include <sys/proc.h>
45*0Sstevel@tonic-gate #include <sys/disp.h>
46*0Sstevel@tonic-gate #include <sys/file.h>
47*0Sstevel@tonic-gate #include <sys/fcntl.h>
48*0Sstevel@tonic-gate #include <sys/flock.h>
49*0Sstevel@tonic-gate #include <sys/kmem.h>
50*0Sstevel@tonic-gate #include <sys/uio.h>
51*0Sstevel@tonic-gate #include <sys/dnlc.h>
52*0Sstevel@tonic-gate #include <sys/conf.h>
53*0Sstevel@tonic-gate #include <sys/errno.h>
54*0Sstevel@tonic-gate #include <sys/mman.h>
55*0Sstevel@tonic-gate #include <sys/fbuf.h>
56*0Sstevel@tonic-gate #include <sys/pathname.h>
57*0Sstevel@tonic-gate #include <sys/debug.h>
58*0Sstevel@tonic-gate #include <sys/vmsystm.h>
59*0Sstevel@tonic-gate #include <sys/cmn_err.h>
60*0Sstevel@tonic-gate #include <sys/dirent.h>
61*0Sstevel@tonic-gate #include <sys/errno.h>
62*0Sstevel@tonic-gate #include <sys/modctl.h>
63*0Sstevel@tonic-gate #include <sys/statvfs.h>
64*0Sstevel@tonic-gate #include <sys/mount.h>
65*0Sstevel@tonic-gate #include <sys/sunddi.h>
66*0Sstevel@tonic-gate #include <sys/bootconf.h>
67*0Sstevel@tonic-gate 
68*0Sstevel@tonic-gate #include <vm/hat.h>
69*0Sstevel@tonic-gate #include <vm/page.h>
70*0Sstevel@tonic-gate #include <vm/pvn.h>
71*0Sstevel@tonic-gate #include <vm/as.h>
72*0Sstevel@tonic-gate #include <vm/seg.h>
73*0Sstevel@tonic-gate #include <vm/seg_map.h>
74*0Sstevel@tonic-gate #include <vm/seg_kmem.h>
75*0Sstevel@tonic-gate #include <vm/seg_vn.h>
76*0Sstevel@tonic-gate #include <vm/rm.h>
77*0Sstevel@tonic-gate #include <vm/page.h>
78*0Sstevel@tonic-gate #include <sys/swap.h>
79*0Sstevel@tonic-gate 
80*0Sstevel@tonic-gate 
81*0Sstevel@tonic-gate #include <fs/fs_subr.h>
82*0Sstevel@tonic-gate 
83*0Sstevel@tonic-gate 
84*0Sstevel@tonic-gate #include <sys/fs/udf_volume.h>
85*0Sstevel@tonic-gate #include <sys/fs/udf_inode.h>
86*0Sstevel@tonic-gate 
87*0Sstevel@tonic-gate 
88*0Sstevel@tonic-gate int32_t ud_break_create_new_icb(struct ud_inode *, int32_t, uint32_t);
89*0Sstevel@tonic-gate int32_t ud_bump_ext_count(struct ud_inode *, int32_t);
90*0Sstevel@tonic-gate void ud_remove_ext_at_index(struct ud_inode *, int32_t);
91*0Sstevel@tonic-gate int32_t ud_last_alloc_ext(struct ud_inode *, uint64_t, uint32_t, int32_t);
92*0Sstevel@tonic-gate int32_t ud_create_ext(struct ud_inode *, int32_t, uint32_t,
93*0Sstevel@tonic-gate 	int32_t, uint64_t, uint64_t *);
94*0Sstevel@tonic-gate int32_t	ud_zero_it(struct ud_inode *, uint32_t, uint32_t);
95*0Sstevel@tonic-gate 
96*0Sstevel@tonic-gate #define	ALLOC_SPACE	0x01
97*0Sstevel@tonic-gate #define	NEW_EXT		0x02
98*0Sstevel@tonic-gate 
99*0Sstevel@tonic-gate #define	MEXT_BITS	30
100*0Sstevel@tonic-gate 
101*0Sstevel@tonic-gate int32_t
ud_bmap_has_holes(struct ud_inode * ip)102*0Sstevel@tonic-gate ud_bmap_has_holes(struct ud_inode *ip)
103*0Sstevel@tonic-gate {
104*0Sstevel@tonic-gate 	int32_t i, error = 0;
105*0Sstevel@tonic-gate 	struct icb_ext *iext;
106*0Sstevel@tonic-gate 
107*0Sstevel@tonic-gate 	ud_printf("ud_bmap_has_holes\n");
108*0Sstevel@tonic-gate 
109*0Sstevel@tonic-gate 	ASSERT(RW_LOCK_HELD(&ip->i_contents));
110*0Sstevel@tonic-gate 
111*0Sstevel@tonic-gate 	/* ICB_FLAG_ONE_AD is always continuos */
112*0Sstevel@tonic-gate 	if (ip->i_desc_type != ICB_FLAG_ONE_AD) {
113*0Sstevel@tonic-gate 		if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
114*0Sstevel@tonic-gate 			for (i = 0; i < ip->i_ext_used; i++) {
115*0Sstevel@tonic-gate 				iext = &ip->i_ext[i];
116*0Sstevel@tonic-gate 				if (iext->ib_flags == IB_UN_RE_AL) {
117*0Sstevel@tonic-gate 					error = 1;
118*0Sstevel@tonic-gate 					break;
119*0Sstevel@tonic-gate 				}
120*0Sstevel@tonic-gate 			}
121*0Sstevel@tonic-gate 		}
122*0Sstevel@tonic-gate 	}
123*0Sstevel@tonic-gate 
124*0Sstevel@tonic-gate 	return (error);
125*0Sstevel@tonic-gate }
126*0Sstevel@tonic-gate 
127*0Sstevel@tonic-gate int32_t
ud_bmap_read(struct ud_inode * ip,u_offset_t off,daddr_t * bnp,int32_t * lenp)128*0Sstevel@tonic-gate ud_bmap_read(struct ud_inode *ip, u_offset_t off, daddr_t *bnp, int32_t *lenp)
129*0Sstevel@tonic-gate {
130*0Sstevel@tonic-gate 	struct icb_ext *iext;
131*0Sstevel@tonic-gate 	daddr_t bno;
132*0Sstevel@tonic-gate 	int32_t lbmask, i, l2b, l2d, error = 0, count;
133*0Sstevel@tonic-gate 	uint32_t length, block, dummy;
134*0Sstevel@tonic-gate 
135*0Sstevel@tonic-gate 	ud_printf("ud_bmap_read\n");
136*0Sstevel@tonic-gate 
137*0Sstevel@tonic-gate 	ASSERT(RW_LOCK_HELD(&ip->i_contents));
138*0Sstevel@tonic-gate 
139*0Sstevel@tonic-gate 	lbmask = ip->i_udf->udf_lbmask;
140*0Sstevel@tonic-gate 	l2b = ip->i_udf->udf_l2b_shift;
141*0Sstevel@tonic-gate 	l2d = ip->i_udf->udf_l2d_shift;
142*0Sstevel@tonic-gate 
143*0Sstevel@tonic-gate 	if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
144*0Sstevel@tonic-gate 		for (i = 0; i < ip->i_ext_used; i++) {
145*0Sstevel@tonic-gate 			iext = &ip->i_ext[i];
146*0Sstevel@tonic-gate 			if ((iext->ib_offset <= off) &&
147*0Sstevel@tonic-gate 				(off < (iext->ib_offset + iext->ib_count))) {
148*0Sstevel@tonic-gate 				length = ((iext->ib_offset +
149*0Sstevel@tonic-gate 						iext->ib_count - off) +
150*0Sstevel@tonic-gate 						lbmask) & ~lbmask;
151*0Sstevel@tonic-gate 				if (iext->ib_flags == IB_UN_RE_AL) {
152*0Sstevel@tonic-gate 					*bnp = UDF_HOLE;
153*0Sstevel@tonic-gate 					*lenp = length;
154*0Sstevel@tonic-gate 					break;
155*0Sstevel@tonic-gate 				}
156*0Sstevel@tonic-gate 
157*0Sstevel@tonic-gate 				block = iext->ib_block +
158*0Sstevel@tonic-gate 					((off - iext->ib_offset) >> l2b);
159*0Sstevel@tonic-gate 				count = length >> l2b;
160*0Sstevel@tonic-gate 
161*0Sstevel@tonic-gate 				bno = ud_xlate_to_daddr(ip->i_udf,
162*0Sstevel@tonic-gate 					iext->ib_prn, block, count, &dummy);
163*0Sstevel@tonic-gate 				ASSERT(dummy != 0);
164*0Sstevel@tonic-gate 				ASSERT(dummy <= count);
165*0Sstevel@tonic-gate 				*bnp = bno << l2d;
166*0Sstevel@tonic-gate 				*lenp = dummy << l2b;
167*0Sstevel@tonic-gate 
168*0Sstevel@tonic-gate 				break;
169*0Sstevel@tonic-gate 			}
170*0Sstevel@tonic-gate 		}
171*0Sstevel@tonic-gate 		if (i == ip->i_ext_used) {
172*0Sstevel@tonic-gate 			error = EINVAL;
173*0Sstevel@tonic-gate 		}
174*0Sstevel@tonic-gate 	}
175*0Sstevel@tonic-gate 
176*0Sstevel@tonic-gate 	return (error);
177*0Sstevel@tonic-gate }
178*0Sstevel@tonic-gate 
179*0Sstevel@tonic-gate 
180*0Sstevel@tonic-gate /*
181*0Sstevel@tonic-gate  * Extent allocation in the inode
182*0Sstevel@tonic-gate  * Initially when the inode is allocated we
183*0Sstevel@tonic-gate  * will allocate EXT_PER_MALLOC extents and once these
184*0Sstevel@tonic-gate  * are used we allocate another 10 and copy
185*0Sstevel@tonic-gate  * the old extents and start using the others
186*0Sstevel@tonic-gate  */
187*0Sstevel@tonic-gate #define	BASE(count)	((count) & ~lbmask)
188*0Sstevel@tonic-gate #define	CEIL(count)	(((count) + lbmask) & ~lbmask)
189*0Sstevel@tonic-gate 
190*0Sstevel@tonic-gate #define	PBASE(count)	((count) & PAGEMASK)
191*0Sstevel@tonic-gate #define	PCEIL(count)	(((count) + PAGEOFFSET) & PAGEMASK)
192*0Sstevel@tonic-gate 
193*0Sstevel@tonic-gate 
/*
 * Allocate on-disk space so that the byte range [BASE(off), BASE(off) + size)
 * of inode "ip" is backed by recorded extents.
 *
 * Three major cases:
 *   1. Embedded file (ICB_FLAG_ONE_AD): if the request still fits in the
 *      embedded area, nothing to do; otherwise convert the inode to
 *      short-AD form, build a fresh i_ext array, and allocate (possibly
 *      leaving page-aligned holes between old EOF and the new data).
 *   2. Growing a non-embedded file: delegate to ud_last_alloc_ext().
 *   3. Writing inside the current size: walk the extents covering the
 *      range and fill in any holes, preferring blocks adjacent to the
 *      previous extent and carving the holes so that the pieces we
 *      leave unallocated stay MMU-page aligned.
 *
 * "alloc_only" nonzero means the caller will overwrite the blocks, so
 * they need not be zeroed; it is forced off for directories and
 * synchronous writes.  "cr" is unused (ARGSUSED3).
 *
 * Returns 0 on success or an errno (EFBIG, ENOSPC, EINVAL, ...).
 * Caller must hold ip->i_contents as writer.
 *
 * NOTE(review): on the embedded-conversion error path the data read via
 * fbread() is released with fbrelse(..., S_WRITE) and the caller is
 * presumably expected to retry or fail the write - confirm against callers.
 */
/* ARGSUSED3 */
int32_t
ud_bmap_write(struct ud_inode *ip,
	u_offset_t off, int32_t size, int32_t alloc_only, struct cred *cr)
{
	int32_t error = 0, i, isdir, issync;
	struct udf_vfs *udf_vfsp;
	struct icb_ext *iext, *pext;
	uint32_t blkno, sz;
	u_offset_t isize;
	uint32_t acount, prox;
	int32_t blkcount, next;
	int32_t lbmask, l2b;
	uint64_t end_req, end_ext, mext_sz, icb_offset, count;
	int32_t dtype_changed = 0, memory_allocated = 0;
	struct	fbuf *fbp = NULL;


	ud_printf("ud_bmap_write\n");

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	udf_vfsp = ip->i_udf;
	lbmask = udf_vfsp->udf_lbmask;
	l2b = udf_vfsp->udf_l2b_shift;
	/* Largest single-extent byte count (30-bit length field, page slack) */
	mext_sz = (1 << MEXT_BITS) - PAGESIZE;

	if (lblkno(udf_vfsp, off) < 0) {
		return (EFBIG);
	}

	issync = ((ip->i_flag & ISYNC) != 0);

	isdir = (ip->i_type == VDIR);
	if (isdir || issync) {
		alloc_only = 0;		/* make sure */
	}

	end_req = BASE(off) + size;
	if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
		/* Still fits in the embedded area: nothing to allocate */
		if (end_req < ip->i_max_emb) {
			goto out;
		}

		/* Fault in the existing embedded data before converting */
		if (ip->i_size != 0) {
			error = fbread(ITOV(ip), 0, ip->i_size, S_OTHER, &fbp);
			if (error != 0) {
				goto out;
			}
		} else {
			fbp = NULL;
		}
		/*
		 * Change the desc_type
		 */
		ip->i_desc_type = ICB_FLAG_SHORT_AD;
		dtype_changed = 1;

one_ad_no_i_ext:
		/* Also entered from below for type-4 inodes with no i_ext */
		ASSERT(ip->i_ext == NULL);
		ASSERT(ip->i_astrat == STRAT_TYPE4);

		ip->i_ext_used = 0;
		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
		ip->i_cur_max_ext --;
		/* Estimate how many max-sized extents the request needs */
		if (end_req > mext_sz) {
			next = end_req / mext_sz;
		} else {
			next = 1;
		}
		ip->i_ext_count =
			((next / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
		iext = ip->i_ext = (struct icb_ext  *)kmem_zalloc(
			ip->i_ext_count * sizeof (struct icb_ext), KM_SLEEP);
		memory_allocated = 1;

		/* There will be at least EXT_PER_MALLOC icb_ext's allocated */

one_ad_i_ext:
		icb_offset = 0;
		count = end_req;

		/* Can we create a HOLE */

		if ((PCEIL(ip->i_size) < PBASE(off)) &&
			((PBASE(off) - PCEIL(ip->i_size)) >= PAGESIZE)) {

			if (ip->i_size != 0) {

				/*
				 * Allocate one block for
				 * old data.(cannot be more than one page)
				 */

				count = PAGESIZE;
				if (error = ud_create_ext(ip, ip->i_ext_used,
					ALLOC_SPACE | NEW_EXT, alloc_only,
					icb_offset, &count)) {
					goto embedded_error;
				}
				icb_offset = PAGESIZE;
			}

			/*
			 * Allocate a hole from PCEIL(ip->i_size) to PBASE(off)
			 */

			count = PBASE(off) - PCEIL(ip->i_size);
			(void) ud_create_ext(ip, ip->i_ext_used, NEW_EXT,
					alloc_only, icb_offset, &count);
			icb_offset = PBASE(off);

			/*
			 * Allocate the rest of the space PBASE(off) to end_req
			 */
			count = end_req - PBASE(off);
		} else {
			/*
			 * If no hole can be created then allocate
			 * space till the end of the request
			 */
			count = end_req;
		}



		if (error = ud_create_ext(ip, ip->i_ext_used,
				ALLOC_SPACE | NEW_EXT,
				alloc_only, icb_offset, &count)) {
embedded_error:
			/*
			 * Something error
			 * most probable file system is full
			 * we know that the file came in as a embedded file.
			 * undo what ever we did in this block of code
			 */
			if (dtype_changed) {
				ip->i_desc_type = ICB_FLAG_ONE_AD;
			}
			/* Free every extent we managed to allocate */
			for (i = 0; i < ip->i_ext_used; i++) {
				iext = &ip->i_ext[i];
				if (iext->ib_flags != IB_UN_RE_AL) {
					ud_free_space(ip->i_udf->udf_vfs,
						iext->ib_prn, iext->ib_block,
						(iext->ib_count + lbmask) >>
							l2b);
				}
			}
			if (memory_allocated) {
				kmem_free(ip->i_ext,
					ip->i_ext_count *
					sizeof (struct icb_ext));
				ip->i_ext = NULL;
				ip->i_ext_count = ip->i_ext_used = 0;
			}
		}

		if (fbp != NULL) {
			fbrelse(fbp, S_WRITE);
		}

		return (error);
	} else {

		/*
		 * Type 4 directories being created
		 */
		if (ip->i_ext == NULL) {
			goto one_ad_no_i_ext;
		}

		/*
		 * Read the entire icb's to memory
		 */
		if (ud_read_icb_till_off(ip, ip->i_size) != 0) {
			error = EINVAL;
			goto out;
		}

		isize = CEIL(ip->i_size);

		if (end_req > isize) {

			/*
			 * The new file size is greater
			 * than the old size
			 */

			if (ip->i_ext == NULL) {
				goto one_ad_no_i_ext;
			} else if (ip->i_ext_used == 0) {
				goto one_ad_i_ext;
			}

			error = ud_last_alloc_ext(ip, off, size, alloc_only);

			return (error);
		} else {

			/*
			 * File growing the new size will be less than
			 * iext->ib_offset + CEIL(iext->ib_count)
			 */

			iext = &ip->i_ext[ip->i_ext_used - 1];

			if (end_req > (iext->ib_offset + iext->ib_count)) {

				/* Extend the last extent to cover end_req */
				iext->ib_count = end_req - iext->ib_offset;

				if (iext->ib_flags != IB_UN_RE_AL) {
					/* Already backed by disk: done */
					error = 0;
					goto out;
				}
			}
		}
	}

	/* By this point the end of last extent is >= BASE(off) + size */

	ASSERT(ip->i_ext);

	/*
	 * Figure out the icb_ext that has offset "off"
	 */
	for (i = 0; i < ip->i_ext_used; i++) {
		iext = &ip->i_ext[i];
		if ((iext->ib_offset <= off) &&
			((iext->ib_offset + iext->ib_count) > off)) {
			break;
		}
	}

	/*
	 * iext will have offset "off"
	 */


	do {
		iext = &ip->i_ext[i];

		if ((iext->ib_flags & IB_UN_RE_AL) == 0) {

			/*
			 * Already allocated do nothing
			 */

			i++;
		} else {

			/*
			 * We are in a hole.
			 * allocate the required space
			 * while trying to create smaller holes
			 */

			if ((PBASE(off) > PBASE(iext->ib_offset)) &&
				((PBASE(off) - PBASE(iext->ib_offset)) >=
						PAGESIZE)) {

				/*
				 * Allocate space from beginning of
				 * old hole to the beginning of new hole
				 * We want all holes created by us
				 * to be MMUPAGE Aligned
				 */

				if (PBASE(iext->ib_offset) !=
						BASE(iext->ib_offset)) {
					/*
					 * Hole start is block- but not
					 * page-aligned: split off the
					 * sub-page head and allocate it
					 */
					if ((error = ud_break_create_new_icb(
						ip, i, BASE(iext->ib_offset) -
						PBASE(iext->ib_offset))) != 0) {
						return (error);
					}
					goto alloc_cur_ext;
				}

				/*
				 * Create the new hole
				 */

				if ((error = ud_break_create_new_icb(ip, i,
					PBASE(off) - iext->ib_offset)) != 0) {
					return (error);
				}
				iext = &ip->i_ext[i];
				i++;
				continue;
			}

			end_ext = iext->ib_offset + iext->ib_count;

			if ((PBASE(end_ext) > PCEIL(end_req)) &&
				((PBASE(end_ext) - PCEIL(end_req)) >=
							PAGESIZE)) {
				/*
				 * We can create a hole
				 * from PCEIL(end_req) - BASE(end_ext)
				 */
				if ((error = ud_break_create_new_icb(ip, i,
				PCEIL(end_req) - iext->ib_offset)) != 0) {
					return (error);
				}
			}


alloc_cur_ext:
			/*
			 * Allocate the current extent
			 */


			/*
			 * If the previous extent
			 * is allocated then try to allocate
			 * adjacent to the previous extent
			 */
			prox = 0;
			if (i != 0) {
				pext = &ip->i_ext[i - 1];
				if (pext->ib_flags != IB_UN_RE_AL) {
					prox = pext->ib_block +
						(CEIL(pext->ib_count) >> l2b);
				}
			}

			iext = &ip->i_ext[i];
			blkcount = CEIL(iext->ib_count) >> l2b;

			if ((error = ud_alloc_space(ip->i_vfs,
					ip->i_icb_prn, prox, blkcount,
					&blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			ip->i_lbr += sz;
			if (sz == 0) {
				return (ENOSPC);
			}

			/* Zero fresh blocks unless caller will overwrite */
			if (alloc_only == 0) {
				error = ud_zero_it(ip, blkno, sz);
			}

			acount = sz << l2b;
			if ((prox == blkno) &&
				((pext->ib_count + acount) < mext_sz)) {

				/*
				 * We are able to allocate adjacent to
				 * the previous extent. Increment the
				 * previous extent count if the size
				 * of the extent is not greater than
				 * max extent size
				 */

				pext = &ip->i_ext[i - 1];
				pext->ib_count += acount;

				if (sz == blkcount) {
					/*
					 * and get rid of the current
					 * extent since we have
					 * allocated all of its size
					 * and incremented the
					 * previous extents count
					 */
					ud_remove_ext_at_index(ip, i);
				} else {
					/*
					 * reduce the count of the
					 * current extent by the amount
					 * allocated in the last extent
					 */
					ASSERT(acount < iext->ib_count);
					iext->ib_count -= acount;
					iext->ib_offset += acount;
				}
			} else {
				if (sz < blkcount) {
					/* Partial allocation: split extent */
					if ((error = ud_break_create_new_icb(
						ip, i, sz << l2b)) != 0) {
						return (error);
					}
				}
				iext = &ip->i_ext[i];
				count -= CEIL(iext->ib_count);
				iext->ib_prn = ip->i_icb_prn;
				iext->ib_block = blkno;
				iext->ib_flags &= ~IB_UN_RE_AL;
/*
 *				iext->ib_flags |= IB_UN_REC;
 */
				i++;
				continue;
			}
		}
	} while ((iext->ib_offset + iext->ib_count) < end_req);

out:
	return (error);
}
595*0Sstevel@tonic-gate 
596*0Sstevel@tonic-gate 
597*0Sstevel@tonic-gate /*
598*0Sstevel@tonic-gate  * increase i_con/i_ext arrays and set new elements
599*0Sstevel@tonic-gate  * using long or short allocation descriptors
600*0Sstevel@tonic-gate  */
/*
 * Parse one allocation-extent-descriptor block (already read into "bp")
 * and append its allocation descriptors to ip->i_ext, growing the array
 * as needed.  Handles both long_ad and short_ad descriptor formats,
 * chosen by ip->i_desc_type; any other descriptor type is ignored.
 *
 * A descriptor whose upper-2-bit type is IB_CON names the next
 * continuation block; it is recorded in ip->i_con (growing that array
 * too) and terminates the scan.  A zero-length descriptor also ends
 * the scan.  File offsets for the new extents continue from the end of
 * the last extent already in i_ext, each rounded up to a logical block.
 *
 * NOTE(review): assumes ip->i_ext is non-NULL and ip->i_ext_used >= 1 on
 * entry (it dereferences i_ext[i_ext_used - 1]) - callers appear to
 * guarantee this; confirm before reuse.
 */
static void
ud_common_ad(struct ud_inode *ip, struct buf *bp)
{
	int32_t ndesc, count, lbmask;
	uint32_t length;
	struct alloc_ext_desc *aed;
	struct icb_ext *iext, *con;
	u_offset_t offset;
	long_ad_t *lad;
	short_ad_t *sad;
	int islong;
	void *addr;

	/* Descriptors follow the alloc_ext_desc header in the buffer */
	addr = bp->b_un.b_addr + sizeof (struct alloc_ext_desc);
	aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
	length = SWAP_32(aed->aed_len_aed);
	if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
		islong = 1;
		lad = addr;
		ndesc = length / sizeof (*lad);
	} else if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
		islong = 0;
		sad = addr;
		ndesc = length / sizeof (*sad);
	} else
		return;

	/*
	 * realloc i_ext array
	 */
	count = (((ip->i_ext_used + ndesc) / EXT_PER_MALLOC) + 1) *
	    EXT_PER_MALLOC;
	addr = kmem_zalloc(count * sizeof (struct icb_ext), KM_SLEEP);
	bcopy(ip->i_ext, addr, ip->i_ext_used * sizeof (struct icb_ext));
	kmem_free(ip->i_ext, ip->i_ext_count * sizeof (struct icb_ext));
	ip->i_ext = addr;
	ip->i_ext_count = count;

	/*
	 * scan descriptors
	 */
	lbmask = ip->i_udf->udf_lbmask;
	iext = &ip->i_ext[ip->i_ext_used - 1];
	/* New extents continue at the file offset where the last one ends */
	offset = iext->ib_offset + iext->ib_count;
	iext++;
	while (ndesc--) {
		if (islong)
			length = SWAP_32(lad->lad_ext_len);
		else
			length = SWAP_32(sad->sad_ext_len);

		/* Low 30 bits are the extent length; 0 terminates the list */
		if ((length & 0x3FFFFFFF) == 0)
			break;
		else if (((length >> 30) & IB_MASK) == IB_CON) {
			/* Continuation extent: remember it and stop here */
			if (ip->i_con_used == ip->i_con_count) {
				struct icb_ext *old;
				int32_t old_count;

				/* Grow i_con by EXT_PER_MALLOC entries */
				old = ip->i_con;
				old_count = ip->i_con_count *
				    sizeof (struct icb_ext);
				ip->i_con_count += EXT_PER_MALLOC;
				ip->i_con = kmem_zalloc(ip->i_con_count *
				    sizeof (struct icb_ext), KM_SLEEP);

				if (old) {
					bcopy(old, ip->i_con, old_count);
					kmem_free(old, old_count);
				}
			}
			con = &ip->i_con[ip->i_con_used];
			if (islong) {
				con->ib_prn = SWAP_16(lad->lad_ext_prn);
				con->ib_block = SWAP_32(lad->lad_ext_loc);
			} else {
				/* short_ad has no prn; use the ICB's */
				con->ib_prn = ip->i_icb_prn;
				con->ib_block = SWAP_32(sad->sad_ext_loc);
			}
			con->ib_count = length & 0x3FFFFFFF;
			con->ib_flags = (length >> 30) & IB_MASK;
			ip->i_con_used++;
			break;
		}

		if (islong) {
			iext->ib_prn = SWAP_16(lad->lad_ext_prn);
			iext->ib_block = SWAP_32(lad->lad_ext_loc);
			lad++;
		} else {
			iext->ib_prn = 0;
			iext->ib_block = SWAP_32(sad->sad_ext_loc);
			sad++;
		}
		iext->ib_count = length & 0x3FFFFFFF;
		iext->ib_offset = offset;
		/* Guard markers used to catch extent-array corruption */
		iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
		iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
		/* Advance offset by the block-rounded extent length */
		offset += (iext->ib_count + lbmask) & (~lbmask);
		iext->ib_flags = (length >> 30) & IB_MASK;
		ip->i_ext_used++;
		iext++;
	}
}
704*0Sstevel@tonic-gate 
705*0Sstevel@tonic-gate 
706*0Sstevel@tonic-gate static int32_t
ud_read_next_cont(struct ud_inode * ip)707*0Sstevel@tonic-gate ud_read_next_cont(struct ud_inode *ip)
708*0Sstevel@tonic-gate {
709*0Sstevel@tonic-gate 	uint32_t dummy, error = 0;
710*0Sstevel@tonic-gate 	struct alloc_ext_desc *aed;
711*0Sstevel@tonic-gate 	struct icb_ext *cont;
712*0Sstevel@tonic-gate 	struct buf *bp;
713*0Sstevel@tonic-gate 	daddr_t bno;
714*0Sstevel@tonic-gate 
715*0Sstevel@tonic-gate 	cont = &ip->i_con[ip->i_con_read];
716*0Sstevel@tonic-gate 	ASSERT(cont->ib_count > 0);
717*0Sstevel@tonic-gate 
718*0Sstevel@tonic-gate 	bno = ud_xlate_to_daddr(ip->i_udf, cont->ib_prn, cont->ib_block,
719*0Sstevel@tonic-gate 	    1, &dummy);
720*0Sstevel@tonic-gate 	bp = ud_bread(ip->i_dev, bno << ip->i_udf->udf_l2d_shift,
721*0Sstevel@tonic-gate 	    cont->ib_count);
722*0Sstevel@tonic-gate 	if (bp->b_flags & B_ERROR)
723*0Sstevel@tonic-gate 		error = bp->b_error;
724*0Sstevel@tonic-gate 	else {
725*0Sstevel@tonic-gate 		aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
726*0Sstevel@tonic-gate 		if (ud_verify_tag_and_desc(&aed->aed_tag, UD_ALLOC_EXT_DESC,
727*0Sstevel@tonic-gate 		    cont->ib_block, 1, cont->ib_count))
728*0Sstevel@tonic-gate 			error = EINVAL;
729*0Sstevel@tonic-gate 	}
730*0Sstevel@tonic-gate 
731*0Sstevel@tonic-gate 	if (error == 0)
732*0Sstevel@tonic-gate 		ud_common_ad(ip, bp);
733*0Sstevel@tonic-gate 
734*0Sstevel@tonic-gate 	brelse(bp);
735*0Sstevel@tonic-gate 	return (error);
736*0Sstevel@tonic-gate }
737*0Sstevel@tonic-gate 
738*0Sstevel@tonic-gate 
739*0Sstevel@tonic-gate int32_t
ud_read_icb_till_off(struct ud_inode * ip,u_offset_t offset)740*0Sstevel@tonic-gate ud_read_icb_till_off(struct ud_inode *ip, u_offset_t offset)
741*0Sstevel@tonic-gate {
742*0Sstevel@tonic-gate 	int32_t error = 0;
743*0Sstevel@tonic-gate 	struct icb_ext *iext;
744*0Sstevel@tonic-gate 
745*0Sstevel@tonic-gate 	ud_printf("ud_read_icb_till_off\n");
746*0Sstevel@tonic-gate 
747*0Sstevel@tonic-gate 	if (ip->i_desc_type == ICB_FLAG_ONE_AD)
748*0Sstevel@tonic-gate 		return (0);
749*0Sstevel@tonic-gate 	else if ((ip->i_astrat != STRAT_TYPE4) &&
750*0Sstevel@tonic-gate 	    (ip->i_astrat != STRAT_TYPE4096))
751*0Sstevel@tonic-gate 		return (EINVAL);
752*0Sstevel@tonic-gate 	else if (ip->i_ext_used == 0)
753*0Sstevel@tonic-gate 		return ((ip->i_size == 0) ? 0 : EINVAL);
754*0Sstevel@tonic-gate 
755*0Sstevel@tonic-gate 	/*
756*0Sstevel@tonic-gate 	 * supported allocation strategies are
757*0Sstevel@tonic-gate 	 * STRAT_TYPE4 and STRAT_TYPE4096
758*0Sstevel@tonic-gate 	 */
759*0Sstevel@tonic-gate 
760*0Sstevel@tonic-gate 	mutex_enter(&ip->i_con_lock);
761*0Sstevel@tonic-gate 	iext = &ip->i_ext[ip->i_ext_used - 1];
762*0Sstevel@tonic-gate 	while ((iext->ib_offset + iext->ib_count) < offset) {
763*0Sstevel@tonic-gate 		if (ip->i_con_used == ip->i_con_read) {
764*0Sstevel@tonic-gate 			error = EINVAL;
765*0Sstevel@tonic-gate 			break;
766*0Sstevel@tonic-gate 		}
767*0Sstevel@tonic-gate 		if (error = ud_read_next_cont(ip))
768*0Sstevel@tonic-gate 			break;
769*0Sstevel@tonic-gate 		ip->i_con_read++;
770*0Sstevel@tonic-gate 		iext = &ip->i_ext[ip->i_ext_used - 1];
771*0Sstevel@tonic-gate 	}
772*0Sstevel@tonic-gate 	mutex_exit(&ip->i_con_lock);
773*0Sstevel@tonic-gate 
774*0Sstevel@tonic-gate 	return (error);
775*0Sstevel@tonic-gate }
776*0Sstevel@tonic-gate 
777*0Sstevel@tonic-gate 
/*
 * Extend the allocation of a file growing past its last extent:
 * round the last extent up to a block boundary, insert an
 * unallocated (hole) extent if the gap up to "off" is big enough,
 * then cover [BASE(off), BASE(off) + size) with allocated space.
 *
 * Assumption is the off is beyond ip->i_size
 * And we will have at least one ext used
 *
 * NOTE(review): BASE/CEIL/PBASE/PCEIL are file-local macros defined
 * earlier in this file (CEIL/BASE are #undef'd below); CEIL/BASE
 * appear to round to logical-block boundaries using the local
 * "lbmask", PCEIL/PBASE to page boundaries -- confirm against
 * their definitions before changing this function.
 */
int32_t
ud_last_alloc_ext(struct ud_inode *ip, uint64_t off,
		uint32_t size, int32_t alloc_only)
{
	struct icb_ext *iext;
	struct udf_vfs *udf_vfsp;
	int32_t lbsize, lbmask;	/* lbmask is read by the CEIL/BASE macros */
	uint64_t end_req, end_count, icb_offset;
	uint64_t count;
	int32_t error = 0;


	udf_vfsp = ip->i_udf;
	lbsize = udf_vfsp->udf_lbsize;
	lbmask = udf_vfsp->udf_lbmask;

	/* End of the region the caller wants covered */
	end_req = BASE(off) + size;


	/*
	 * If we are here it means the file
	 * is growing beyond the end of the
	 * current block. So round up the
	 * last extent
	 */

	iext = &ip->i_ext[ip->i_ext_used - 1];
	iext->ib_count = CEIL(iext->ib_count);

	/*
	 * Figure out if we can create
	 * a hole here
	 */


	end_count = iext->ib_offset + iext->ib_count;

	/* A hole needs at least a page between current EOF and off */
	if ((PCEIL(end_count) < PBASE(off)) &&
		((PBASE(off) - PCEIL(end_count)) >= PAGESIZE)) {

		count = PCEIL(end_count) - CEIL(end_count);
		if (count >= lbsize) {

			/*
			 * There is space between the beginning
			 * of the hole to be created and
			 * end of the last offset
			 * Allocate blocks for it
			 */

			iext = &ip->i_ext[ip->i_ext_used - 1];
			icb_offset = iext->ib_offset + CEIL(iext->ib_count);

			if (iext->ib_flags == IB_UN_RE_AL) {

				/*
				 * Previous extent is a unallocated
				 * extent. Create a new allocated
				 * extent
				 */

				error = ud_create_ext(ip, ip->i_ext_used,
					ALLOC_SPACE | NEW_EXT,
					alloc_only, icb_offset, &count);

			} else {

				/*
				 * Last extent is allocated
				 * try to allocate adjacent to the
				 * last extent
				 */

				error = ud_create_ext(ip, ip->i_ext_used - 1,
						ALLOC_SPACE, alloc_only,
						icb_offset, &count);
			}

			if (error != 0) {
				return (error);
			}
		}

		/* Now record the hole itself, up to the page before off */
		iext = &ip->i_ext[ip->i_ext_used - 1];
		end_count = iext->ib_offset + iext->ib_count;
		count = PBASE(off) - PCEIL(end_count);
		icb_offset = PCEIL(end_count);

		if (iext->ib_flags == IB_UN_RE_AL) {

			/*
			 * The last extent is unallocated
			 * Just bump the extent count
			 */
			(void) ud_create_ext(ip, ip->i_ext_used - 1,
					0, alloc_only, icb_offset, &count);
		} else {

			/*
			 * Last extent is allocated
			 * round up the size of the extent to
			 * lbsize and allocate a new unallocated extent
			 */
			iext->ib_count = CEIL(iext->ib_count);
			(void) ud_create_ext(ip, ip->i_ext_used,
				NEW_EXT, alloc_only, icb_offset, &count);
		}

		icb_offset = PBASE(off);
	} else {

		/*
		 * We cannot create any hole in between
		 * the last extent and the off so
		 * round up the count in the last extent
		 */

		iext = &ip->i_ext[ip->i_ext_used - 1];
		iext->ib_count = CEIL(iext->ib_count);

	}


	/* Finally allocate the data region itself, through end_req */
	iext = &ip->i_ext[ip->i_ext_used - 1];
	count = end_req - (iext->ib_offset + iext->ib_count);
	icb_offset = iext->ib_offset + CEIL(iext->ib_count);

	if (iext->ib_flags == IB_UN_RE_AL) {

		/*
		 * Last extent was a unallocated extent
		 * create a new extent
		 */

		error = ud_create_ext(ip, ip->i_ext_used,
			ALLOC_SPACE | NEW_EXT, alloc_only, icb_offset, &count);
	} else {

		/*
		 * Last extent was an allocated extent
		 * try to allocate adjacent to the old blocks
		 */

		error = ud_create_ext(ip, ip->i_ext_used - 1,
			ALLOC_SPACE, alloc_only, icb_offset, &count);
	}

	return (error);
}
931*0Sstevel@tonic-gate 
932*0Sstevel@tonic-gate /*
933*0Sstevel@tonic-gate  * Break up the icb_ext at index
934*0Sstevel@tonic-gate  * into two icb_ext,
935*0Sstevel@tonic-gate  * one at index ib_count "count" and
936*0Sstevel@tonic-gate  * the other at index+1 with ib_count = old_ib_count - count
937*0Sstevel@tonic-gate  */
938*0Sstevel@tonic-gate int32_t
ud_break_create_new_icb(struct ud_inode * ip,int32_t index,uint32_t count)939*0Sstevel@tonic-gate ud_break_create_new_icb(struct ud_inode *ip,
940*0Sstevel@tonic-gate 	int32_t index, uint32_t count)
941*0Sstevel@tonic-gate {
942*0Sstevel@tonic-gate 	int32_t i, error;
943*0Sstevel@tonic-gate 	struct icb_ext *iext, *next;
944*0Sstevel@tonic-gate 
945*0Sstevel@tonic-gate 
946*0Sstevel@tonic-gate 	ud_printf("ud_break_create_new_icb\n");
947*0Sstevel@tonic-gate 	iext = &ip->i_ext[index];
948*0Sstevel@tonic-gate 
949*0Sstevel@tonic-gate 	ASSERT(count < iext->ib_count);
950*0Sstevel@tonic-gate 
951*0Sstevel@tonic-gate 	if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
952*0Sstevel@tonic-gate 		return (error);
953*0Sstevel@tonic-gate 	}
954*0Sstevel@tonic-gate 
955*0Sstevel@tonic-gate 	for (i = ip->i_ext_used; i > index; i--) {
956*0Sstevel@tonic-gate 		ip->i_ext[i] = ip->i_ext[i - 1];
957*0Sstevel@tonic-gate 	}
958*0Sstevel@tonic-gate 
959*0Sstevel@tonic-gate 	next = &ip->i_ext[index + 1];
960*0Sstevel@tonic-gate 	iext = &ip->i_ext[index];
961*0Sstevel@tonic-gate 
962*0Sstevel@tonic-gate 	iext->ib_count = count;
963*0Sstevel@tonic-gate 	next->ib_count -= count;
964*0Sstevel@tonic-gate 	next->ib_offset = iext->ib_offset + iext->ib_count;
965*0Sstevel@tonic-gate 	if (iext->ib_flags != IB_UN_RE_AL) {
966*0Sstevel@tonic-gate 		next->ib_block = iext->ib_block +
967*0Sstevel@tonic-gate 			iext->ib_count >> ip->i_udf->udf_l2b_shift;
968*0Sstevel@tonic-gate 	}
969*0Sstevel@tonic-gate 	ip->i_ext_used++;
970*0Sstevel@tonic-gate 	return (0);
971*0Sstevel@tonic-gate }
972*0Sstevel@tonic-gate 
973*0Sstevel@tonic-gate void
ud_remove_ext_at_index(struct ud_inode * ip,int32_t index)974*0Sstevel@tonic-gate ud_remove_ext_at_index(struct ud_inode *ip, int32_t index)
975*0Sstevel@tonic-gate {
976*0Sstevel@tonic-gate 	int32_t i;
977*0Sstevel@tonic-gate 
978*0Sstevel@tonic-gate 	ASSERT(index <= ip->i_ext_used);
979*0Sstevel@tonic-gate 
980*0Sstevel@tonic-gate 	for (i = index; i < ip->i_ext_used; i++) {
981*0Sstevel@tonic-gate 		if ((i + 1) < ip->i_ext_count) {
982*0Sstevel@tonic-gate 			ip->i_ext[i] = ip->i_ext[i + 1];
983*0Sstevel@tonic-gate 		} else {
984*0Sstevel@tonic-gate 			bzero(&ip->i_ext[i], sizeof (struct icb_ext));
985*0Sstevel@tonic-gate 		}
986*0Sstevel@tonic-gate 	}
987*0Sstevel@tonic-gate 	ip->i_ext_used --;
988*0Sstevel@tonic-gate }
989*0Sstevel@tonic-gate 
/*
 * Guarantee room for one more extent, both in core and on disk.
 * Grows the in-core i_ext array by EXT_PER_MALLOC entries when it is
 * full, and when the on-disk allocation-descriptor space is
 * exhausted (i_ext_used >= i_cur_max_ext) allocates a new
 * continuation (allocation extent) block and bumps i_cur_max_ext.
 * Returns 0 on success or an errno value.
 */
int32_t
ud_bump_ext_count(struct ud_inode *ip, int32_t sleep_flag)
{
	int32_t error = 0;
	struct icb_ext *iext;
	uint32_t old_count, elen;

	ASSERT(ip);
	ASSERT(sleep_flag == KM_SLEEP);

	ud_printf("ud_bump_ext_count\n");

	/* Grow the in-core extent array if it is full */
	if (ip->i_ext_used >= ip->i_ext_count) {

		old_count = sizeof (struct icb_ext) * ip->i_ext_count;
		ip->i_ext_count += EXT_PER_MALLOC;
		iext = kmem_zalloc(sizeof (struct icb_ext) *
				ip->i_ext_count, sleep_flag);
		bcopy(ip->i_ext, iext, old_count);
		kmem_free(ip->i_ext, old_count);
		ip->i_ext = iext;
	}

	/*
	 * Out of on-disk descriptor space: allocate a new
	 * continuation block to hold more allocation descriptors.
	 */
	if (ip->i_ext_used >= ip->i_cur_max_ext) {
		int32_t prox;
		struct icb_ext *icon;
		uint32_t blkno, sz;
		int32_t lbmask, l2b;

		/*
		 * NOTE(review): lbmask looks unused here but is
		 * presumably read by the CEIL() macro defined earlier
		 * in this file -- verify before removing.
		 */
		lbmask = ip->i_udf->udf_lbmask;
		l2b = ip->i_udf->udf_l2b_shift;

		/* Pull in all continuation extents recorded so far */
		if ((error = ud_read_icb_till_off(ip, ip->i_size)) != 0) {
			return (error);
		}

		/*
		 * If there are any old cont extents
		 * allocate the new one adjacent to the old one
		 */
		if (ip->i_con_used != 0) {
			icon = &ip->i_con[ip->i_con_used - 1];
			prox = icon->ib_block + (CEIL(icon->ib_count) >> l2b);
		} else {
			prox = 0;
		}

		/*
		 * Allocate space
		 */
		if ((error = ud_alloc_space(ip->i_vfs, ip->i_icb_prn,
				prox, 1, &blkno, &sz, 0, 0)) != 0) {
			return (error);
		}
		if (sz == 0) {
			return (ENOSPC);
		}

		sz <<= l2b;	/* blocks -> bytes */

		/* Grow the in-core continuation array if it is full */
		if (ip->i_con_used == ip->i_con_count) {
			struct icb_ext *old;
			int32_t old_count;

			old = ip->i_con;
			old_count = ip->i_con_count *
				sizeof (struct icb_ext);
			ip->i_con_count += EXT_PER_MALLOC;
			ip->i_con = kmem_zalloc(ip->i_con_count *
				sizeof (struct icb_ext), KM_SLEEP);
			if (old != 0) {
				bcopy(old, ip->i_con, old_count);
				kmem_free(old, old_count);
			}
		}
		/* Record the new continuation extent in core */
		icon = &ip->i_con[ip->i_con_used++];
		icon->ib_flags = IB_CON;
		icon->ib_prn = ip->i_icb_prn;
		icon->ib_block = blkno;
		icon->ib_count = sz;
		icon->ib_offset = 0;
		icon->ib_marker1 = (uint32_t)0xAAAAAAAA;
		icon->ib_marker2 = (uint32_t)0xBBBBBBBB;

		/*
		 * Bump the i_cur_max_ext according to
		 * the space allocated
		 */
		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
			elen = sizeof (struct short_ad);
		} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
			elen = sizeof (struct long_ad);
		} else {
			return (ENOSPC);
		}
		/*
		 * Usable space is the block minus its header and one
		 * descriptor's worth (presumably reserved for the next
		 * continuation pointer -- confirm against ECMA-167).
		 */
		sz = sz - (sizeof (struct alloc_ext_desc) + elen);
		ip->i_cur_max_ext += sz / elen;
	}
	return (error);
}
1090*0Sstevel@tonic-gate 
/*
 * Cover "*count" bytes of the file at file offset "offset" with
 * extents, starting at extent slot "index".
 *
 * flags:
 *	ALLOC_SPACE	allocate disk blocks; otherwise record
 *			unallocated (hole) extents
 *	NEW_EXT		start a new extent instead of extending the
 *			one already at "index"
 *
 * On return *count holds the bytes still uncovered (0 on success).
 * If alloc_only is zero, newly allocated blocks are zero-filled on
 * disk via ud_zero_it().  Returns 0 or an errno value.
 */
int32_t
ud_create_ext(struct ud_inode *ip, int32_t index, uint32_t flags,
	int32_t alloc_only, uint64_t offset, uint64_t *count)
{
	struct icb_ext *iext, *pext;
	struct udf_vfs *udf_vfsp;
	int32_t error = 0, blkcount, acount;
	uint32_t blkno, sz, prox, mext_sz;
	int32_t lbmask, l2b;

	if (*count == 0) {
		return (0);
	}

begin:
	udf_vfsp = ip->i_udf;
	/*
	 * NOTE(review): lbmask appears unused directly but is
	 * presumably read by the CEIL() macro defined earlier in
	 * this file -- verify before removing.
	 */
	lbmask = udf_vfsp->udf_lbmask;
	l2b = udf_vfsp->udf_l2b_shift;
	/* Largest byte count a single extent is allowed to carry */
	mext_sz = (1 << MEXT_BITS) - PAGESIZE;

	/* Ensure there is room for one more extent entry */
	if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
		return (error);
	}

	iext = &ip->i_ext[index];
	if (flags & ALLOC_SPACE) {
		if ((flags & NEW_EXT) ||
			(ip->i_ext_count == 0)) {

			/* Start a brand new allocated extent at "offset" */
			iext->ib_flags = 0;
			iext->ib_prn = ip->i_icb_prn;
			if (*count > mext_sz) {
				blkcount = mext_sz >> l2b;
			} else {
				blkcount = CEIL(*count) >> l2b;
			}
			if ((error = ud_alloc_space(ip->i_vfs,
					ip->i_icb_prn, 0, blkcount,
					&blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			if (sz == 0) {
				return (ENOSPC);
			}
			ip->i_lbr += sz;
			iext->ib_block = blkno;
			acount = sz << l2b;
			/* May get fewer blocks than asked for */
			if ((sz << l2b) > *count) {
				iext->ib_count = *count;
				*count = 0;
			} else {
				iext->ib_count = sz << l2b;
				*count -= iext->ib_count;
			}
			iext->ib_offset = offset;
			if (ip->i_ext_used <= index)
				ip->i_ext_used ++;
		} else {
			/*
			 * Extend the allocated extent at "index",
			 * preferring blocks adjacent to its tail.
			 */
			if ((iext->ib_count + *count) > mext_sz) {
				blkcount = (mext_sz - iext->ib_count) >> l2b;
			} else {
				blkcount = CEIL(*count) >> l2b;
			}
			if (blkcount == 0) {
				/* Extent is full; start a new one */
				flags |= NEW_EXT;
				index++;
				goto begin;
			}
			prox = iext->ib_block + (CEIL(iext->ib_count) >> l2b);
			if ((error = ud_alloc_space(ip->i_vfs,
					ip->i_icb_prn, prox, blkcount,
					&blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			if (sz == 0) {
				return (ENOSPC);
			}
			acount = sz << l2b;
			if (acount > *count) {
				acount = *count;
				*count = 0;
			} else {
				*count -= acount;
			}
			ip->i_lbr += sz;
			if (prox == blkno) {
				/* Contiguous: grow the extent in place */
				iext->ib_count += acount;
			} else {
				/* Not contiguous: record a new extent */
				if ((error = ud_bump_ext_count(ip, KM_SLEEP))
						!= 0) {
					return (error);
				}
				pext = &ip->i_ext[index];
				iext = &ip->i_ext[index + 1];
				iext->ib_flags = 0;
				iext->ib_prn = ip->i_icb_prn;
				iext->ib_block = blkno;
				iext->ib_offset =
					pext->ib_offset + pext->ib_count;
				iext->ib_count = acount;
				/*
				 * Increment the index, since we have used
				 * the extent at [index+1] above.
				 */
				index++;
				if (ip->i_ext_used <= index)
					ip->i_ext_used ++;
			}
		}
		if (alloc_only == 0) {
			/* Zero-fill the newly allocated blocks on disk */
			error = ud_zero_it(ip, blkno, sz);
		}
		if (*count) {
			/* More to cover: continue with a fresh extent */
			offset = iext->ib_offset + CEIL(iext->ib_count);
			flags |= NEW_EXT;
			index++;
			goto begin;
		}
	} else {
		if (flags & NEW_EXT) {
			/* Record a new unallocated (hole) extent */
			iext->ib_flags = IB_UN_RE_AL;
			iext->ib_prn = 0;
			iext->ib_block = 0;
			if (*count > mext_sz) {
				iext->ib_count = mext_sz;
				*count -= iext->ib_count;
			} else {
				iext->ib_count = *count;
				*count = 0;
			}
			iext->ib_offset = offset;
			if (ip->i_ext_used <= index)
				ip->i_ext_used ++;
		} else {
			/* Grow the existing unallocated extent */
			ASSERT(iext->ib_flags == IB_UN_RE_AL);
			if ((iext->ib_count + *count) > mext_sz) {
				acount = mext_sz - iext->ib_count;
				iext->ib_count += acount;
				*count -= acount;
			} else {
				iext->ib_count += *count;
				*count = 0;
			}
		}
		if (*count != 0) {
			/* More to cover: continue with a fresh extent */
			offset = iext->ib_offset + CEIL(iext->ib_count);
			flags |= NEW_EXT;
			index++;
			goto begin;
		}
	}
	iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
	iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
	return (error);
}
1246*0Sstevel@tonic-gate 
1247*0Sstevel@tonic-gate #undef	CEIL
1248*0Sstevel@tonic-gate #undef	BASE
1249*0Sstevel@tonic-gate 
1250*0Sstevel@tonic-gate int32_t
ud_zero_it(struct ud_inode * ip,uint32_t start_block,uint32_t block_count)1251*0Sstevel@tonic-gate ud_zero_it(struct ud_inode *ip, uint32_t start_block, uint32_t block_count)
1252*0Sstevel@tonic-gate {
1253*0Sstevel@tonic-gate 	struct udf_vfs *udf_vfsp;
1254*0Sstevel@tonic-gate 	uint32_t bno, dummy;
1255*0Sstevel@tonic-gate 	int32_t error;
1256*0Sstevel@tonic-gate 	struct buf *bp;
1257*0Sstevel@tonic-gate 
1258*0Sstevel@tonic-gate 	/*
1259*0Sstevel@tonic-gate 	 * Donot use bio routines
1260*0Sstevel@tonic-gate 	 * since the buffer can sit
1261*0Sstevel@tonic-gate 	 * long enough in cache for the space
1262*0Sstevel@tonic-gate 	 * to be allocated/freed and
1263*0Sstevel@tonic-gate 	 * then allocated
1264*0Sstevel@tonic-gate 	 */
1265*0Sstevel@tonic-gate 	udf_vfsp = ip->i_udf;
1266*0Sstevel@tonic-gate 	bno = ud_xlate_to_daddr(udf_vfsp,
1267*0Sstevel@tonic-gate 		ip->i_icb_prn, start_block, block_count, &dummy);
1268*0Sstevel@tonic-gate 
1269*0Sstevel@tonic-gate 	dummy = block_count << udf_vfsp->udf_l2b_shift;
1270*0Sstevel@tonic-gate 	bp = (struct buf *)kmem_zalloc(biosize(), KM_SLEEP);
1271*0Sstevel@tonic-gate 	sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
1272*0Sstevel@tonic-gate 	sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);
1273*0Sstevel@tonic-gate 
1274*0Sstevel@tonic-gate 	bp->b_flags = B_WRITE | B_BUSY;
1275*0Sstevel@tonic-gate 	bp->b_edev = ip->i_dev;
1276*0Sstevel@tonic-gate 	bp->b_dev = cmpdev(ip->i_dev);
1277*0Sstevel@tonic-gate 	bp->b_blkno = bno << udf_vfsp->udf_l2d_shift;
1278*0Sstevel@tonic-gate 	bp->b_bcount = dummy;
1279*0Sstevel@tonic-gate 	bp->b_un.b_addr = kmem_zalloc(bp->b_bcount, KM_SLEEP);
1280*0Sstevel@tonic-gate 	bp->b_file = ip->i_vnode;
1281*0Sstevel@tonic-gate 	bp->b_offset = -1;
1282*0Sstevel@tonic-gate 
1283*0Sstevel@tonic-gate 	(void) bdev_strategy(bp);
1284*0Sstevel@tonic-gate 	if (error = biowait(bp)) {
1285*0Sstevel@tonic-gate 		cmn_err(CE_WARN, "error in write\n");
1286*0Sstevel@tonic-gate 	}
1287*0Sstevel@tonic-gate 
1288*0Sstevel@tonic-gate 	kmem_free(bp->b_un.b_addr, dummy);
1289*0Sstevel@tonic-gate 	sema_destroy(&bp->b_io);
1290*0Sstevel@tonic-gate 	sema_destroy(&bp->b_sem);
1291*0Sstevel@tonic-gate 	kmem_free((caddr_t)bp, biosize());
1292*0Sstevel@tonic-gate 
1293*0Sstevel@tonic-gate 	return (error);
1294*0Sstevel@tonic-gate }
1295