xref: /netbsd-src/sys/fs/udf/udf_allocation.c (revision 4b71a66d0f279143147d63ebfcfd8a59499a3684)
1 /* $NetBSD: udf_allocation.c,v 1.2 2008/05/20 21:31:52 reinoud Exp $ */
2 
3 /*
4  * Copyright (c) 2006, 2008 Reinoud Zandijk
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.2 2008/05/20 21:31:52 reinoud Exp $");
32 #endif /* not lint */
33 
34 
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39 
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62 
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65 
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69 
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73 
74 
75 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
76 
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 	struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 	struct long_ad *node_ad_cpy);
80 
81 /*
82  * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
83  * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
85  * searching for the same or adjacent position again.
86  */
87 
88 /* --------------------------------------------------------------------- */
89 //#ifdef DEBUG
90 #if 1
91 #if 1
/*
 * Debugging aid: print a node's allocation descriptors to the console.
 * Only active when the UDF_DEBUG_ADWLK bit is set in udf_verbose.
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad  *long_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t  *data_pos;
	int part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags;

	if ((udf_verbose & UDF_DEBUG_ADWLK) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		/* -1: presumably compensates for a 1-byte tail in the
		 * on-disc struct definition -- TODO confirm in ecma167-udf.h */
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		/* allocation descriptors follow the extended attributes */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* maximum room for allocation descriptors in this logical block */
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump:\n");
	printf("\tudf_node %p\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data is embedded in the node; nothing to walk */
		printf("\t\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\t\tInflen  = %"PRIu64"\n", inflen);
	printf("\t\tl_ad    = %d\n", l_ad);

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	printf("\t\t");
	/* dump the whole descriptor area, marking where l_ad ends */
	for (ad_off = 0; ad_off < max_l_ad-adlen; ad_off += adlen) {
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len      = udf_rw32(short_ad->len);
			lb_num   = udf_rw32(short_ad->lb_num);
			part_num = -1;	/* short_ads carry no partition */
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);
		} else {
			long_ad  = (struct long_ad *) (data_pos + ad_off);
			len      = udf_rw32(long_ad->len);
			lb_num   = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);
		}
		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags);
		printf("] ");
		if (ad_off + adlen == l_ad)
			printf("\n\t\tl_ad END\n\t\t");
	}
	printf("\n");
}
179 #else
180 #define udf_node_dump(a)
181 #endif
182 
/*
 * Debug consistency check: walk a node's allocation descriptors, count the
 * recorded information length and logical blocks, and KASSERT that the
 * totals match the values stored in the node's (extended) file entry.
 * The totals are also returned via cnt_inflen / cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad  *long_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t  *data_pos;
	int part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags, whole_lb;

	/* only lock mutex; we're not changing and it's a debug checking func */
	mutex_enter(&udf_node->node_mutex);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		logblksrec = udf_rw64(fe->logblks_rec);
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		/* allocation descriptors follow the extended attributes */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		logblksrec = udf_rw64(efe->logblks_rec);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* maximum room for allocation descriptors in this logical block */
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* reset counters */
	*cnt_inflen     = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: l_ad is the byte length of the file data */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		mutex_exit(&udf_node->node_mutex);
		return;
	}

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	/* start counting */
	whole_lb = 1;
	for (ad_off = 0; ad_off < l_ad; ad_off += adlen) {
		/* only the very last extent may span a partial block */
		KASSERT(whole_lb == 1);
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len      = udf_rw32(short_ad->len);
			lb_num   = udf_rw32(short_ad->lb_num);
			part_num = -1;
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);
		} else {
			long_ad  = (struct long_ad *) (data_pos + ad_off);
			len      = udf_rw32(long_ad->len);
			lb_num   = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);
		}
		KASSERT(flags != UDF_EXT_REDIRECT);	/* not implemented yet */
		*cnt_inflen += len;
		if (flags == UDF_EXT_ALLOCATED) {
			/* round up: a partial block still occupies a block */
			*cnt_logblksrec += (len + lb_size -1) / lb_size;
		}
		whole_lb = ((len % lb_size) == 0);
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

	mutex_exit(&udf_node->node_mutex);
	if (0)
		udf_node_dump(udf_node);
}
279 #else
280 #define udf_node_sanity_check(a, b, c)
281 #endif
282 
283 /* --------------------------------------------------------------------- */
284 
/*
 * Translate the location given in icb_loc (virtual partition number plus
 * logical block number within that partition) to a block number on disc.
 * *lb_numres receives the translated block number and *extres the number
 * of blocks for which the same linear translation holds.
 * Returns 0 on success or EINVAL when the location can't be translated.
 */
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
		   uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc       *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart  = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

translate_again:
	/* resolve the virtual partition to its backing physical partition */
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		/* XXX(review): `>' vs `>=' -- lb_num == part_len passes */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel    = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres    = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* not remapped: transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot    = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len   = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			/* redirect extents don't advance the file offset */
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num   = udf_rw32(s_icb_loc.loc.lb_num);
		vpart    = udf_rw16(s_icb_loc.loc.part_num);
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		len     -= ext_offset;
		ext_offset = 0;

		flags = UDF_EXT_FLAGS(s_icb_loc.len);

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}
437 
438 /* --------------------------------------------------------------------- */
439 
440 /*
441  * Translate an extent (in logical_blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
443  */
444 
445 int
446 udf_translate_file_extent(struct udf_node *udf_node,
447 		          uint32_t from, uint32_t num_lb,
448 			  uint64_t *map)
449 {
450 	struct udf_mount *ump;
451 	struct icb_tag *icbtag;
452 	struct long_ad t_ad, s_ad;
453 	uint64_t transsec;
454 	uint64_t foffset, end_foffset;
455 	uint32_t transsec32;
456 	uint32_t lb_size;
457 	uint32_t ext_offset;
458 	uint32_t lb_num, len;
459 	uint32_t overlap, translen;
460 	uint16_t vpart_num;
461 	int eof, error, flags;
462 	int slot, addr_type, icbflags;
463 
464 	if (!udf_node)
465 		return ENOENT;
466 
467 	KASSERT(num_lb > 0);
468 
469 	UDF_LOCK_NODE(udf_node, 0);
470 
471 	/* initialise derivative vars */
472 	ump = udf_node->ump;
473 	lb_size = udf_rw32(ump->logical_vol->lb_size);
474 
475 	if (udf_node->fe) {
476 		icbtag = &udf_node->fe->icbtag;
477 	} else {
478 		icbtag = &udf_node->efe->icbtag;
479 	}
480 	icbflags  = udf_rw16(icbtag->flags);
481 	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
482 
483 	/* do the work */
484 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
485 		*map = UDF_TRANS_INTERN;
486 		UDF_UNLOCK_NODE(udf_node, 0);
487 		return 0;
488 	}
489 
490 	/* find first overlapping extent */
491 	foffset = 0;
492 	slot    = 0;
493 	for (;;) {
494 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
495 		if (eof) {
496 			DPRINTF(TRANSLATE,
497 				("Translate file extent "
498 				 "failed: can't seek location\n"));
499 			UDF_UNLOCK_NODE(udf_node, 0);
500 			return EINVAL;
501 		}
502 		len    = udf_rw32(s_ad.len);
503 		flags  = UDF_EXT_FLAGS(len);
504 		len    = UDF_EXT_LEN(len);
505 		lb_num = udf_rw32(s_ad.loc.lb_num);
506 
507 		if (flags == UDF_EXT_REDIRECT) {
508 			slot++;
509 			continue;
510 		}
511 
512 		end_foffset = foffset + len;
513 
514 		if (end_foffset > from * lb_size)
515 			break;	/* found */
516 		foffset = end_foffset;
517 		slot++;
518 	}
519 	/* found overlapping slot */
520 	ext_offset = from * lb_size - foffset;
521 
522 	for (;;) {
523 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
524 		if (eof) {
525 			DPRINTF(TRANSLATE,
526 				("Translate file extent "
527 				 "failed: past eof\n"));
528 			UDF_UNLOCK_NODE(udf_node, 0);
529 			return EINVAL;
530 		}
531 
532 		len    = udf_rw32(s_ad.len);
533 		flags  = UDF_EXT_FLAGS(len);
534 		len    = UDF_EXT_LEN(len);
535 
536 		lb_num    = udf_rw32(s_ad.loc.lb_num);
537 		vpart_num = udf_rw16(s_ad.loc.part_num);
538 
539 		end_foffset = foffset + len;
540 
541 		/* process extent, don't forget to advance on ext_offset! */
542 		lb_num  += (ext_offset + lb_size -1) / lb_size;
543 		overlap  = (len - ext_offset + lb_size -1) / lb_size;
544 		ext_offset = 0;
545 
546 		/*
547 		 * note that the while(){} is nessisary for the extent that
548 		 * the udf_translate_vtop() returns doens't have to span the
549 		 * whole extent.
550 		 */
551 
552 		overlap = MIN(overlap, num_lb);
553 		while (overlap) {
554 			switch (flags) {
555 			case UDF_EXT_FREE :
556 			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
557 				transsec = UDF_TRANS_ZERO;
558 				translen = overlap;
559 				while (overlap && num_lb && translen) {
560 					*map++ = transsec;
561 					lb_num++;
562 					overlap--; num_lb--; translen--;
563 				}
564 				break;
565 			case UDF_EXT_ALLOCATED :
566 				t_ad.loc.lb_num   = udf_rw32(lb_num);
567 				t_ad.loc.part_num = udf_rw16(vpart_num);
568 				error = udf_translate_vtop(ump,
569 						&t_ad, &transsec32, &translen);
570 				transsec = transsec32;
571 				if (error) {
572 					UDF_UNLOCK_NODE(udf_node, 0);
573 					return error;
574 				}
575 				while (overlap && num_lb && translen) {
576 					*map++ = transsec;
577 					lb_num++; transsec++;
578 					overlap--; num_lb--; translen--;
579 				}
580 				break;
581 			default: /* UDF_EXT_REDIRECT */
582 				/* ignore, not a mapping */
583 				break;
584 			}
585 		}
586 		if (num_lb == 0)
587 			break;
588 
589 		if (flags != UDF_EXT_REDIRECT)
590 			foffset = end_foffset;
591 		slot++;
592 	}
593 	UDF_UNLOCK_NODE(udf_node, 0);
594 
595 	return 0;
596 }
597 
598 /* --------------------------------------------------------------------- */
599 
600 static int
601 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
602 {
603 	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
604 	uint8_t *blob;
605 	int entry, chunk, found, error;
606 
607 	KASSERT(ump);
608 	KASSERT(ump->logical_vol);
609 
610 	lb_size = udf_rw32(ump->logical_vol->lb_size);
611 	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
612 
613 	/* TODO static allocation of search chunk */
614 
615 	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
616 	found  = 0;
617 	error  = 0;
618 	entry  = 0;
619 	do {
620 		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
621 		if (chunk <= 0)
622 			break;
623 		/* load in chunk */
624 		error = udf_vat_read(ump->vat_node, blob, chunk,
625 				ump->vat_offset + lb_num * 4);
626 
627 		if (error)
628 			break;
629 
630 		/* search this chunk */
631 		for (entry=0; entry < chunk /4; entry++, lb_num++) {
632 			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
633 			lb_map = udf_rw32(udf_rw32_lbmap);
634 			if (lb_map == 0xffffffff) {
635 				found = 1;
636 				break;
637 			}
638 		}
639 	} while (!found);
640 	if (error) {
641 		printf("udf_search_free_vatloc: error reading in vat chunk "
642 			"(lb %d, size %d)\n", lb_num, chunk);
643 	}
644 
645 	if (!found) {
646 		/* extend VAT */
647 		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
648 		lb_num = ump->vat_entries;
649 		ump->vat_entries++;
650 	}
651 
652 	/* mark entry with initialiser just in case */
653 	lb_map = udf_rw32(0xfffffffe);
654 	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
655 		ump->vat_offset + lb_num *4);
656 	ump->vat_last_free_lb = lb_num;
657 
658 	free(blob, M_UDFTEMP);
659 	*lbnumres = lb_num;
660 	return 0;
661 }
662 
663 
/*
 * Allocate up to *num_lb blocks from the given free-space bitmap; a set
 * bit marks a free block and is cleared when the block is allocated (the
 * inverse operation of udf_bitmap_free()). ptov is the partition-to-volume
 * offset: for every block found, its logical block number is appended to
 * lmappos and its physical block number (lb_num + ptov) to pmappos. On
 * return *num_lb holds the number of blocks that could NOT be allocated.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t  diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	/* resume at the rolling position kept for this data class */
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	/* byte-align so `offset + bit-1' below yields the exact lb number */
	offset &= ~7;
	/* two passes: wrap around to the start once the end is reached */
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* grab the first free bit in this byte, if any */
			bpos  = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* 1-based; 0 if byte full */
			if (bit == 0) {
				/* byte fully allocated; try the next one */
				offset += 8;
				continue;
			}
			*bpos &= ~(1 << (bit-1));	/* mark allocated */
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*pmappos++ = lb_num + ptov;
			*num_lb = *num_lb - 1;
			/* stay on this byte: remaining free bits get used */
			// offset = (offset & ~7);
		}
	}

	/* remember where we stopped for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
711 
712 
713 static void
714 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
715 {
716 	uint32_t offset;
717 	uint32_t bit, bitval;
718 	uint8_t *bpos;
719 
720 	offset = lb_num;
721 
722 	/* starter bits */
723 	bpos = bitmap->bits + offset/8;
724 	bit = offset % 8;
725 	while ((bit != 0) && (num_lb > 0)) {
726 		bitval = (1 << bit);
727 		KASSERT((*bpos & bitval) == 0);
728 		*bpos |= bitval;
729 		offset++; num_lb--;
730 		bit = (bit + 1) % 8;
731 	}
732 	if (num_lb == 0)
733 		return;
734 
735 	/* whole bytes */
736 	KASSERT(bit == 0);
737 	bpos = bitmap->bits + offset / 8;
738 	while (num_lb >= 8) {
739 		KASSERT((*bpos == 0));
740 		*bpos = 255;
741 		offset += 8; num_lb -= 8;
742 		bpos++;
743 	}
744 
745 	/* stop bits */
746 	KASSERT(num_lb < 8);
747 	bit = 0;
748 	while (num_lb > 0) {
749 		bitval = (1 << bit);
750 		KASSERT((*bpos & bitval) == 0);
751 		*bpos |= bitval;
752 		offset++; num_lb--;
753 		bit = (bit + 1) % 8;
754 	}
755 }
756 
757 
758 /* allocate a contiguous sequence of sectornumbers */
static int
udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
	int num_lb, uint16_t *alloc_partp,
	uint64_t *lmapping, uint64_t *pmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos, *pmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_part;
	int error;

	/*
	 * Allocates num_lb blocks using the given strategy (alloc_type).
	 * *alloc_partp receives the virtual partition allocated from;
	 * lmapping/pmapping receive the logical resp. physical block
	 * numbers (caller provides room for num_lb entries each).
	 * Returns 0 or an errno value (e.g. ENOSPC).
	 */
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* select the partition and track matching the data class */
	if (ismetadata) {
		alloc_part  = ump->metadata_part;
		alloc_track = &ump->metadata_track;
		other_track = &ump->data_track;
	} else {
		alloc_part  = ump->data_part;
		alloc_track = &ump->data_track;
		other_track = &ump->metadata_track;
	}

	*alloc_partp = alloc_part;

	error = 0;
	/* XXX check disc space */

	pdesc = ump->partitions[ump->vtop[alloc_part]];
	lmappos = lmapping;
	pmappos = pmapping;

	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;
			*pmappos = 0;		/* will get late-allocated */
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* calculate offset from physical base partition */
		ptov  = udf_rw32(pdesc->start_loc);

		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*pmappos++ = alloc_track->next_writable;
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}
		/* both classes on one track: keep the shadow copy in sync */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		ptov  = udf_rw32(pdesc->start_loc);

		/* allocate on unallocated bits page */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[alloc_part];
		udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
			pmappos, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			/* only num_lb - alloc_num_lb blocks were obtained */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the integrity descriptor */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + alloc_part;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :
	case UDF_ALLOC_METASEQUENTIAL :
	case UDF_ALLOC_RELAXEDSEQUENTIAL :
		printf("ALERT: udf_allocate_space : allocation %d "
				"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}
	/*
	 * NOTE(review): no default case -- an unknown alloc_type would
	 * return 0 with untouched mappings; confirm all callers only pass
	 * the types handled above.
	 */

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		pmappos = pmapping;
		printf("udf_allocate_space, mapping l->p:\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("\t%"PRIu64" -> %"PRIu64"\n",
				*lmappos++, *pmappos++);
		}
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
874 
875 /* --------------------------------------------------------------------- */
876 
/*
 * Return num_lb previously allocated blocks, starting at block lb_num in
 * virtual partition vpart_num, to the free space administration: the
 * freed/unallocated bitmap for physical and sparable partitions, or the
 * VAT for virtual partitions. Adjusts the free-block count in the logical
 * volume integrity descriptor where applicable.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by resetting it to 0xffffffff */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		/* NOTE: write failure is only caught under DIAGNOSTIC */
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		/* FALLTHROUGH: not implemented yet */
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
948 
949 /* --------------------------------------------------------------------- */
950 
951 int
952 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
953 	uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
954 {
955 	int ismetadata, alloc_type;
956 
957 	ismetadata = (udf_c_type == UDF_C_NODE);
958 	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
959 
960 #ifdef DIAGNOSTIC
961 	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
962 		panic("udf_pre_allocate_space: bad c_type on VAT!\n");
963 	}
964 #endif
965 
966 	/* reserve size for VAT allocated data */
967 	if (alloc_type == UDF_ALLOC_VAT) {
968 		mutex_enter(&ump->allocate_mutex);
969 			ump->uncomitted_lb += num_lb;
970 		mutex_exit(&ump->allocate_mutex);
971 	}
972 
973 	return udf_allocate_space(ump, ismetadata, alloc_type,
974 		num_lb, alloc_partp, lmapping, pmapping);
975 }
976 
977 /* --------------------------------------------------------------------- */
978 
979 /*
980  * Allocate a buf on disc for direct write out. The space doesn't have to be
981  * contiguous as the caller takes care of this.
982  */
983 
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
{
	struct udf_node  *udf_node = VTOI(buf->b_vp);
	uint16_t vpart_num;
	int lb_size, blks, udf_c_type;
	int ismetadata, alloc_type;
	int num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size    = udf_rw32(ump->logical_vol->lb_size);
	num_lb     = (buf->b_bcount + lb_size -1) / lb_size;	/* round up */
	blks       = lb_size / DEV_BSIZE;	/* device blocks per lb */
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	/* choose the allocation strategy by data class */
	ismetadata = (udf_c_type == UDF_C_NODE);
	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;

#ifdef DIAGNOSTIC
	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
		panic("udf_late_allocate_buf: bad c_type on VAT!\n");
	}
#endif

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, it's already allocated */
		if (alloc_type != UDF_ALLOC_VAT)
			return;

		/* VAT nodes are written out sequentially */
		alloc_type = UDF_ALLOC_SEQUENTIAL;
	}

	error = udf_allocate_space(ump, ismetadata, alloc_type,
			num_lb, &vpart_num, lmapping, pmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* commit our sector count, reserved by udf_pre_allocate_space() */
	mutex_enter(&ump->allocate_mutex);
		if (num_lb > ump->uncomitted_lb) {
			ump->uncomitted_lb = 0;
		} else {
			ump->uncomitted_lb -= num_lb;
		}
	mutex_exit(&ump->allocate_mutex);

	/* point the buf at its first physical sector, in device blocks */
	buf->b_blkno = (*pmapping) * blks;

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
			udf_node->outstanding_bufs--;
		splx(s);
	}
}
1055 
1056 /* --------------------------------------------------------------------- */
1057 
1058 /*
1059  * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1060  * possible (anymore); a2 returns the rest piece.
1061  */
1062 
1063 static int
1064 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1065 {
1066 	uint32_t max_len, merge_len;
1067 	uint32_t a1_len, a2_len;
1068 	uint32_t a1_flags, a2_flags;
1069 	uint32_t a1_lbnum, a2_lbnum;
1070 	uint16_t a1_part, a2_part;
1071 
1072 	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1073 
1074 	a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1075 	a1_len   = UDF_EXT_LEN(udf_rw32(a1->len));
1076 	a1_lbnum = udf_rw32(a1->loc.lb_num);
1077 	a1_part  = udf_rw16(a1->loc.part_num);
1078 
1079 	a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1080 	a2_len   = UDF_EXT_LEN(udf_rw32(a2->len));
1081 	a2_lbnum = udf_rw32(a2->loc.lb_num);
1082 	a2_part  = udf_rw16(a2->loc.part_num);
1083 
1084 	/* defines same space */
1085 	if (a1_flags != a2_flags)
1086 		return 1;
1087 
1088 	if (a1_flags != UDF_EXT_FREE) {
1089 		/* the same partition */
1090 		if (a1_part != a2_part)
1091 			return 1;
1092 
1093 		/* a2 is successor of a1 */
1094 		if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1095 			return 1;
1096 	}
1097 
1098 	/* merge as most from a2 if possible */
1099 	merge_len = MIN(a2_len, max_len - a1_len);
1100 	a1_len   += merge_len;
1101 	a2_len   -= merge_len;
1102 	a2_lbnum += merge_len/lb_size;
1103 
1104 	a1->len = udf_rw32(a1_len | a1_flags);
1105 	a2->len = udf_rw32(a2_len | a2_flags);
1106 	a2->loc.lb_num = udf_rw32(a2_lbnum);
1107 
1108 	if (a2_len > 0)
1109 		return 1;
1110 
1111 	/* there is space over to merge */
1112 	return 0;
1113 }
1114 
1115 /* --------------------------------------------------------------------- */
1116 
1117 static void
1118 udf_wipe_adslots(struct udf_node *udf_node)
1119 {
1120 	struct file_entry      *fe;
1121 	struct extfile_entry   *efe;
1122 	struct alloc_ext_entry *ext;
1123 	uint64_t inflen, objsize;
1124 	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1125 	uint8_t *data_pos;
1126 	int extnr;
1127 
1128 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1129 
1130 	fe  = udf_node->fe;
1131 	efe = udf_node->efe;
1132 	if (fe) {
1133 		inflen  = udf_rw64(fe->inf_len);
1134 		objsize = inflen;
1135 		dscr_size  = sizeof(struct file_entry) -1;
1136 		l_ea       = udf_rw32(fe->l_ea);
1137 		l_ad       = udf_rw32(fe->l_ad);
1138 		data_pos = (uint8_t *) fe + dscr_size + l_ea;
1139 	} else {
1140 		inflen  = udf_rw64(efe->inf_len);
1141 		objsize = udf_rw64(efe->obj_size);
1142 		dscr_size  = sizeof(struct extfile_entry) -1;
1143 		l_ea       = udf_rw32(efe->l_ea);
1144 		l_ad       = udf_rw32(efe->l_ad);
1145 		data_pos = (uint8_t *) efe + dscr_size + l_ea;
1146 	}
1147 	max_l_ad = lb_size - dscr_size - l_ea;
1148 
1149 	/* wipe fe/efe */
1150 	memset(data_pos, 0, max_l_ad);
1151 	crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1152 	if (fe) {
1153 		fe->l_ad         = udf_rw32(0);
1154 		fe->logblks_rec  = udf_rw64(0);
1155 		fe->tag.desc_crc_len = udf_rw32(crclen);
1156 	} else {
1157 		efe->l_ad        = udf_rw32(0);
1158 		efe->logblks_rec = udf_rw64(0);
1159 		efe->tag.desc_crc_len = udf_rw32(crclen);
1160 	}
1161 
1162 	/* wipe all allocation extent entries */
1163 	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1164 		ext = udf_node->ext[extnr];
1165 		dscr_size  = sizeof(struct alloc_ext_entry) -1;
1166 		max_l_ad = lb_size - dscr_size;
1167 		memset(data_pos, 0, max_l_ad);
1168 		ext->l_ad = udf_rw32(0);
1169 
1170 		crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1171 		ext->tag.desc_crc_len = udf_rw32(crclen);
1172 	}
1173 }
1174 
1175 /* --------------------------------------------------------------------- */
1176 
1177 void
1178 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1179 	int *eof) {
1180 	struct file_entry      *fe;
1181 	struct extfile_entry   *efe;
1182 	struct alloc_ext_entry *ext;
1183 	struct icb_tag *icbtag;
1184 	struct short_ad *short_ad;
1185 	struct long_ad *long_ad;
1186 	uint32_t offset;
1187 	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad;
1188 	uint8_t *data_pos;
1189 	int icbflags, addr_type, adlen, extnr;
1190 
1191 	/* determine what descriptor we are in */
1192 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1193 
1194 	fe  = udf_node->fe;
1195 	efe = udf_node->efe;
1196 	if (fe) {
1197 		icbtag  = &fe->icbtag;
1198 		dscr_size  = sizeof(struct file_entry) -1;
1199 		l_ea       = udf_rw32(fe->l_ea);
1200 		l_ad       = udf_rw32(fe->l_ad);
1201 		data_pos = (uint8_t *) fe + dscr_size + l_ea;
1202 	} else {
1203 		icbtag  = &efe->icbtag;
1204 		dscr_size  = sizeof(struct extfile_entry) -1;
1205 		l_ea       = udf_rw32(efe->l_ea);
1206 		l_ad       = udf_rw32(efe->l_ad);
1207 		data_pos = (uint8_t *) efe + dscr_size + l_ea;
1208 	}
1209 	max_l_ad = lb_size - dscr_size - l_ea;
1210 
1211 	icbflags  = udf_rw16(icbtag->flags);
1212 	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1213 
1214 	/* just in case we're called on an intern, its EOF */
1215 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
1216 		memset(icb, 0, sizeof(struct long_ad));
1217 		*eof = 1;
1218 		return;
1219 	}
1220 
1221 	adlen = 0;
1222 	if (addr_type == UDF_ICB_SHORT_ALLOC) {
1223 		adlen = sizeof(struct short_ad);
1224 	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
1225 		adlen = sizeof(struct long_ad);
1226 	}
1227 
1228 	/* if offset too big, we go to the allocation extensions */
1229 	offset = slot * adlen;
1230 	extnr  = 0;
1231 	while (offset > max_l_ad) {
1232 		offset -= max_l_ad;
1233 		ext  = udf_node->ext[extnr];
1234 		dscr_size  = sizeof(struct alloc_ext_entry) -1;
1235 		l_ad = udf_rw32(ext->l_ad);
1236 		max_l_ad = lb_size - dscr_size;
1237 		data_pos = (uint8_t *) ext + dscr_size + l_ea;
1238 		extnr++;
1239 		if (extnr > udf_node->num_extensions) {
1240 			l_ad = 0;	/* force EOF */
1241 			break;
1242 		}
1243 	}
1244 
1245 	*eof = (offset >= l_ad) || (l_ad == 0);
1246 	if (*eof) {
1247 		memset(icb, 0, sizeof(struct long_ad));
1248 		return;
1249 	}
1250 
1251 	/* get the element */
1252 	if (addr_type == UDF_ICB_SHORT_ALLOC) {
1253 		short_ad = (struct short_ad *) (data_pos + offset);
1254 		icb->len          = short_ad->len;
1255 		icb->loc.part_num = udf_rw16(0);	/* ignore */
1256 		icb->loc.lb_num   = short_ad->lb_num;
1257 	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
1258 		long_ad = (struct long_ad *) (data_pos + offset);
1259 		*icb = *long_ad;
1260 	}
1261 }
1262 
1263 /* --------------------------------------------------------------------- */
1264 
1265 int
1266 udf_append_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb) {
1267 	union dscrptr          *dscr;
1268 	struct file_entry      *fe;
1269 	struct extfile_entry   *efe;
1270 	struct alloc_ext_entry *ext;
1271 	struct icb_tag *icbtag;
1272 	struct short_ad *short_ad;
1273 	struct long_ad *long_ad, o_icb;
1274 	uint64_t logblks_rec, *logblks_rec_p;
1275 	uint32_t offset, rest, len;
1276 	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
1277 	uint8_t *data_pos;
1278 	int icbflags, addr_type, adlen, extnr;
1279 
1280 	/* determine what descriptor we are in */
1281 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1282 
1283 	fe  = udf_node->fe;
1284 	efe = udf_node->efe;
1285 	if (fe) {
1286 		icbtag  = &fe->icbtag;
1287 		dscr      = (union dscrptr *) fe;
1288 		dscr_size = sizeof(struct file_entry) -1;
1289 
1290 		l_ea      = udf_rw32(fe->l_ea);
1291 		l_ad_p    = &fe->l_ad;
1292 		logblks_rec_p = &fe->logblks_rec;
1293 	} else {
1294 		icbtag    = &efe->icbtag;
1295 		dscr      = (union dscrptr *) efe;
1296 		dscr_size = sizeof(struct extfile_entry) -1;
1297 
1298 		l_ea      = udf_rw32(efe->l_ea);
1299 		l_ad_p    = &efe->l_ad;
1300 		logblks_rec_p = &efe->logblks_rec;
1301 	}
1302 	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
1303 	max_l_ad = lb_size - dscr_size - l_ea;
1304 
1305 	icbflags  = udf_rw16(icbtag->flags);
1306 	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1307 
1308 	/* just in case we're called on an intern, its EOF */
1309 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
1310 		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
1311 	}
1312 
1313 	adlen = 0;
1314 	if (addr_type == UDF_ICB_SHORT_ALLOC) {
1315 		adlen = sizeof(struct short_ad);
1316 	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
1317 		adlen = sizeof(struct long_ad);
1318 	}
1319 
1320 	/* if offset too big, we go to the allocation extensions */
1321 	offset = slot * adlen;
1322 	extnr  = 0;
1323 	while (offset > max_l_ad) {
1324 		offset -= max_l_ad;
1325 		ext  = udf_node->ext[extnr];
1326 		dscr = (union dscrptr *) ext;
1327 		dscr_size  = sizeof(struct alloc_ext_entry) -1;
1328 
1329 		KASSERT(ext != NULL);
1330 		l_ad_p = &ext->l_ad;
1331 		max_l_ad = lb_size - dscr_size;
1332 		data_pos = (uint8_t *) dscr + dscr_size;
1333 
1334 		extnr++;
1335 	}
1336 	/* offset is offset within the current (E)FE/AED */
1337 	l_ad   = udf_rw32(*l_ad_p);
1338 	crclen = udf_rw32(dscr->tag.desc_crc_len);
1339 	logblks_rec = udf_rw64(*logblks_rec_p);
1340 
1341 	if (extnr > udf_node->num_extensions)
1342 		return EFBIG;	/* too fragmented */
1343 
1344 	/* overwriting old piece? */
1345 	if (offset < l_ad) {
1346 		/* overwrite entry; compensate for the old element */
1347 		if (addr_type == UDF_ICB_SHORT_ALLOC) {
1348 			short_ad = (struct short_ad *) (data_pos + offset);
1349 			o_icb.len          = short_ad->len;
1350 			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
1351 			o_icb.loc.lb_num   = short_ad->lb_num;
1352 		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
1353 			long_ad = (struct long_ad *) (data_pos + offset);
1354 			o_icb = *long_ad;
1355 		} else {
1356 			panic("Invalid address type in udf_append_adslot\n");
1357 		}
1358 
1359 		len = udf_rw32(o_icb.len);
1360 		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
1361 			/* adjust counts */
1362 			len = UDF_EXT_LEN(len);
1363 			logblks_rec -= (len + lb_size -1) / lb_size;
1364 		}
1365 	}
1366 
1367 	/* calculate rest space in this descriptor */
1368 	rest = max_l_ad - offset;
1369 	if (rest <= adlen) {
1370 		/* create redirect and link new allocation extension */
1371 		printf("udf_append_to_adslot: can't create allocation extention yet\n");
1372 		return EFBIG;
1373 	}
1374 
1375 	/* write out the element */
1376 	if (addr_type == UDF_ICB_SHORT_ALLOC) {
1377 		short_ad = (struct short_ad *) (data_pos + offset);
1378 		short_ad->len    = icb->len;
1379 		short_ad->lb_num = icb->loc.lb_num;
1380 	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
1381 		long_ad = (struct long_ad *) (data_pos + offset);
1382 		*long_ad = *icb;
1383 	}
1384 
1385 	/* adjust logblks recorded count */
1386 	if (UDF_EXT_FLAGS(icb->len) == UDF_EXT_ALLOCATED)
1387 		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
1388 	*logblks_rec_p = udf_rw64(logblks_rec);
1389 
1390 	/* adjust l_ad and crclen when needed */
1391 	if (offset >= l_ad) {
1392 		l_ad   += adlen;
1393 		crclen += adlen;
1394 		dscr->tag.desc_crc_len = udf_rw32(crclen);
1395 		*l_ad_p = udf_rw32(l_ad);
1396 	}
1397 
1398 	return 0;
1399 }
1400 
1401 /* --------------------------------------------------------------------- */
1402 
1403 /*
1404  * Adjust the node's allocation descriptors to reflect the new mapping; do
1405  * take note that we might glue to existing allocation descriptors.
1406  *
1407  * XXX Note there can only be one allocation being recorded/mount; maybe
1408  * explicit allocation in shedule thread?
1409  */
1410 
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode    *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct icb_tag  *icbtag;
	struct long_ad   s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset;
	uint32_t skip_len, skipped;
	int addr_type, icbflags;
	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	/* `from'/`till' are the byte range of the file this buf covers */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from = %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot     = 0;
	cpy_slot = 0;
	foffset  = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	/* extents wholly before `from' are kept unchanged on the stack */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap starts for phase 4 */
	restart_slot    = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	/* coalesce consecutive entries of mapping[] into one extent each */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start  = mapping[lb_num];
		run_length = 1;
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num   = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	/*
	 * Walk the old extents covering [from, till), freeing the disc
	 * space they referenced (non-free extents) and pushing any trailing
	 * remainder of the last overlapped extent back on the stack.
	 */
	slot = restart_slot;
	foffset = restart_foffset;

	skip_len = till - foffset;	/* relative to start of slot */
	slot_offset = from - foffset;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, skip_len %d, vp %d, "
				"lb %d, len %d, flags %d\n",
			slot, skip_len, udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		skipped   = MIN(len, skip_len);
		if (flags != UDF_EXT_FREE) {
			if (slot_offset) {
				/* skip these blocks first */
				num_lb = (slot_offset + lb_size-1) / lb_size;
				len      -= slot_offset;
				skip_len -= slot_offset;
				foffset  += slot_offset;
				lb_num   += num_lb;
				skipped  -= slot_offset;
				slot_offset = 0;
			}
			/* free space from current position till `skipped' */
			num_lb = (skipped + lb_size-1) / lb_size;
			udf_free_allocated_space(ump, lb_num,
				udf_rw16(s_ad.loc.part_num), num_lb);
			lb_num += num_lb;
		}
		len      -= skipped;
		skip_len -= skipped;
		foffset  += skipped;

		if (len) {
			KASSERT(skipped % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len        = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}
		slot++;
	}

	/* 5) copy remainder */
	/* everything after the replaced range is kept verbatim */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	/*
	 * NOTE(review): c_ad.len is tested here without udf_rw32(); on a
	 * big-endian host this would misread the on-disc value — confirm
	 * whether a byte-swap is missing (compare phase 7's accessors).
	 */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
				"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	/* the node's descriptors should now be sane */
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
1751 
1752 /* --------------------------------------------------------------------- */
1753 
/*
 * Grow the node's recorded file size to new_size. Intern-allocated nodes
 * are either extended in place or converted to extent allocation (their
 * data evacuated via vn_rdwr and written back); otherwise the last extent
 * is rounded up to a block boundary and FREE extents are appended in
 * chunks until the size difference is covered. Returns 0 or an errno.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag  *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint8_t *data_pos, *evacuated_data;
	int icbflags, addr_type;
	int slot, cpy_slot;
	int eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	UDF_LOCK_NODE(udf_node, 0);
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* largest extent length that is a whole number of logical blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr       = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
	} else {
		dscr       = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			/*
			 * NOTE(review): the error returned here is never
			 * checked; it is overwritten further down, so a
			 * failed read silently yields zeroed data — confirm
			 * intent.
			 */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0,
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc */
		/* XXX HOWTO selecting allocation method ? */
		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |=  UDF_ICB_LONG_ALLOC;	/* XXX or SHORT_ALLOC */
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* the old in-node data becomes one FREE extent to fill */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(s_ad.len) > 0) {
		error = udf_append_adslot(udf_node, slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0,
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		/*
		 * NOTE(review): size is set to old_size here although the
		 * node was just grown to new_size — verify against the uvm
		 * setsize/setwritesize protocol.
		 */
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2011 
2012 /* --------------------------------------------------------------------- */
2013 
2014 int
2015 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2016 {
2017 	struct vnode *vp = udf_node->vnode;
2018 	struct udf_mount *ump = udf_node->ump;
2019 	struct file_entry    *fe;
2020 	struct extfile_entry *efe;
2021 	struct icb_tag  *icbtag;
2022 	struct long_ad c_ad, s_ad, *node_ad_cpy;
2023 	uint64_t size_diff, old_size, inflen, objsize;
2024 	uint64_t foffset, end_foffset;
2025 	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2026 	uint32_t lb_size, dscr_size, crclen;
2027 	uint32_t slot_offset;
2028 	uint32_t len, flags, max_len;
2029 	uint32_t num_lb, lb_num;
2030 	uint32_t max_l_ad, l_ad, l_ea;
2031 	uint16_t vpart_num;
2032 	uint8_t *data_pos;
2033 	int icbflags, addr_type;
2034 	int slot, cpy_slot, cpy_slots;
2035 	int eof, error;
2036 
2037 	DPRINTF(ALLOC, ("udf_shrink_node\n"));
2038 	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2039 
2040 	UDF_LOCK_NODE(udf_node, 0);
2041 	lb_size = udf_rw32(ump->logical_vol->lb_size);
2042 	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2043 
2044 	/* do the work */
2045 	fe  = udf_node->fe;
2046 	efe = udf_node->efe;
2047 	if (fe) {
2048 		icbtag  = &fe->icbtag;
2049 		inflen  = udf_rw64(fe->inf_len);
2050 		objsize = inflen;
2051 		dscr_size  = sizeof(struct file_entry) -1;
2052 		l_ea       = udf_rw32(fe->l_ea);
2053 		l_ad       = udf_rw32(fe->l_ad);
2054 		data_pos = (uint8_t *) fe + dscr_size + l_ea;
2055 	} else {
2056 		icbtag  = &efe->icbtag;
2057 		inflen  = udf_rw64(efe->inf_len);
2058 		objsize = udf_rw64(efe->obj_size);
2059 		dscr_size  = sizeof(struct extfile_entry) -1;
2060 		l_ea       = udf_rw32(efe->l_ea);
2061 		l_ad       = udf_rw32(efe->l_ad);
2062 		data_pos = (uint8_t *) efe + dscr_size + l_ea;
2063 	}
2064 	max_l_ad = lb_size - dscr_size - l_ea;
2065 
2066 	icbflags   = udf_rw16(icbtag->flags);
2067 	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2068 
2069 	old_size  = inflen;
2070 	size_diff = old_size - new_size;
2071 
2072 	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2073 
2074 	/* shrink the node to its new size */
2075 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
2076 		/* only reflect size change directly in the node */
2077 		KASSERT(new_size <= max_l_ad);
2078 		inflen  -= size_diff;
2079 		objsize -= size_diff;
2080 		l_ad    -= size_diff;
2081 		crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2082 		if (fe) {
2083 			fe->inf_len   = udf_rw64(inflen);
2084 			fe->l_ad      = udf_rw32(l_ad);
2085 			fe->tag.desc_crc_len = udf_rw32(crclen);
2086 		} else {
2087 			efe->inf_len  = udf_rw64(inflen);
2088 			efe->obj_size = udf_rw64(objsize);
2089 			efe->l_ad     = udf_rw32(l_ad);
2090 			efe->tag.desc_crc_len = udf_rw32(crclen);
2091 		}
2092 		error = 0;
2093 		/* TODO zero appened space in buffer! */
2094 		/* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2095 
2096 		/* set new size for uvm */
2097 		uvm_vnp_setsize(vp, new_size);
2098 		UDF_UNLOCK_NODE(udf_node, 0);
2099 
2100 		udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2101 		KASSERT(new_inflen == orig_inflen - size_diff);
2102 		KASSERT(new_lbrec == orig_lbrec);
2103 		KASSERT(new_lbrec == 0);
2104 
2105 		return 0;
2106 	}
2107 
2108 	/* setup node cleanup extents copy space */
2109 	node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2110 		M_UDFMNT, M_WAITOK);
2111 	memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2112 
2113 	/*
2114 	 * Shrink the node by releasing the allocations and truncate the last
2115 	 * allocation to the new size. If the new size fits into the
2116 	 * allocation descriptor itself, transform it into an
2117 	 * UDF_ICB_INTERN_ALLOC.
2118 	 */
2119 	slot     = 0;
2120 	cpy_slot = 0;
2121 	foffset  = 0;
2122 
2123 	/* 1) copy till first overlap piece to the rewrite buffer */
2124 	for (;;) {
2125 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
2126 		if (eof) {
2127 			DPRINTF(WRITE,
2128 				("Shrink node failed: "
2129 				 "encountered EOF\n"));
2130 			error = EINVAL;
2131 			goto errorout; /* panic? */
2132 		}
2133 		len   = udf_rw32(s_ad.len);
2134 		flags = UDF_EXT_FLAGS(len);
2135 		len   = UDF_EXT_LEN(len);
2136 
2137 		if (flags == UDF_EXT_REDIRECT) {
2138 			slot++;
2139 			continue;
2140 		}
2141 
2142 		end_foffset = foffset + len;
2143 		if (end_foffset > new_size)
2144 			break;	/* found */
2145 
2146 		node_ad_cpy[cpy_slot++] = s_ad;
2147 
2148 		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2149 			"-> stack\n",
2150 			udf_rw16(s_ad.loc.part_num),
2151 			udf_rw32(s_ad.loc.lb_num),
2152 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
2153 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2154 
2155 		foffset = end_foffset;
2156 		slot++;
2157 	}
2158 	slot_offset = new_size - foffset;
2159 
2160 	/* 2) trunc overlapping slot at overlap and copy it */
2161 	if (slot_offset > 0) {
2162 		lb_num    = udf_rw32(s_ad.loc.lb_num);
2163 		vpart_num = udf_rw16(s_ad.loc.part_num);
2164 
2165 		if (flags == UDF_EXT_ALLOCATED) {
2166 			lb_num += (slot_offset + lb_size -1) / lb_size;
2167 			num_lb  = (len - slot_offset + lb_size - 1) / lb_size;
2168 
2169 			udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2170 		}
2171 
2172 		s_ad.len = udf_rw32(slot_offset | flags);
2173 		node_ad_cpy[cpy_slot++] = s_ad;
2174 		slot++;
2175 
2176 		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2177 			"-> stack\n",
2178 			udf_rw16(s_ad.loc.part_num),
2179 			udf_rw32(s_ad.loc.lb_num),
2180 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
2181 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2182 	}
2183 
2184 	/* 3) delete remainder */
2185 	for (;;) {
2186 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
2187 		if (eof)
2188 			break;
2189 
2190 		len       = udf_rw32(s_ad.len);
2191 		flags     = UDF_EXT_FLAGS(len);
2192 		len       = UDF_EXT_LEN(len);
2193 
2194 		if (flags == UDF_EXT_REDIRECT) {
2195 			slot++;
2196 			continue;
2197 		}
2198 
2199 		DPRINTF(ALLOC, ("\t3: delete remainder "
2200 			"vp %d lb %d, len %d, flags %d\n",
2201 		udf_rw16(s_ad.loc.part_num),
2202 		udf_rw32(s_ad.loc.lb_num),
2203 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2204 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2205 
2206 		if (flags == UDF_EXT_ALLOCATED) {
2207 			lb_num    = udf_rw32(s_ad.loc.lb_num);
2208 			vpart_num = udf_rw16(s_ad.loc.part_num);
2209 			num_lb    = (len + lb_size - 1) / lb_size;
2210 
2211 			udf_free_allocated_space(ump, lb_num, vpart_num,
2212 				num_lb);
2213 		}
2214 
2215 		slot++;
2216 	}
2217 
2218 	/* 4) if it will fit into the descriptor then convert */
2219 	if (new_size < max_l_ad) {
2220 		/*
2221 		 * resque/evacuate old piece by reading it in, and convert it
2222 		 * to internal alloc.
2223 		 */
2224 		if (new_size == 0) {
2225 			/* XXX/TODO only for zero sizing now */
2226 			udf_wipe_adslots(udf_node);
2227 
2228 			icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2229 			icbflags |=  UDF_ICB_INTERN_ALLOC;
2230 			icbtag->flags = udf_rw16(icbflags);
2231 
2232 			inflen  -= size_diff;	KASSERT(inflen == 0);
2233 			objsize -= size_diff;
2234 			l_ad     = new_size;
2235 			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2236 			if (fe) {
2237 				fe->inf_len   = udf_rw64(inflen);
2238 				fe->l_ad      = udf_rw32(l_ad);
2239 				fe->tag.desc_crc_len = udf_rw32(crclen);
2240 			} else {
2241 				efe->inf_len  = udf_rw64(inflen);
2242 				efe->obj_size = udf_rw64(objsize);
2243 				efe->l_ad     = udf_rw32(l_ad);
2244 				efe->tag.desc_crc_len = udf_rw32(crclen);
2245 			}
2246 			/* eventually copy in evacuated piece */
2247 			/* set new size for uvm */
2248 			uvm_vnp_setsize(vp, new_size);
2249 
2250 			free(node_ad_cpy, M_UDFMNT);
2251 			UDF_UNLOCK_NODE(udf_node, 0);
2252 
2253 			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2254 			KASSERT(new_inflen == orig_inflen - size_diff);
2255 			KASSERT(new_inflen == 0);
2256 			KASSERT(new_lbrec == 0);
2257 
2258 			return 0;
2259 		}
2260 
2261 		printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2262 	}
2263 
2264 	/* 5) reset node descriptors */
2265 	udf_wipe_adslots(udf_node);
2266 
2267 	/* 6) copy back extents; merge when possible. Recounting on the fly */
2268 	cpy_slots = cpy_slot;
2269 
2270 	c_ad = node_ad_cpy[0];
2271 	slot = 0;
2272 	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2273 		s_ad = node_ad_cpy[cpy_slot];
2274 
2275 		DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2276 			"lb %d, len %d, flags %d\n",
2277 		udf_rw16(s_ad.loc.part_num),
2278 		udf_rw32(s_ad.loc.lb_num),
2279 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2280 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2281 
2282 		/* see if we can merge */
2283 		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2284 			/* not mergable (anymore) */
2285 			DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2286 				"len %d, flags %d\n",
2287 			udf_rw16(c_ad.loc.part_num),
2288 			udf_rw32(c_ad.loc.lb_num),
2289 			UDF_EXT_LEN(udf_rw32(c_ad.len)),
2290 			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2291 
2292 			error = udf_append_adslot(udf_node, slot, &c_ad);
2293 			if (error)
2294 				goto errorout; /* panic? */
2295 			c_ad = s_ad;
2296 			slot++;
2297 		}
2298 	}
2299 
2300 	/* 7) push rest slot (if any) */
2301 	if (UDF_EXT_LEN(c_ad.len) > 0) {
2302 		DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2303 				"len %d, flags %d\n",
2304 		udf_rw16(c_ad.loc.part_num),
2305 		udf_rw32(c_ad.loc.lb_num),
2306 		UDF_EXT_LEN(udf_rw32(c_ad.len)),
2307 		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2308 
2309 		error = udf_append_adslot(udf_node, slot, &c_ad);
2310 		if (error)
2311 			goto errorout; /* panic? */
2312 		;
2313 	}
2314 
2315 	inflen  -= size_diff;
2316 	objsize -= size_diff;
2317 	if (fe) {
2318 		fe->inf_len   = udf_rw64(inflen);
2319 	} else {
2320 		efe->inf_len  = udf_rw64(inflen);
2321 		efe->obj_size = udf_rw64(objsize);
2322 	}
2323 	error = 0;
2324 
2325 	/* set new size for uvm */
2326 	uvm_vnp_setsize(vp, new_size);
2327 
2328 errorout:
2329 	free(node_ad_cpy, M_UDFMNT);
2330 	UDF_UNLOCK_NODE(udf_node, 0);
2331 
2332 	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2333 	KASSERT(new_inflen == orig_inflen - size_diff);
2334 
2335 	return error;
2336 }
2337 
2338