xref: /netbsd-src/sys/fs/udf/udf_allocation.c (revision 1faf39be0f3e880238337a31c64f531c907df63c)
1 /* $NetBSD: udf_allocation.c,v 1.25 2009/06/18 15:09:18 reinoud Exp $ */
2 
3 /*
4  * Copyright (c) 2006, 2008 Reinoud Zandijk
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.25 2009/06/18 15:09:18 reinoud Exp $");
32 #endif /* not lint */
33 
34 
35 #if defined(_KERNEL_OPT)
36 #include "opt_compat_netbsd.h"
37 #endif
38 
39 /* TODO strip */
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sysctl.h>
43 #include <sys/namei.h>
44 #include <sys/proc.h>
45 #include <sys/kernel.h>
46 #include <sys/vnode.h>
47 #include <miscfs/genfs/genfs_node.h>
48 #include <sys/mount.h>
49 #include <sys/buf.h>
50 #include <sys/file.h>
51 #include <sys/device.h>
52 #include <sys/disklabel.h>
53 #include <sys/ioctl.h>
54 #include <sys/malloc.h>
55 #include <sys/dirent.h>
56 #include <sys/stat.h>
57 #include <sys/conf.h>
58 #include <sys/kauth.h>
59 #include <sys/kthread.h>
60 #include <dev/clock_subr.h>
61 
62 #include <fs/udf/ecma167-udf.h>
63 #include <fs/udf/udf_mount.h>
64 
65 #include "udf.h"
66 #include "udf_subr.h"
67 #include "udf_bswap.h"
68 
69 
70 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
71 
72 static void udf_record_allocation_in_node(struct udf_mount *ump,
73 	struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
74 	struct long_ad *node_ad_cpy);
75 
76 /*
77  * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
78  * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
80  * searching for the same or adjacent position again.
81  */
82 
83 /* --------------------------------------------------------------------- */
84 
85 #if 0
86 #if 1
/*
 * Debug helper: dump all allocation descriptors (extents) of a udf_node to
 * the console.  Only emits output when the UDF_DEBUG_NODEDUMP verbosity
 * flag is set.  This whole section is compiled out (#if 0 above).
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* data embedded in the node itself; no extents to walk */
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen  = %"PRIu64"\n", inflen);
	printf("\t\t");

	/* walk all allocation descriptor slots until end-of-file */
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		printf("[");
		/*
		 * NOTE(review): part_num is read from a 16-bit field, so
		 * this >= 0 test looks always-true — confirm udf_rw16's
		 * return type.
		 */
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags>>30);
		printf("] ");

		if (flags == UDF_EXT_REDIRECT) {
			/* slot redirects to a new allocation extent block */
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
155 #else
156 #define udf_node_dump(a)
157 #endif
158 
159 
/*
 * Debug/paranoia helper: verify that logical blocks lb_num ..
 * lb_num+num_lb-1 on virtual partition vpart_num are marked allocated
 * (bit cleared) in the backing partition's unallocated-space bitmap.
 * Complains on the console when a block is unexpectedly free; does not
 * panic (the KASSERT is commented out).  Compiled out (#if 0 above).
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used here */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits; a set bit means the block is free */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit  = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			/* advance byte pointer on every 8th bit */
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}
229 
230 
/*
 * Heavyweight sanity check of a udf_node's administration (compiled out,
 * #if 0 above).  Recomputes the information length and recorded-block count
 * from the node's allocation descriptors, cross-checks the unused tail of
 * the descriptor area for zeroes, and asserts the totals match the values
 * recorded in the (extended) file entry.  Also asserts every allocated
 * extent is marked busy in the space bitmaps via udf_assert_allocated().
 *
 * cnt_inflen     : out, recomputed information length in bytes
 * cnt_logblksrec : out, recomputed number of logical blocks recorded
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad  s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* pick up administration from the FE or EFE backing this node */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr       = (union dscrptr *) fe;
		icbtag     = &fe->icbtag;
		inflen     = udf_rw64(fe->inf_len);
		dscr_size  = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad       = udf_rw32(fe->l_ad);
		l_ea       = udf_rw32(fe->l_ea);
	} else {
		dscr       = (union dscrptr *) efe;
		icbtag     = &efe->icbtag;
		inflen     = udf_rw64(efe->inf_len);
		dscr_size  = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad       = udf_rw32(efe->l_ad);
		l_ea       = udf_rw32(efe->l_ea);
	}
	/* allocation descriptors live after the extended attributes */
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad   = lb_size - dscr_size - l_ea;
	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen     = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: the descriptor area itself holds the file */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		/* only the last extent may have a partial logical block */
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				/* round up to whole logical blocks */
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* redirect extents span exactly one logical block */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
338 #else
339 static void
340 udf_node_sanity_check(struct udf_node *udf_node,
341 		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
342 	struct file_entry    *fe;
343 	struct extfile_entry *efe;
344 	struct icb_tag *icbtag;
345 	uint64_t inflen, logblksrec;
346 	int dscr_size, lb_size;
347 
348 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
349 
350 	fe  = udf_node->fe;
351 	efe = udf_node->efe;
352 	if (fe) {
353 		icbtag = &fe->icbtag;
354 		inflen = udf_rw64(fe->inf_len);
355 		dscr_size  = sizeof(struct file_entry) -1;
356 		logblksrec = udf_rw64(fe->logblks_rec);
357 	} else {
358 		icbtag = &efe->icbtag;
359 		inflen = udf_rw64(efe->inf_len);
360 		dscr_size  = sizeof(struct extfile_entry) -1;
361 		logblksrec = udf_rw64(efe->logblks_rec);
362 	}
363 	*cnt_logblksrec = logblksrec;
364 	*cnt_inflen     = inflen;
365 }
366 #endif
367 
368 /* --------------------------------------------------------------------- */
369 
/*
 * Translate a virtual (partition-relative) address in icb_loc to an
 * absolute disc logical block number, according to the partition's
 * mapping scheme (raw, physical, virtual/VAT, sparable or metadata).
 *
 * ump       : mount under consideration
 * icb_loc   : long_ad holding virtual partition number + logical block
 * lb_numres : out, resulting absolute disc logical block number
 * extres    : out, number of blocks from *lb_numres that remain
 *             contiguous under this mapping
 *
 * Returns 0 on success, EINVAL when the address cannot be translated or
 * the scheme is unimplemented, or an error from reading the VAT.
 */
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
		   uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc       *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart  = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

translate_again:
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		/* VAT entries are stored little-endian; swap to host order */
		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel    = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres    = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* not remapped; transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot    = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len   = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			if (flags == UDF_EXT_REDIRECT) {
				/* skip redirect slots; they hold no data */
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num   = udf_rw32(s_icb_loc.loc.lb_num);
		vpart    = udf_rw16(s_icb_loc.loc.part_num);
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}
530 
531 
532 /* XXX  provisional primitive braindead version */
533 /* TODO use ext_res */
534 void
535 udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
536 	uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
537 {
538 	struct long_ad loc;
539 	uint32_t lb_numres, ext_res;
540 	int sector;
541 
542 	for (sector = 0; sector < sectors; sector++) {
543 		memset(&loc, 0, sizeof(struct long_ad));
544 		loc.loc.part_num = udf_rw16(vpart_num);
545 		loc.loc.lb_num   = udf_rw32(*lmapping);
546 		udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
547 		*pmapping = lb_numres;
548 		lmapping++; pmapping++;
549 	}
550 }
551 
552 
553 /* --------------------------------------------------------------------- */
554 
/*
 * Translate an extent (in logical blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */
559 
/*
 * Translate the file extent [from, from+num_lb) (in logical blocks) of
 * udf_node into absolute disc logical block numbers, written to map[].
 * Sparse/unused extents yield UDF_TRANS_ZERO entries; internally-allocated
 * (embedded) files yield a single UDF_TRANS_INTERN entry.
 *
 * Returns 0 on success, ENOENT for a NULL node, EINVAL when the range
 * can't be located or an extent carries bad flags, or an error from
 * udf_translate_vtop().  Takes and releases the node lock.
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data embedded in the node; nothing to translate */
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot    = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			/* redirect slots hold no file data; skip */
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; byte offset of `from' within it */
	ext_offset = from * lb_size - foffset;

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);

		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		overlap  = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while(){} is necessary since the extent
		 * udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* sparse extent: reads are all zeroes */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				t_ad.loc.lb_num   = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		/* redirect extents occupy no file offset space */
		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
727 
728 /* --------------------------------------------------------------------- */
729 
730 static int
731 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
732 {
733 	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
734 	uint8_t *blob;
735 	int entry, chunk, found, error;
736 
737 	KASSERT(ump);
738 	KASSERT(ump->logical_vol);
739 
740 	lb_size = udf_rw32(ump->logical_vol->lb_size);
741 	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
742 
743 	/* TODO static allocation of search chunk */
744 
745 	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
746 	found  = 0;
747 	error  = 0;
748 	entry  = 0;
749 	do {
750 		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
751 		if (chunk <= 0)
752 			break;
753 		/* load in chunk */
754 		error = udf_vat_read(ump->vat_node, blob, chunk,
755 				ump->vat_offset + lb_num * 4);
756 
757 		if (error)
758 			break;
759 
760 		/* search this chunk */
761 		for (entry=0; entry < chunk /4; entry++, lb_num++) {
762 			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
763 			lb_map = udf_rw32(udf_rw32_lbmap);
764 			if (lb_map == 0xffffffff) {
765 				found = 1;
766 				break;
767 			}
768 		}
769 	} while (!found);
770 	if (error) {
771 		printf("udf_search_free_vatloc: error reading in vat chunk "
772 			"(lb %d, size %d)\n", lb_num, chunk);
773 	}
774 
775 	if (!found) {
776 		/* extend VAT */
777 		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
778 		lb_num = ump->vat_entries;
779 		ump->vat_entries++;
780 	}
781 
782 	/* mark entry with initialiser just in case */
783 	lb_map = udf_rw32(0xfffffffe);
784 	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
785 		ump->vat_offset + lb_num *4);
786 	ump->vat_last_free_lb = lb_num;
787 
788 	free(blob, M_UDFTEMP);
789 	*lbnumres = lb_num;
790 	return 0;
791 }
792 
793 
/*
 * Allocate up to *num_lb logical blocks from a free-space bitmap (a set
 * bit marks a free block).  Allocated block numbers are appended to
 * lmappos[] and *num_lb is decremented for each block found; on return a
 * non-zero *num_lb means the bitmap ran out of space.
 *
 * The scan resumes from the bitmap's remembered metadata_pos or data_pos
 * (depending on ismetadata) and wraps around once (two passes).  A
 * heuristic keeps the data pointer at least 1024 blocks past the metadata
 * pointer to reduce interleaving of node and data allocations.
 *
 * Caller must hold the allocate_mutex.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t  diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	/* align to a byte boundary for the ffs() based scan */
	offset &= ~7;
	for (pass = 0; pass < 2; pass++) {
		/* second pass restarts from the beginning (wrap-around) */
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos  = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				/* byte fully allocated; next byte */
				offset += 8;
				continue;
			}

			/* check for ffs overshoot */
			if (offset + bit-1 >= bitmap->max_offset) {
				offset = bitmap->max_offset;
				break;
			}

			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			/* clear the bit == mark block allocated */
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember where we got for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
849 
850 
851 static void
852 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
853 {
854 	uint32_t offset;
855 	uint32_t bit, bitval;
856 	uint8_t *bpos;
857 
858 	offset = lb_num;
859 
860 	/* starter bits */
861 	bpos = bitmap->bits + offset/8;
862 	bit = offset % 8;
863 	while ((bit != 0) && (num_lb > 0)) {
864 		bitval = (1 << bit);
865 		KASSERT((*bpos & bitval) == 0);
866 		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
867 			offset, bpos, bit));
868 		*bpos |= bitval;
869 		offset++; num_lb--;
870 		bit = (bit + 1) % 8;
871 	}
872 	if (num_lb == 0)
873 		return;
874 
875 	/* whole bytes */
876 	KASSERT(bit == 0);
877 	bpos = bitmap->bits + offset / 8;
878 	while (num_lb >= 8) {
879 		KASSERT((*bpos == 0));
880 		DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
881 		*bpos = 255;
882 		offset += 8; num_lb -= 8;
883 		bpos++;
884 	}
885 
886 	/* stop bits */
887 	KASSERT(num_lb < 8);
888 	bit = 0;
889 	while (num_lb > 0) {
890 		bitval = (1 << bit);
891 		KASSERT((*bpos & bitval) == 0);
892 		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
893 			offset, bpos, bit));
894 		*bpos |= bitval;
895 		offset++; num_lb--;
896 		bit = (bit + 1) % 8;
897 	}
898 }
899 
900 
901 /* allocate a contiguous sequence of sectornumbers */
/*
 * Allocate num_lb logical blocks on virtual partition vpart_num for data
 * of class udf_c_type, storing the resulting logical block numbers in
 * lmapping[].  Dispatches on the partition's allocation scheme: VAT slot
 * search, sequential (recordable media), space-map bitmap, or the metadata
 * bitmap.  Adjusts the logical volume integrity free-space counters for
 * bitmap-backed schemes.
 *
 * Returns 0 on success or ENOSPC / an error from the VAT search.
 * Takes and releases the allocate_mutex.
 */
/* allocate a contiguous sequence of sectornumbers */
static int
udf_allocate_space(struct udf_mount *ump, int udf_c_type,
	uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* XXX TODO check disc space */

	alloc_type =  ump->vtop_alloc[vpart_num];
	is_node    = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	/*
	 * NOTE(review): no default case; an unknown alloc_type falls
	 * through with error == 0 and lmapping untouched — confirm all
	 * vtop_alloc values are covered here.
	 */
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error)
			*lmappos = lb_num;
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov  = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate at the next writable address, partition-relative */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the integrity descriptor */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the integrity descriptor */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R  */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R  */
		printf("ALERT: udf_allocate_space : allocation %d "
				"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64, (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
1041 
1042 /* --------------------------------------------------------------------- */
1043 
/*
 * Free num_lb previously allocated logical blocks starting at lb_num on
 * virtual partition vpart_num.  Depending on the partition type the blocks
 * are returned to the freed/unallocated space bitmap, the VAT entry is
 * cleared (0xffffffff), or the metadata bitmap is updated.  Free-space
 * counters in the logical volume integrity descriptor are adjusted for
 * bitmap-backed partitions.
 *
 * Takes and releases the allocate_mutex; freeing zero blocks is a no-op.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;	/* only consumed by KASSERT below */

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used here */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by writing the free marker into it */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember lowest free slot to speed up the next search */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1131 
1132 /* --------------------------------------------------------------------- */
1133 
1134 int
1135 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type,
1136 	uint32_t num_lb, uint16_t vpartnr, uint64_t *lmapping)
1137 {
1138 	/* TODO properly maintain uncomitted_lb per partition */
1139 
1140 	/* reserve size for VAT allocated data */
1141 	if (ump->vtop_alloc[vpartnr] == UDF_ALLOC_VAT) {
1142 		mutex_enter(&ump->allocate_mutex);
1143 			ump->uncomitted_lb += num_lb;
1144 		mutex_exit(&ump->allocate_mutex);
1145 	}
1146 
1147 	return udf_allocate_space(ump, udf_c_type, vpartnr, num_lb, lmapping);
1148 }
1149 
1150 /* --------------------------------------------------------------------- */
1151 
1152 /*
1153  * Allocate a buf on disc for direct write out. The space doesn't have to be
1154  * contiguous as the caller takes care of this.
1155  */
1156 
/*
 * Allocate disc space for `buf' just before it is written out. One sector
 * is allocated per logical block in the buffer and its position recorded in
 * `lmapping'; the virtual partition chosen for the buffer is returned in
 * `vpart_nump'. Userdata, FIDs and metadata-bitmap buffers also get the
 * allocation recorded in their node. Allocation failure is a panic since
 * the space was reserved earlier by udf_pre_allocate_space().
 */
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
{
	struct udf_node  *udf_node = VTOI(buf->b_vp);
	int lb_size, blks, udf_c_type;
	int vpart_num, num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size    = udf_rw32(ump->logical_vol->lb_size);
	num_lb     = (buf->b_bcount + lb_size -1) / lb_size;	/* round up */
	blks       = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	/* select partition to record the buffer on, keyed on content type */
	vpart_num = ump->data_part;
	if (udf_c_type == UDF_C_NODE)
		vpart_num = ump->node_part;
	if (udf_c_type == UDF_C_FIDS)
		vpart_num = ump->fids_part;
	*vpart_nump = vpart_num;

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, it is already allocated */
		if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
			return;

		/* allocate on its backing sequential partition */
		vpart_num = ump->data_part;
	}

	/* do allocation on the selected partition */
	error = udf_allocate_space(ump, udf_c_type,
			vpart_num, num_lb, lmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* commit our sector count: undo the pre-allocation reservation */
	mutex_enter(&ump->allocate_mutex);
		if (num_lb > ump->uncomitted_lb) {
			/* XXX shouldn't happen; clamp instead of underflow */
			ump->uncomitted_lb = 0;
		} else {
			ump->uncomitted_lb -= num_lb;
		}
	mutex_exit(&ump->allocate_mutex);

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) ||
	    (udf_c_type == UDF_C_FIDS) ||
	    (udf_c_type == UDF_C_METADATA_SBM))
	{
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
			udf_node->outstanding_bufs--;
		splx(s);
	}
}
1227 
1228 /* --------------------------------------------------------------------- */
1229 
1230 /*
1231  * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1232  * possible (anymore); a2 returns the rest piece.
1233  */
1234 
1235 static int
1236 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1237 {
1238 	uint32_t max_len, merge_len;
1239 	uint32_t a1_len, a2_len;
1240 	uint32_t a1_flags, a2_flags;
1241 	uint32_t a1_lbnum, a2_lbnum;
1242 	uint16_t a1_part, a2_part;
1243 
1244 	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1245 
1246 	a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1247 	a1_len   = UDF_EXT_LEN(udf_rw32(a1->len));
1248 	a1_lbnum = udf_rw32(a1->loc.lb_num);
1249 	a1_part  = udf_rw16(a1->loc.part_num);
1250 
1251 	a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1252 	a2_len   = UDF_EXT_LEN(udf_rw32(a2->len));
1253 	a2_lbnum = udf_rw32(a2->loc.lb_num);
1254 	a2_part  = udf_rw16(a2->loc.part_num);
1255 
1256 	/* defines same space */
1257 	if (a1_flags != a2_flags)
1258 		return 1;
1259 
1260 	if (a1_flags != UDF_EXT_FREE) {
1261 		/* the same partition */
1262 		if (a1_part != a2_part)
1263 			return 1;
1264 
1265 		/* a2 is successor of a1 */
1266 		if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1267 			return 1;
1268 	}
1269 
1270 	/* merge as most from a2 if possible */
1271 	merge_len = MIN(a2_len, max_len - a1_len);
1272 	a1_len   += merge_len;
1273 	a2_len   -= merge_len;
1274 	a2_lbnum += merge_len/lb_size;
1275 
1276 	a1->len = udf_rw32(a1_len | a1_flags);
1277 	a2->len = udf_rw32(a2_len | a2_flags);
1278 	a2->loc.lb_num = udf_rw32(a2_lbnum);
1279 
1280 	if (a2_len > 0)
1281 		return 1;
1282 
1283 	/* there is space over to merge */
1284 	return 0;
1285 }
1286 
1287 /* --------------------------------------------------------------------- */
1288 
/*
 * Wipe all allocation descriptors from a node's (E)FE and from all of its
 * allocation extent entries, resetting l_ad, logblks_rec and the CRC
 * lengths accordingly. Used as step one of rebuilding the node's
 * allocation descriptor chain; marks the node IN_NODE_REBUILD.
 */
static void
udf_wipe_adslots(struct udf_node *udf_node)
{
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	uint64_t inflen, objsize;
	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
	uint8_t *data_pos;
	int extnr;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		/* -1: the descriptor structs end in a one byte data member */
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		/* allocation descriptors start after the extended attributes */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* maximum room for allocation descriptors in this descriptor */
	max_l_ad = lb_size - dscr_size - l_ea;

	/* wipe fe/efe */
	memset(data_pos, 0, max_l_ad);
	crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
	if (fe) {
		fe->l_ad         = udf_rw32(0);
		fe->logblks_rec  = udf_rw64(0);
		fe->tag.desc_crc_len = udf_rw16(crclen);
	} else {
		efe->l_ad        = udf_rw32(0);
		efe->logblks_rec = udf_rw64(0);
		efe->tag.desc_crc_len = udf_rw16(crclen);
	}

	/* wipe all allocation extent entries */
	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
		ext = udf_node->ext[extnr];
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		data_pos = (uint8_t *) ext->data;
		max_l_ad = lb_size - dscr_size;
		memset(data_pos, 0, max_l_ad);
		ext->l_ad = udf_rw32(0);

		crclen = dscr_size - UDF_DESC_TAG_LENGTH;
		ext->tag.desc_crc_len = udf_rw16(crclen);
	}
	udf_node->i_flags |= IN_NODE_REBUILD;
}
1348 
1349 /* --------------------------------------------------------------------- */
1350 
/*
 * Retrieve allocation descriptor number `slot' from the node's chain of
 * allocation descriptors into `icb', following UDF_EXT_REDIRECT entries
 * into allocation extent entries as needed. Short ADs are widened to a
 * long_ad using the node's own partition number. `*eof' is set when the
 * slot lies past the recorded descriptors (or the node uses intern
 * allocation, which has no slots at all).
 */
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		/* allocation descriptors follow the extended attributes */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag  = &efe->icbtag;
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* synthesize a long_ad from the trailing short_ad */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		/* offset becomes relative to the next extent's descriptors */
		offset = offset - l_ad;
		ext  = udf_node->ext[extnr];
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len          = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num   = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}
1458 
1459 /* --------------------------------------------------------------------- */
1460 
/*
 * Write allocation descriptor `icb' at position `*slot' in the node's
 * descriptor chain, appending a new allocation extent entry (AED) when the
 * current descriptor runs out of room. When an overwritten or appended
 * element changes the amount of recorded space, logblks_rec, l_ad and the
 * descriptor CRC length are updated. On AED insertion `*slot' is bumped to
 * compensate for the redirect element taking a slot. Returns 0 on success
 * or an error from disc space pre-allocation for a new AED.
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr          *dscr, *extdscr;
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	vpart_num = udf_rw16(udf_node->loc.loc.part_num);

	/* determine what descriptor we are in */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr      = (union dscrptr *) fe;
		/* -1: the descriptor structs end in a one byte data member */
		dscr_size = sizeof(struct file_entry) -1;

		l_ea      = udf_rw32(fe->l_ea);
		/* keep pointers so the descriptor can be updated in place */
		l_ad_p    = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag    = &efe->icbtag;
		dscr      = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea      = udf_rw32(efe->l_ea);
		l_ad_p    = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	/* allocation descriptors follow the extended attributes */
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad since it can be a synthesized one */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num   = udf_rw32(0);
	}

	/* if offset too big, we go to the allocation extensions */
	l_ad   = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* synthesize a long_ad from the trailing short_ad */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is addressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		/* offset becomes relative to the next extent's descriptors */
		offset = offset - l_ad;

		ext  = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad   = udf_rw32(*l_ad_p);
	crclen = udf_rw16(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len          = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num   = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts: subtract the old recorded blocks */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/* have to append aed, see if we already have a spare one */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));

			/* get one logical block to hold the new AED */
			error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
					vpart_num, &lmapping);
			lb_num = lmapping;
			if (error)
				return error;

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num   = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
			ext = &extdscr->aee;

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size  = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw16(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr] = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len    = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw16(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		crclen = udf_rw16(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len    = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	len = udf_rw32(icb->len);
	flags = UDF_EXT_FLAGS(len);
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed; i.e. on a true append */
	if (offset >= l_ad) {
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw16(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1700 
1701 /* --------------------------------------------------------------------- */
1702 
1703 static void
1704 udf_count_alloc_exts(struct udf_node *udf_node)
1705 {
1706 	struct long_ad s_ad;
1707 	uint32_t lb_num, len, flags;
1708 	uint16_t vpart_num;
1709 	int slot, eof;
1710 	int num_extents, extnr;
1711 	int lb_size;
1712 
1713 	if (udf_node->num_extensions == 0)
1714 		return;
1715 
1716 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1717 	/* count number of allocation extents in use */
1718 	num_extents = 0;
1719 	slot = 0;
1720 	for (;;) {
1721 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
1722 		if (eof)
1723 			break;
1724 		len   = udf_rw32(s_ad.len);
1725 		flags = UDF_EXT_FLAGS(len);
1726 
1727 		if (flags == UDF_EXT_REDIRECT)
1728 			num_extents++;
1729 
1730 		slot++;
1731 	}
1732 
1733 	DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1734 		num_extents));
1735 
1736 	/* XXX choice: we could delay freeing them on node writeout */
1737 	/* free excess entries */
1738 	extnr = num_extents;
1739 	for (;extnr < udf_node->num_extensions; extnr++) {
1740 		DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1741 		/* free dscriptor */
1742 		s_ad = udf_node->ext_loc[extnr];
1743 		udf_free_logvol_dscr(udf_node->ump, &s_ad,
1744 			udf_node->ext[extnr]);
1745 		udf_node->ext[extnr] = NULL;
1746 
1747 		/* free disc space */
1748 		lb_num    = udf_rw32(s_ad.loc.lb_num);
1749 		vpart_num = udf_rw16(s_ad.loc.part_num);
1750 		udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1751 
1752 		memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1753 	}
1754 
1755 	/* set our new number of allocation extents */
1756 	udf_node->num_extensions = num_extents;
1757 }
1758 
1759 
1760 /* --------------------------------------------------------------------- */
1761 
1762 /*
1763  * Adjust the node's allocation descriptors to reflect the new mapping; do
1764  * take note that we might glue to existing allocation descriptors.
1765  *
1766  * XXX Note there can only be one allocation being recorded/mount; maybe
1767  * explicit allocation in shedule thread?
1768  */
1769 
1770 static void
1771 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
1772 	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
1773 {
1774 	struct vnode    *vp = buf->b_vp;
1775 	struct udf_node *udf_node = VTOI(vp);
1776 	struct file_entry      *fe;
1777 	struct extfile_entry   *efe;
1778 	struct icb_tag  *icbtag;
1779 	struct long_ad   s_ad, c_ad;
1780 	uint64_t inflen, from, till;
1781 	uint64_t foffset, end_foffset, restart_foffset;
1782 	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
1783 	uint32_t num_lb, len, flags, lb_num;
1784 	uint32_t run_start;
1785 	uint32_t slot_offset, replace_len, replace;
1786 	int addr_type, icbflags;
1787 //	int udf_c_type = buf->b_udf_c_type;
1788 	int lb_size, run_length, eof;
1789 	int slot, cpy_slot, cpy_slots, restart_slot;
1790 	int error;
1791 
1792 	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
1793 
1794 #if 0
1795 	/* XXX disable sanity check for now */
1796 	/* sanity check ... should be panic ? */
1797 	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
1798 		return;
1799 #endif
1800 
1801 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1802 
1803 	/* do the job */
1804 	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */
1805 	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
1806 
1807 	fe  = udf_node->fe;
1808 	efe = udf_node->efe;
1809 	if (fe) {
1810 		icbtag = &fe->icbtag;
1811 		inflen = udf_rw64(fe->inf_len);
1812 	} else {
1813 		icbtag = &efe->icbtag;
1814 		inflen = udf_rw64(efe->inf_len);
1815 	}
1816 
1817 	/* do check if `till' is not past file information length */
1818 	from = buf->b_lblkno * lb_size;
1819 	till = MIN(inflen, from + buf->b_resid);
1820 
1821 	num_lb = (till - from + lb_size -1) / lb_size;
1822 
1823 	DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
1824 
1825 	icbflags  = udf_rw16(icbtag->flags);
1826 	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1827 
1828 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
1829 		/* nothing to do */
1830 		/* XXX clean up rest of node? just in case? */
1831 		UDF_UNLOCK_NODE(udf_node, 0);
1832 		return;
1833 	}
1834 
1835 	slot     = 0;
1836 	cpy_slot = 0;
1837 	foffset  = 0;
1838 
1839 	/* 1) copy till first overlap piece to the rewrite buffer */
1840 	for (;;) {
1841 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
1842 		if (eof) {
1843 			DPRINTF(WRITE,
1844 				("Record allocation in node "
1845 				 "failed: encountered EOF\n"));
1846 			UDF_UNLOCK_NODE(udf_node, 0);
1847 			buf->b_error = EINVAL;
1848 			return;
1849 		}
1850 		len   = udf_rw32(s_ad.len);
1851 		flags = UDF_EXT_FLAGS(len);
1852 		len   = UDF_EXT_LEN(len);
1853 
1854 		if (flags == UDF_EXT_REDIRECT) {
1855 			slot++;
1856 			continue;
1857 		}
1858 
1859 		end_foffset = foffset + len;
1860 		if (end_foffset > from)
1861 			break;	/* found */
1862 
1863 		node_ad_cpy[cpy_slot++] = s_ad;
1864 
1865 		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
1866 			"-> stack\n",
1867 			udf_rw16(s_ad.loc.part_num),
1868 			udf_rw32(s_ad.loc.lb_num),
1869 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
1870 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1871 
1872 		foffset = end_foffset;
1873 		slot++;
1874 	}
1875 	restart_slot    = slot;
1876 	restart_foffset = foffset;
1877 
1878 	/* 2) trunc overlapping slot at overlap and copy it */
1879 	slot_offset = from - foffset;
1880 	if (slot_offset > 0) {
1881 		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
1882 				slot_offset, flags >> 30, flags));
1883 
1884 		s_ad.len = udf_rw32(slot_offset | flags);
1885 		node_ad_cpy[cpy_slot++] = s_ad;
1886 
1887 		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
1888 			"-> stack\n",
1889 			udf_rw16(s_ad.loc.part_num),
1890 			udf_rw32(s_ad.loc.lb_num),
1891 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
1892 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1893 	}
1894 	foffset += slot_offset;
1895 
1896 	/* 3) insert new mappings */
1897 	memset(&s_ad, 0, sizeof(struct long_ad));
1898 	lb_num = 0;
1899 	for (lb_num = 0; lb_num < num_lb; lb_num++) {
1900 		run_start  = mapping[lb_num];
1901 		run_length = 1;
1902 		while (lb_num < num_lb-1) {
1903 			if (mapping[lb_num+1] != mapping[lb_num]+1)
1904 				if (mapping[lb_num+1] != mapping[lb_num])
1905 					break;
1906 			run_length++;
1907 			lb_num++;
1908 		}
1909 		/* insert slot for this mapping */
1910 		len = run_length * lb_size;
1911 
1912 		/* bounds checking */
1913 		if (foffset + len > till)
1914 			len = till - foffset;
1915 		KASSERT(foffset + len <= inflen);
1916 
1917 		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
1918 		s_ad.loc.part_num = udf_rw16(vpart_num);
1919 		s_ad.loc.lb_num   = udf_rw32(run_start);
1920 
1921 		foffset += len;
1922 
1923 		/* paranoia */
1924 		if (len == 0) {
1925 			DPRINTF(WRITE,
1926 				("Record allocation in node "
1927 				 "failed: insert failed\n"));
1928 			UDF_UNLOCK_NODE(udf_node, 0);
1929 			buf->b_error = EINVAL;
1930 			return;
1931 		}
1932 		node_ad_cpy[cpy_slot++] = s_ad;
1933 
1934 		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
1935 				"flags %d -> stack\n",
1936 			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
1937 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
1938 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1939 	}
1940 
1941 	/* 4) pop replaced length */
1942 	slot    = restart_slot;
1943 	foffset = restart_foffset;
1944 
1945 	replace_len = till - foffset;	/* total amount of bytes to pop */
1946 	slot_offset = from - foffset;	/* offset in first encounted slot */
1947 	KASSERT((slot_offset % lb_size) == 0);
1948 
1949 	for (;;) {
1950 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
1951 		if (eof)
1952 			break;
1953 
1954 		len    = udf_rw32(s_ad.len);
1955 		flags  = UDF_EXT_FLAGS(len);
1956 		len    = UDF_EXT_LEN(len);
1957 		lb_num = udf_rw32(s_ad.loc.lb_num);
1958 
1959 		if (flags == UDF_EXT_REDIRECT) {
1960 			slot++;
1961 			continue;
1962 		}
1963 
1964 		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
1965 				"replace_len %d, "
1966 				"vp %d, lb %d, len %d, flags %d\n",
1967 			slot, slot_offset, replace_len,
1968 			udf_rw16(s_ad.loc.part_num),
1969 			udf_rw32(s_ad.loc.lb_num),
1970 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
1971 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1972 
1973 		/* adjust for slot offset */
1974 		if (slot_offset) {
1975 			DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
1976 			lb_num += slot_offset / lb_size;
1977 			len    -= slot_offset;
1978 			foffset += slot_offset;
1979 			replace_len -= slot_offset;
1980 
1981 			/* mark adjusted */
1982 			slot_offset = 0;
1983 		}
1984 
1985 		/* advance for (the rest of) this slot */
1986 		replace = MIN(len, replace_len);
1987 		DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
1988 
1989 		/* advance for this slot */
1990 		if (replace) {
1991 			/* note: dont round DOWN on num_lb since we then
1992 			 * forget the last partial one */
1993 			num_lb = (replace + lb_size - 1) / lb_size;
1994 			if (flags != UDF_EXT_FREE) {
1995 				udf_free_allocated_space(ump, lb_num,
1996 					udf_rw16(s_ad.loc.part_num), num_lb);
1997 			}
1998 			lb_num      += num_lb;
1999 			len         -= replace;
2000 			foffset     += replace;
2001 			replace_len -= replace;
2002 		}
2003 
2004 		/* do we have a slot tail ? */
2005 		if (len) {
2006 			KASSERT(foffset % lb_size == 0);
2007 
2008 			/* we arrived at our point, push remainder */
2009 			s_ad.len        = udf_rw32(len | flags);
2010 			s_ad.loc.lb_num = udf_rw32(lb_num);
2011 			if (flags == UDF_EXT_FREE)
2012 				s_ad.loc.lb_num = udf_rw32(0);
2013 			node_ad_cpy[cpy_slot++] = s_ad;
2014 			foffset += len;
2015 			slot++;
2016 
2017 			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
2018 				"-> stack\n",
2019 				udf_rw16(s_ad.loc.part_num),
2020 				udf_rw32(s_ad.loc.lb_num),
2021 				UDF_EXT_LEN(udf_rw32(s_ad.len)),
2022 				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2023 			break;
2024 		}
2025 
2026 		slot++;
2027 	}
2028 
2029 	/* 5) copy remainder */
2030 	for (;;) {
2031 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
2032 		if (eof)
2033 			break;
2034 
2035 		len   = udf_rw32(s_ad.len);
2036 		flags = UDF_EXT_FLAGS(len);
2037 		len   = UDF_EXT_LEN(len);
2038 
2039 		if (flags == UDF_EXT_REDIRECT) {
2040 			slot++;
2041 			continue;
2042 		}
2043 
2044 		node_ad_cpy[cpy_slot++] = s_ad;
2045 
2046 		DPRINTF(ALLOC, ("\t5: insert new mapping "
2047 			"vp %d lb %d, len %d, flags %d "
2048 			"-> stack\n",
2049 		udf_rw16(s_ad.loc.part_num),
2050 		udf_rw32(s_ad.loc.lb_num),
2051 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2052 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2053 
2054 		slot++;
2055 	}
2056 
2057 	/* 6) reset node descriptors */
2058 	udf_wipe_adslots(udf_node);
2059 
2060 	/* 7) copy back extents; merge when possible. Recounting on the fly */
2061 	cpy_slots = cpy_slot;
2062 
2063 	c_ad = node_ad_cpy[0];
2064 	slot = 0;
2065 	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
2066 		"lb %d, len %d, flags %d\n",
2067 	udf_rw16(c_ad.loc.part_num),
2068 	udf_rw32(c_ad.loc.lb_num),
2069 	UDF_EXT_LEN(udf_rw32(c_ad.len)),
2070 	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2071 
2072 	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2073 		s_ad = node_ad_cpy[cpy_slot];
2074 
2075 		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
2076 			"lb %d, len %d, flags %d\n",
2077 		udf_rw16(s_ad.loc.part_num),
2078 		udf_rw32(s_ad.loc.lb_num),
2079 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2080 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2081 
2082 		/* see if we can merge */
2083 		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2084 			/* not mergable (anymore) */
2085 			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
2086 				"len %d, flags %d\n",
2087 			udf_rw16(c_ad.loc.part_num),
2088 			udf_rw32(c_ad.loc.lb_num),
2089 			UDF_EXT_LEN(udf_rw32(c_ad.len)),
2090 			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2091 
2092 			error = udf_append_adslot(udf_node, &slot, &c_ad);
2093 			if (error) {
2094 				buf->b_error = error;
2095 				goto out;
2096 			}
2097 			c_ad = s_ad;
2098 			slot++;
2099 		}
2100 	}
2101 
2102 	/* 8) push rest slot (if any) */
2103 	if (UDF_EXT_LEN(c_ad.len) > 0) {
2104 		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
2105 				"len %d, flags %d\n",
2106 		udf_rw16(c_ad.loc.part_num),
2107 		udf_rw32(c_ad.loc.lb_num),
2108 		UDF_EXT_LEN(udf_rw32(c_ad.len)),
2109 		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2110 
2111 		error = udf_append_adslot(udf_node, &slot, &c_ad);
2112 		if (error) {
2113 			buf->b_error = error;
2114 			goto out;
2115 		}
2116 	}
2117 
2118 out:
2119 	udf_count_alloc_exts(udf_node);
2120 
2121 	/* the node's descriptors should now be sane */
2122 	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2123 	UDF_UNLOCK_NODE(udf_node, 0);
2124 
2125 	KASSERT(orig_inflen == new_inflen);
2126 	KASSERT(new_lbrec >= orig_lbrec);
2127 
2128 	return;
2129 }
2130 
2131 /* --------------------------------------------------------------------- */
2132 
/*
 * Grow node `udf_node' from its current inf_len up to `new_size' bytes.
 *
 * Two major cases:
 *   1) the node uses internal (embedded) allocation and the grown data
 *      still fits inside the (extended) file entry descriptor itself;
 *      only the length/CRC fields are updated.
 *   2) otherwise the node is converted (if needed) from internal to
 *      short/long_ad allocation — evacuating any embedded data through
 *      vn_rdwr() — and extended by appending UDF_EXT_FREE extents.
 *
 * Returns 0 on success or an errno value.  The node is kept locked with
 * UDF_LOCK_NODE()/UDF_UNLOCK_NODE() around the descriptor surgery.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag  *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t icbflags, len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t my_part, dst_part;
	uint8_t *data_pos, *evacuated_data;
	int addr_type;
	int slot, cpy_slot;
	int isdir, eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* largest extent length that is still a whole number of lbs */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	/* the node is recorded in either a file entry or an extended one */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr       = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
	} else {
		dscr       = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	/* room left in the descriptor for alloc descriptors / embedded data */
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw16(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw16(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* internal alloc: no logical blocks are recorded */
			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			/* NOTE(review): the return value of this read is
			 * assigned but never checked; a failed read would
			 * silently write back zeroed data below — confirm
			 * whether an error path is needed here */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0,
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc and select type */
		/* use a long_ad when the destination partition differs */
		isdir    = (vp->v_type == VDIR);
		my_part  = udf_rw16(udf_node->loc.loc.part_num);
		dst_part = isdir ? ump->fids_part : ump->data_part;
		addr_type = UDF_ICB_SHORT_ALLOC;
		if (dst_part != my_part)
			addr_type = UDF_ICB_LONG_ALLOC;

		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= addr_type;
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* current accumulator: one free extent covering the old data */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			/* redirect slots don't contribute file data */
			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appended space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		/* udf_ads_merge() returns non-zero when NOT mergeable */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0,
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	udf_count_alloc_exts(udf_node);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2402 
2403 /* --------------------------------------------------------------------- */
2404 
2405 int
2406 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2407 {
2408 	struct vnode *vp = udf_node->vnode;
2409 	struct udf_mount *ump = udf_node->ump;
2410 	struct file_entry    *fe;
2411 	struct extfile_entry *efe;
2412 	struct icb_tag  *icbtag;
2413 	struct long_ad c_ad, s_ad, *node_ad_cpy;
2414 	uint64_t size_diff, old_size, inflen, objsize;
2415 	uint64_t foffset, end_foffset;
2416 	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2417 	uint32_t lb_size, dscr_size, crclen;
2418 	uint32_t slot_offset;
2419 	uint32_t len, flags, max_len;
2420 	uint32_t num_lb, lb_num;
2421 	uint32_t max_l_ad, l_ad, l_ea;
2422 	uint16_t vpart_num;
2423 	uint8_t *data_pos;
2424 	int icbflags, addr_type;
2425 	int slot, cpy_slot, cpy_slots;
2426 	int eof, error;
2427 
2428 	DPRINTF(ALLOC, ("udf_shrink_node\n"));
2429 
2430 	UDF_LOCK_NODE(udf_node, 0);
2431 	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2432 
2433 	lb_size = udf_rw32(ump->logical_vol->lb_size);
2434 	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2435 
2436 	/* do the work */
2437 	fe  = udf_node->fe;
2438 	efe = udf_node->efe;
2439 	if (fe) {
2440 		icbtag  = &fe->icbtag;
2441 		inflen  = udf_rw64(fe->inf_len);
2442 		objsize = inflen;
2443 		dscr_size  = sizeof(struct file_entry) -1;
2444 		l_ea       = udf_rw32(fe->l_ea);
2445 		l_ad       = udf_rw32(fe->l_ad);
2446 		data_pos = (uint8_t *) fe + dscr_size + l_ea;
2447 	} else {
2448 		icbtag  = &efe->icbtag;
2449 		inflen  = udf_rw64(efe->inf_len);
2450 		objsize = udf_rw64(efe->obj_size);
2451 		dscr_size  = sizeof(struct extfile_entry) -1;
2452 		l_ea       = udf_rw32(efe->l_ea);
2453 		l_ad       = udf_rw32(efe->l_ad);
2454 		data_pos = (uint8_t *) efe + dscr_size + l_ea;
2455 	}
2456 	max_l_ad = lb_size - dscr_size - l_ea;
2457 
2458 	icbflags   = udf_rw16(icbtag->flags);
2459 	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2460 
2461 	old_size  = inflen;
2462 	size_diff = old_size - new_size;
2463 
2464 	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2465 
2466 	/* shrink the node to its new size */
2467 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
2468 		/* only reflect size change directly in the node */
2469 		KASSERT(new_size <= max_l_ad);
2470 		inflen  -= size_diff;
2471 		objsize -= size_diff;
2472 		l_ad    -= size_diff;
2473 		crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2474 		if (fe) {
2475 			fe->inf_len   = udf_rw64(inflen);
2476 			fe->l_ad      = udf_rw32(l_ad);
2477 			fe->tag.desc_crc_len = udf_rw16(crclen);
2478 		} else {
2479 			efe->inf_len  = udf_rw64(inflen);
2480 			efe->obj_size = udf_rw64(objsize);
2481 			efe->l_ad     = udf_rw32(l_ad);
2482 			efe->tag.desc_crc_len = udf_rw16(crclen);
2483 		}
2484 		error = 0;
2485 
2486 		/* clear the space in the descriptor */
2487 		KASSERT(old_size > new_size);
2488 		memset(data_pos + new_size, 0, old_size - new_size);
2489 
2490 		/* TODO zero appened space in buffer! */
2491 		/* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2492 
2493 		/* set new size for uvm */
2494 		uvm_vnp_setsize(vp, new_size);
2495 
2496 		udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2497 		UDF_UNLOCK_NODE(udf_node, 0);
2498 
2499 		KASSERT(new_inflen == orig_inflen - size_diff);
2500 		KASSERT(new_lbrec == orig_lbrec);
2501 		KASSERT(new_lbrec == 0);
2502 
2503 		return 0;
2504 	}
2505 
2506 	/* setup node cleanup extents copy space */
2507 	node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2508 		M_UDFMNT, M_WAITOK);
2509 	memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2510 
2511 	/*
2512 	 * Shrink the node by releasing the allocations and truncate the last
2513 	 * allocation to the new size. If the new size fits into the
2514 	 * allocation descriptor itself, transform it into an
2515 	 * UDF_ICB_INTERN_ALLOC.
2516 	 */
2517 	slot     = 0;
2518 	cpy_slot = 0;
2519 	foffset  = 0;
2520 
2521 	/* 1) copy till first overlap piece to the rewrite buffer */
2522 	for (;;) {
2523 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
2524 		if (eof) {
2525 			DPRINTF(WRITE,
2526 				("Shrink node failed: "
2527 				 "encountered EOF\n"));
2528 			error = EINVAL;
2529 			goto errorout; /* panic? */
2530 		}
2531 		len   = udf_rw32(s_ad.len);
2532 		flags = UDF_EXT_FLAGS(len);
2533 		len   = UDF_EXT_LEN(len);
2534 
2535 		if (flags == UDF_EXT_REDIRECT) {
2536 			slot++;
2537 			continue;
2538 		}
2539 
2540 		end_foffset = foffset + len;
2541 		if (end_foffset > new_size)
2542 			break;	/* found */
2543 
2544 		node_ad_cpy[cpy_slot++] = s_ad;
2545 
2546 		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2547 			"-> stack\n",
2548 			udf_rw16(s_ad.loc.part_num),
2549 			udf_rw32(s_ad.loc.lb_num),
2550 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
2551 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2552 
2553 		foffset = end_foffset;
2554 		slot++;
2555 	}
2556 	slot_offset = new_size - foffset;
2557 
2558 	/* 2) trunc overlapping slot at overlap and copy it */
2559 	if (slot_offset > 0) {
2560 		lb_num    = udf_rw32(s_ad.loc.lb_num);
2561 		vpart_num = udf_rw16(s_ad.loc.part_num);
2562 
2563 		if (flags == UDF_EXT_ALLOCATED) {
2564 			/* note: round DOWN on num_lb */
2565 			lb_num += (slot_offset + lb_size -1) / lb_size;
2566 			num_lb  = (len - slot_offset) / lb_size;
2567 
2568 			udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2569 		}
2570 
2571 		s_ad.len = udf_rw32(slot_offset | flags);
2572 		node_ad_cpy[cpy_slot++] = s_ad;
2573 		slot++;
2574 
2575 		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2576 			"-> stack\n",
2577 			udf_rw16(s_ad.loc.part_num),
2578 			udf_rw32(s_ad.loc.lb_num),
2579 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
2580 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2581 	}
2582 
2583 	/* 3) delete remainder */
2584 	for (;;) {
2585 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
2586 		if (eof)
2587 			break;
2588 
2589 		len       = udf_rw32(s_ad.len);
2590 		flags     = UDF_EXT_FLAGS(len);
2591 		len       = UDF_EXT_LEN(len);
2592 
2593 		if (flags == UDF_EXT_REDIRECT) {
2594 			slot++;
2595 			continue;
2596 		}
2597 
2598 		DPRINTF(ALLOC, ("\t3: delete remainder "
2599 			"vp %d lb %d, len %d, flags %d\n",
2600 		udf_rw16(s_ad.loc.part_num),
2601 		udf_rw32(s_ad.loc.lb_num),
2602 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2603 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2604 
2605 		if (flags == UDF_EXT_ALLOCATED) {
2606 			lb_num    = udf_rw32(s_ad.loc.lb_num);
2607 			vpart_num = udf_rw16(s_ad.loc.part_num);
2608 			num_lb    = (len + lb_size - 1) / lb_size;
2609 
2610 			udf_free_allocated_space(ump, lb_num, vpart_num,
2611 				num_lb);
2612 		}
2613 
2614 		slot++;
2615 	}
2616 
2617 	/* 4) if it will fit into the descriptor then convert */
2618 	if (new_size < max_l_ad) {
2619 		/*
2620 		 * resque/evacuate old piece by reading it in, and convert it
2621 		 * to internal alloc.
2622 		 */
2623 		if (new_size == 0) {
2624 			/* XXX/TODO only for zero sizing now */
2625 			udf_wipe_adslots(udf_node);
2626 
2627 			icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2628 			icbflags |=  UDF_ICB_INTERN_ALLOC;
2629 			icbtag->flags = udf_rw16(icbflags);
2630 
2631 			inflen  -= size_diff;	KASSERT(inflen == 0);
2632 			objsize -= size_diff;
2633 			l_ad     = new_size;
2634 			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2635 			if (fe) {
2636 				fe->inf_len   = udf_rw64(inflen);
2637 				fe->l_ad      = udf_rw32(l_ad);
2638 				fe->tag.desc_crc_len = udf_rw16(crclen);
2639 			} else {
2640 				efe->inf_len  = udf_rw64(inflen);
2641 				efe->obj_size = udf_rw64(objsize);
2642 				efe->l_ad     = udf_rw32(l_ad);
2643 				efe->tag.desc_crc_len = udf_rw16(crclen);
2644 			}
2645 			/* eventually copy in evacuated piece */
2646 			/* set new size for uvm */
2647 			uvm_vnp_setsize(vp, new_size);
2648 
2649 			free(node_ad_cpy, M_UDFMNT);
2650 			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2651 
2652 			UDF_UNLOCK_NODE(udf_node, 0);
2653 
2654 			KASSERT(new_inflen == orig_inflen - size_diff);
2655 			KASSERT(new_inflen == 0);
2656 			KASSERT(new_lbrec == 0);
2657 
2658 			return 0;
2659 		}
2660 
2661 		printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2662 	}
2663 
2664 	/* 5) reset node descriptors */
2665 	udf_wipe_adslots(udf_node);
2666 
2667 	/* 6) copy back extents; merge when possible. Recounting on the fly */
2668 	cpy_slots = cpy_slot;
2669 
2670 	c_ad = node_ad_cpy[0];
2671 	slot = 0;
2672 	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2673 		s_ad = node_ad_cpy[cpy_slot];
2674 
2675 		DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2676 			"lb %d, len %d, flags %d\n",
2677 		udf_rw16(s_ad.loc.part_num),
2678 		udf_rw32(s_ad.loc.lb_num),
2679 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2680 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2681 
2682 		/* see if we can merge */
2683 		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2684 			/* not mergable (anymore) */
2685 			DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2686 				"len %d, flags %d\n",
2687 			udf_rw16(c_ad.loc.part_num),
2688 			udf_rw32(c_ad.loc.lb_num),
2689 			UDF_EXT_LEN(udf_rw32(c_ad.len)),
2690 			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2691 
2692 			error = udf_append_adslot(udf_node, &slot, &c_ad);
2693 			if (error)
2694 				goto errorout; /* panic? */
2695 			c_ad = s_ad;
2696 			slot++;
2697 		}
2698 	}
2699 
2700 	/* 7) push rest slot (if any) */
2701 	if (UDF_EXT_LEN(c_ad.len) > 0) {
2702 		DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2703 				"len %d, flags %d\n",
2704 		udf_rw16(c_ad.loc.part_num),
2705 		udf_rw32(c_ad.loc.lb_num),
2706 		UDF_EXT_LEN(udf_rw32(c_ad.len)),
2707 		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2708 
2709 		error = udf_append_adslot(udf_node, &slot, &c_ad);
2710 		if (error)
2711 			goto errorout; /* panic? */
2712 		;
2713 	}
2714 
2715 	inflen  -= size_diff;
2716 	objsize -= size_diff;
2717 	if (fe) {
2718 		fe->inf_len   = udf_rw64(inflen);
2719 	} else {
2720 		efe->inf_len  = udf_rw64(inflen);
2721 		efe->obj_size = udf_rw64(objsize);
2722 	}
2723 	error = 0;
2724 
2725 	/* set new size for uvm */
2726 	uvm_vnp_setsize(vp, new_size);
2727 
2728 errorout:
2729 	free(node_ad_cpy, M_UDFMNT);
2730 
2731 	udf_count_alloc_exts(udf_node);
2732 
2733 	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2734 	UDF_UNLOCK_NODE(udf_node, 0);
2735 
2736 	KASSERT(new_inflen == orig_inflen - size_diff);
2737 
2738 	return error;
2739 }
2740 
2741