xref: /netbsd-src/sys/fs/udf/udf_allocation.c (revision 466a16a118933bd295a8a104f095714fadf9cf68)
1 /* $NetBSD: udf_allocation.c,v 1.19 2008/12/16 16:18:25 pooka Exp $ */
2 
3 /*
4  * Copyright (c) 2006, 2008 Reinoud Zandijk
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.19 2008/12/16 16:18:25 pooka Exp $");
32 #endif /* not lint */
33 
34 
35 #if defined(_KERNEL_OPT)
36 #include "opt_compat_netbsd.h"
37 #endif
38 
39 /* TODO strip */
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sysctl.h>
43 #include <sys/namei.h>
44 #include <sys/proc.h>
45 #include <sys/kernel.h>
46 #include <sys/vnode.h>
47 #include <miscfs/genfs/genfs_node.h>
48 #include <sys/mount.h>
49 #include <sys/buf.h>
50 #include <sys/file.h>
51 #include <sys/device.h>
52 #include <sys/disklabel.h>
53 #include <sys/ioctl.h>
54 #include <sys/malloc.h>
55 #include <sys/dirent.h>
56 #include <sys/stat.h>
57 #include <sys/conf.h>
58 #include <sys/kauth.h>
59 #include <sys/kthread.h>
60 #include <dev/clock_subr.h>
61 
62 #include <fs/udf/ecma167-udf.h>
63 #include <fs/udf/udf_mount.h>
64 
65 #include "udf.h"
66 #include "udf_subr.h"
67 #include "udf_bswap.h"
68 
69 
/*
 * Convert a struct vnode pointer to its backing struct udf_node.
 * The parameter is parenthesized so the macro is also safe when invoked
 * with a non-trivial expression argument.
 */
#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
71 
72 static void udf_record_allocation_in_node(struct udf_mount *ump,
73 	struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
74 	struct long_ad *node_ad_cpy);
75 
/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(n log(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
 * searching for the same or adjacent position again.
 */
82 
83 /* --------------------------------------------------------------------- */
84 
85 #if 0
86 #if 1
87 static void
88 udf_node_dump(struct udf_node *udf_node) {
89 	struct file_entry    *fe;
90 	struct extfile_entry *efe;
91 	struct icb_tag *icbtag;
92 	struct long_ad s_ad;
93 	uint64_t inflen;
94 	uint32_t icbflags, addr_type;
95 	uint32_t len, lb_num;
96 	uint32_t flags;
97 	int part_num;
98 	int lb_size, eof, slot;
99 
100 	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
101 		return;
102 
103 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
104 
105 	fe  = udf_node->fe;
106 	efe = udf_node->efe;
107 	if (fe) {
108 		icbtag = &fe->icbtag;
109 		inflen = udf_rw64(fe->inf_len);
110 	} else {
111 		icbtag = &efe->icbtag;
112 		inflen = udf_rw64(efe->inf_len);
113 	}
114 
115 	icbflags   = udf_rw16(icbtag->flags);
116 	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
117 
118 	printf("udf_node_dump %p :\n", udf_node);
119 
120 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
121 		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
122 		return;
123 	}
124 
125 	printf("\tInflen  = %"PRIu64"\n", inflen);
126 	printf("\t\t");
127 
128 	slot = 0;
129 	for (;;) {
130 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
131 		if (eof)
132 			break;
133 		part_num = udf_rw16(s_ad.loc.part_num);
134 		lb_num = udf_rw32(s_ad.loc.lb_num);
135 		len   = udf_rw32(s_ad.len);
136 		flags = UDF_EXT_FLAGS(len);
137 		len   = UDF_EXT_LEN(len);
138 
139 		printf("[");
140 		if (part_num >= 0)
141 			printf("part %d, ", part_num);
142 		printf("lb_num %d, len %d", lb_num, len);
143 		if (flags)
144 			printf(", flags %d", flags>>30);
145 		printf("] ");
146 
147 		if (flags == UDF_EXT_REDIRECT) {
148 			printf("\n\textent END\n\tallocation extent\n\t\t");
149 		}
150 
151 		slot++;
152 	}
153 	printf("\n\tl_ad END\n\n");
154 }
155 #else
156 #define udf_node_dump(a)
157 #endif
158 
159 
/*
 * Debug-only (compiled out by the enclosing #if 0): verify that the range
 * [lb_num, lb_num + num_lb) on virtual partition vpart_num is marked as
 * allocated in the partition's unallocated-space bitmap. A SET bit means
 * "free" here, so any set bit in the range is a violation; it is only
 * logged, not asserted (see the commented-out KASSERT below).
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit  = lb_num % 8;
		/* walk the range bit by bit; advance bpos on byte boundary */
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				/* bit set -> block is free, contradiction */
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
		/* FALLTHROUGH */
	default:
		/* not implemented */
		break;
	}
}
229 
230 
/*
 * Debug-only (compiled out by the enclosing #if 0): exhaustive consistency
 * check of a node's allocation descriptors. Recomputes the information
 * length and the number of recorded logical blocks by walking all adslots,
 * asserts they match the values stored in the (extended) file entry, checks
 * that the unused descriptor tail is zeroed, and verifies that every
 * allocated extent is marked busy in the space bitmaps. The recomputed
 * totals are returned through *cnt_inflen and *cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad  s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* gather descriptor geometry from the fe/efe backing this node */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr       = (union dscrptr *) fe;
		icbtag     = &fe->icbtag;
		inflen     = udf_rw64(fe->inf_len);
		dscr_size  = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad       = udf_rw32(fe->l_ad);
		l_ea       = udf_rw32(fe->l_ea);
	} else {
		dscr       = (union dscrptr *) efe;
		icbtag     = &efe->icbtag;
		inflen     = udf_rw64(efe->inf_len);
		dscr_size  = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad       = udf_rw32(efe->l_ad);
		l_ea       = udf_rw32(efe->l_ea);
	}
	/* allocation descriptors start after the header and extended attrs */
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad   = lb_size - dscr_size - l_ea;
	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen     = 0;
	*cnt_logblksrec = 0;

	/* embedded data: l_ad is the data length itself, no extents exist */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		/* only the last extent may end mid logical block */
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				/* round up: partial blocks are recorded too */
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* redirect extents must span exactly one block */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
338 #else
339 static void
340 udf_node_sanity_check(struct udf_node *udf_node,
341 		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
342 	struct file_entry    *fe;
343 	struct extfile_entry *efe;
344 	struct icb_tag *icbtag;
345 	uint64_t inflen, logblksrec;
346 	int dscr_size, lb_size;
347 
348 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
349 
350 	fe  = udf_node->fe;
351 	efe = udf_node->efe;
352 	if (fe) {
353 		icbtag = &fe->icbtag;
354 		inflen = udf_rw64(fe->inf_len);
355 		dscr_size  = sizeof(struct file_entry) -1;
356 		logblksrec = udf_rw64(fe->logblks_rec);
357 	} else {
358 		icbtag = &efe->icbtag;
359 		inflen = udf_rw64(efe->inf_len);
360 		dscr_size  = sizeof(struct extfile_entry) -1;
361 		logblksrec = udf_rw64(efe->logblks_rec);
362 	}
363 	*cnt_logblksrec = logblksrec;
364 	*cnt_inflen     = inflen;
365 }
366 #endif
367 
368 /* --------------------------------------------------------------------- */
369 
/*
 * Translate the virtual (partition-relative) address in icb_loc into an
 * absolute disc logical block number, returned in *lb_numres. *extres is
 * set to the number of consecutive logical blocks following it that share
 * the same contiguous mapping, so callers can batch I/O.
 *
 * Returns 0 on success or EINVAL on an out-of-range address/unsupported
 * mapping, or an error from reading the VAT.
 */
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
		   uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc       *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart  = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

	/* metadata partitions re-enter here after resolving to their
	 * backing (possibly sparable) partition */
translate_again:
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		/* NOTE(review): bound check uses > not >=; same question as
		 * the VAT case below -- confirm against the UDF spec */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel    = lb_num % ump->sparable_packet_size;

		/* linear scan of the sparing table for a remapped packet */
		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres    = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* not remapped: transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot    = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len   = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			/* redirect extents chain allocation descriptors;
			 * they occupy no file offset range */
			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num   = udf_rw32(s_icb_loc.loc.lb_num);
		vpart    = udf_rw16(s_icb_loc.loc.part_num);
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		len     -= ext_offset;
		ext_offset = 0;

		flags = UDF_EXT_FLAGS(s_icb_loc.len);

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}
533 
534 
535 /* XXX  provisional primitive braindead version */
536 /* TODO use ext_res */
537 void
538 udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
539 	uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
540 {
541 	struct long_ad loc;
542 	uint32_t lb_numres, ext_res;
543 	int sector;
544 
545 	for (sector = 0; sector < sectors; sector++) {
546 		memset(&loc, 0, sizeof(struct long_ad));
547 		loc.loc.part_num = udf_rw16(vpart_num);
548 		loc.loc.lb_num   = udf_rw32(*lmapping);
549 		udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
550 		*pmapping = lb_numres;
551 		lmapping++; pmapping++;
552 	}
553 }
554 
555 
556 /* --------------------------------------------------------------------- */
557 
/*
 * Translate an extent (in logical_blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */
562 
/*
 * Translate `num_lb' logical blocks of a file, starting at file block
 * `from', into disc sector numbers written to map[]. Special map values:
 * UDF_TRANS_INTERN for embedded data and UDF_TRANS_ZERO for unallocated
 * (sparse) blocks. Takes and releases the node lock; returns 0 or errno.
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	/* embedded data: no extents, signal the caller with one marker */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot    = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirects chain descriptor blocks; they cover no file
		 * offset range, so skip them while seeking */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; byte offset of `from' into this extent */
	ext_offset = from * lb_size - foffset;

	/* second pass: emit translations until num_lb blocks are mapped */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);

		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		/* NOTE(review): ext_offset is expected to be a whole number
		 * of logical blocks here (extents end block-aligned), so the
		 * round-up division is exact -- confirm */
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		overlap  = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while(){} is necessary since the extent that
		 * udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* sparse/unrecorded: emit zero markers */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* translate virtual to absolute disc block */
				t_ad.loc.lb_num   = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
730 
731 /* --------------------------------------------------------------------- */
732 
733 static int
734 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
735 {
736 	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
737 	uint8_t *blob;
738 	int entry, chunk, found, error;
739 
740 	KASSERT(ump);
741 	KASSERT(ump->logical_vol);
742 
743 	lb_size = udf_rw32(ump->logical_vol->lb_size);
744 	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
745 
746 	/* TODO static allocation of search chunk */
747 
748 	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
749 	found  = 0;
750 	error  = 0;
751 	entry  = 0;
752 	do {
753 		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
754 		if (chunk <= 0)
755 			break;
756 		/* load in chunk */
757 		error = udf_vat_read(ump->vat_node, blob, chunk,
758 				ump->vat_offset + lb_num * 4);
759 
760 		if (error)
761 			break;
762 
763 		/* search this chunk */
764 		for (entry=0; entry < chunk /4; entry++, lb_num++) {
765 			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
766 			lb_map = udf_rw32(udf_rw32_lbmap);
767 			if (lb_map == 0xffffffff) {
768 				found = 1;
769 				break;
770 			}
771 		}
772 	} while (!found);
773 	if (error) {
774 		printf("udf_search_free_vatloc: error reading in vat chunk "
775 			"(lb %d, size %d)\n", lb_num, chunk);
776 	}
777 
778 	if (!found) {
779 		/* extend VAT */
780 		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
781 		lb_num = ump->vat_entries;
782 		ump->vat_entries++;
783 	}
784 
785 	/* mark entry with initialiser just in case */
786 	lb_map = udf_rw32(0xfffffffe);
787 	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
788 		ump->vat_offset + lb_num *4);
789 	ump->vat_last_free_lb = lb_num;
790 
791 	free(blob, M_UDFTEMP);
792 	*lbnumres = lb_num;
793 	return 0;
794 }
795 
796 
/*
 * Allocate up to *num_lb free logical blocks from a space bitmap, writing
 * each allocated block number into lmappos[]. A SET bit means "free"; bits
 * are cleared as blocks are handed out. On return *num_lb holds the number
 * of blocks that could NOT be satisfied (0 when fully allocated).
 *
 * Separate scan positions are kept for metadata and data allocations so
 * the two don't interleave on disc.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t  diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;	/* start the scan on a byte boundary */
	/* two passes: from the remembered position to the end, then once
	 * more after wrapping around to the start of the bitmap */
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos  = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				/* byte fully allocated; advance to next */
				offset += 8;
				continue;
			}
			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			*bpos &= ~(1 << (bit-1));	/* claim the block */
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember where we stopped for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
845 
846 
847 static void
848 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
849 {
850 	uint32_t offset;
851 	uint32_t bit, bitval;
852 	uint8_t *bpos;
853 
854 	offset = lb_num;
855 
856 	/* starter bits */
857 	bpos = bitmap->bits + offset/8;
858 	bit = offset % 8;
859 	while ((bit != 0) && (num_lb > 0)) {
860 		bitval = (1 << bit);
861 		KASSERT((*bpos & bitval) == 0);
862 		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
863 			offset, bpos, bit));
864 		*bpos |= bitval;
865 		offset++; num_lb--;
866 		bit = (bit + 1) % 8;
867 	}
868 	if (num_lb == 0)
869 		return;
870 
871 	/* whole bytes */
872 	KASSERT(bit == 0);
873 	bpos = bitmap->bits + offset / 8;
874 	while (num_lb >= 8) {
875 		KASSERT((*bpos == 0));
876 		DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
877 		*bpos = 255;
878 		offset += 8; num_lb -= 8;
879 		bpos++;
880 	}
881 
882 	/* stop bits */
883 	KASSERT(num_lb < 8);
884 	bit = 0;
885 	while (num_lb > 0) {
886 		bitval = (1 << bit);
887 		KASSERT((*bpos & bitval) == 0);
888 		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
889 			offset, bpos, bit));
890 		*bpos |= bitval;
891 		offset++; num_lb--;
892 		bit = (bit + 1) % 8;
893 	}
894 }
895 
896 
/* allocate a contiguous sequence of sectornumbers */
/*
 * Allocate num_lb logical blocks on virtual partition vpart_num for data of
 * class udf_c_type, writing the allocated (partition-relative) block numbers
 * into lmapping[]. Dispatches on the partition's allocation scheme: VAT
 * slot, sequential append, space-bitmap, or metadata bitmap. Takes the
 * allocate_mutex for the duration. Returns 0 or ENOSPC/errno.
 */
static int
udf_allocate_space(struct udf_mount *ump, int udf_c_type,
	uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* XXX TODO check disc space */

	alloc_type =  ump->vtop_alloc[vpart_num];
	is_node    = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	/* NOTE(review): no default case; an unknown alloc_type silently
	 * returns success with lmapping untouched -- confirm intended */
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error)
			*lmappos = lb_num;
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov  = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate: hand out the next writable sectors in order */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R  */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R  */
		printf("ALERT: udf_allocate_space : allocation %d "
				"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64",", (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
1037 
1038 /* --------------------------------------------------------------------- */
1039 
/*
 * Return num_lb logical blocks starting at lb_num on virtual partition
 * vpart_num to the free pool. Depending on the partition type this sets
 * bits in the freed/unallocated space bitmap (and bumps the logical volume
 * integrity free count), invalidates the VAT entry, or updates the metadata
 * bitmap. Takes the allocate_mutex for the duration.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		/* write the all-ones "unused" marker into the VAT slot */
		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember the lowest free slot to shorten future searches */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1127 
1128 /* --------------------------------------------------------------------- */
1129 
1130 int
1131 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type,
1132 	uint32_t num_lb, uint16_t vpartnr, uint64_t *lmapping)
1133 {
1134 	/* TODO properly maintain uncomitted_lb per partition */
1135 
1136 	/* reserve size for VAT allocated data */
1137 	if (ump->vtop_alloc[vpartnr] == UDF_ALLOC_VAT) {
1138 		mutex_enter(&ump->allocate_mutex);
1139 			ump->uncomitted_lb += num_lb;
1140 		mutex_exit(&ump->allocate_mutex);
1141 	}
1142 
1143 	return udf_allocate_space(ump, udf_c_type, vpartnr, num_lb, lmapping);
1144 }
1145 
1146 /* --------------------------------------------------------------------- */
1147 
1148 /*
1149  * Allocate a buf on disc for direct write out. The space doesn't have to be
1150  * contiguous as the caller takes care of this.
1151  */
1152 
1153 void
1154 udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
1155 	uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
1156 {
1157 	struct udf_node  *udf_node = VTOI(buf->b_vp);
1158 	int lb_size, blks, udf_c_type;
1159 	int vpart_num, num_lb;
1160 	int error, s;
1161 
1162 	/*
1163 	 * for each sector in the buf, allocate a sector on disc and record
1164 	 * its position in the provided mapping array.
1165 	 *
1166 	 * If its userdata or FIDs, record its location in its node.
1167 	 */
1168 
1169 	lb_size    = udf_rw32(ump->logical_vol->lb_size);
1170 	num_lb     = (buf->b_bcount + lb_size -1) / lb_size;
1171 	blks       = lb_size / DEV_BSIZE;
1172 	udf_c_type = buf->b_udf_c_type;
1173 
1174 	KASSERT(lb_size == ump->discinfo.sector_size);
1175 
1176 	/* select partition to record the buffer on */
1177 	vpart_num = ump->data_part;
1178 	if (udf_c_type == UDF_C_NODE)
1179 		vpart_num = ump->node_part;
1180 	if (udf_c_type == UDF_C_FIDS)
1181 		vpart_num = ump->fids_part;
1182 	*vpart_nump = vpart_num;
1183 
1184 	if (udf_c_type == UDF_C_NODE) {
1185 		/* if not VAT, its allready allocated */
1186 		if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
1187 			return;
1188 
1189 		/* allocate on its backing sequential partition */
1190 		vpart_num = ump->data_part;
1191 	}
1192 
1193 	/* do allocation on the selected partition */
1194 	error = udf_allocate_space(ump, udf_c_type,
1195 			vpart_num, num_lb, lmapping);
1196 	if (error) {
1197 		/* ARGH! we've not done our accounting right! */
1198 		panic("UDF disc allocation accounting gone wrong");
1199 	}
1200 
1201 	/* commit our sector count */
1202 	mutex_enter(&ump->allocate_mutex);
1203 		if (num_lb > ump->uncomitted_lb) {
1204 			ump->uncomitted_lb = 0;
1205 		} else {
1206 			ump->uncomitted_lb -= num_lb;
1207 		}
1208 	mutex_exit(&ump->allocate_mutex);
1209 
1210 	/* If its userdata or FIDs, record its allocation in its node. */
1211 	if ((udf_c_type == UDF_C_USERDATA) ||
1212 	    (udf_c_type == UDF_C_FIDS) ||
1213 	    (udf_c_type == UDF_C_METADATA_SBM))
1214 	{
1215 		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
1216 			node_ad_cpy);
1217 		/* decrement our outstanding bufs counter */
1218 		s = splbio();
1219 			udf_node->outstanding_bufs--;
1220 		splx(s);
1221 	}
1222 }
1223 
1224 /* --------------------------------------------------------------------- */
1225 
1226 /*
1227  * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1228  * possible (anymore); a2 returns the rest piece.
1229  */
1230 
/*
 * Try to merge extent a2 into a1.  Both extents must describe the same
 * kind of space (same extent flags) and, unless they are free extents,
 * a2 must directly follow a1 on the same partition.  As much of a2 as
 * fits (a1 is capped at the largest lb_size-multiple <= UDF_EXT_MAXLEN)
 * is moved into a1; a2 is updated to describe the remaining piece.
 *
 * Returns 0 when a2 was completely absorbed into a1, 1 when merging was
 * not possible or a rest piece remains in a2.
 */
static int
udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
{
	uint32_t max_len, merge_len;
	uint32_t a1_len, a2_len;
	uint32_t a1_flags, a2_flags;
	uint32_t a1_lbnum, a2_lbnum;
	uint16_t a1_part, a2_part;

	/* largest extent length that is a whole number of logical blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
	a1_len   = UDF_EXT_LEN(udf_rw32(a1->len));
	a1_lbnum = udf_rw32(a1->loc.lb_num);
	a1_part  = udf_rw16(a1->loc.part_num);

	a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
	a2_len   = UDF_EXT_LEN(udf_rw32(a2->len));
	a2_lbnum = udf_rw32(a2->loc.lb_num);
	a2_part  = udf_rw16(a2->loc.part_num);

	/* defines same space */
	if (a1_flags != a2_flags)
		return 1;

	/* free extents have no location; only check adjacency when mapped */
	if (a1_flags != UDF_EXT_FREE) {
		/* the same partition */
		if (a1_part != a2_part)
			return 1;

		/* a2 is successor of a1 */
		if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
			return 1;
	}

	/* merge as most from a2 if possible */
	merge_len = MIN(a2_len, max_len - a1_len);
	a1_len   += merge_len;
	a2_len   -= merge_len;
	a2_lbnum += merge_len/lb_size;

	a1->len = udf_rw32(a1_len | a1_flags);
	a2->len = udf_rw32(a2_len | a2_flags);
	a2->loc.lb_num = udf_rw32(a2_lbnum);

	/* rest piece remains in a2? */
	if (a2_len > 0)
		return 1;

	/* a2 was completely merged into a1 */
	return 0;
}
1282 
1283 /* --------------------------------------------------------------------- */
1284 
/*
 * Erase all allocation descriptor space of a node: the area embedded in
 * its (extended) file entry and the data area of every allocation
 * extent entry.  l_ad, the recorded-logical-blocks count and the
 * descriptor CRC lengths are reset to match the now-empty state, and
 * the node is flagged IN_NODE_REBUILD.
 */
static void
udf_wipe_adslots(struct udf_node *udf_node)
{
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	uint64_t inflen, objsize;
	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
	uint8_t *data_pos;
	int extnr;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a FE or an EFE, never both */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		/* -1: presumably compensates for the descriptor's
		 * trailing data[] byte — confirm against struct def */
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* NOTE(review): inflen, objsize and l_ad are computed above but
	 * never used in this function — candidates for removal */
	max_l_ad = lb_size - dscr_size - l_ea;

	/* wipe fe/efe */
	memset(data_pos, 0, max_l_ad);
	crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
	if (fe) {
		fe->l_ad         = udf_rw32(0);
		fe->logblks_rec  = udf_rw64(0);
		fe->tag.desc_crc_len = udf_rw32(crclen);
	} else {
		efe->l_ad        = udf_rw32(0);
		efe->logblks_rec = udf_rw64(0);
		efe->tag.desc_crc_len = udf_rw32(crclen);
	}

	/* wipe all allocation extent entries */
	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
		ext = udf_node->ext[extnr];
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		data_pos = (uint8_t *) ext->data;
		max_l_ad = lb_size - dscr_size;
		memset(data_pos, 0, max_l_ad);
		ext->l_ad = udf_rw32(0);

		crclen = dscr_size - UDF_DESC_TAG_LENGTH;
		ext->tag.desc_crc_len = udf_rw32(crclen);
	}
	udf_node->i_flags |= IN_NODE_REBUILD;
}
1344 
1345 /* --------------------------------------------------------------------- */
1346 
1347 void
1348 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1349 	int *eof) {
1350 	struct file_entry      *fe;
1351 	struct extfile_entry   *efe;
1352 	struct alloc_ext_entry *ext;
1353 	struct icb_tag *icbtag;
1354 	struct short_ad *short_ad;
1355 	struct long_ad *long_ad, l_icb;
1356 	uint32_t offset;
1357 	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
1358 	uint8_t *data_pos;
1359 	int icbflags, addr_type, adlen, extnr;
1360 
1361 	/* determine what descriptor we are in */
1362 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1363 
1364 	fe  = udf_node->fe;
1365 	efe = udf_node->efe;
1366 	if (fe) {
1367 		icbtag  = &fe->icbtag;
1368 		dscr_size  = sizeof(struct file_entry) -1;
1369 		l_ea       = udf_rw32(fe->l_ea);
1370 		l_ad       = udf_rw32(fe->l_ad);
1371 		data_pos = (uint8_t *) fe + dscr_size + l_ea;
1372 	} else {
1373 		icbtag  = &efe->icbtag;
1374 		dscr_size  = sizeof(struct extfile_entry) -1;
1375 		l_ea       = udf_rw32(efe->l_ea);
1376 		l_ad       = udf_rw32(efe->l_ad);
1377 		data_pos = (uint8_t *) efe + dscr_size + l_ea;
1378 	}
1379 
1380 	icbflags  = udf_rw16(icbtag->flags);
1381 	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1382 
1383 	/* just in case we're called on an intern, its EOF */
1384 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
1385 		memset(icb, 0, sizeof(struct long_ad));
1386 		*eof = 1;
1387 		return;
1388 	}
1389 
1390 	adlen = 0;
1391 	if (addr_type == UDF_ICB_SHORT_ALLOC) {
1392 		adlen = sizeof(struct short_ad);
1393 	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
1394 		adlen = sizeof(struct long_ad);
1395 	}
1396 
1397 	/* if offset too big, we go to the allocation extensions */
1398 	offset = slot * adlen;
1399 	extnr  = -1;
1400 	while (offset >= l_ad) {
1401 		/* check if our last entry is a redirect */
1402 		if (addr_type == UDF_ICB_SHORT_ALLOC) {
1403 			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
1404 			l_icb.len          = short_ad->len;
1405 			l_icb.loc.part_num = udf_node->loc.loc.part_num;
1406 			l_icb.loc.lb_num   = short_ad->lb_num;
1407 		} else {
1408 			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
1409 			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
1410 			l_icb = *long_ad;
1411 		}
1412 		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
1413 		if (flags != UDF_EXT_REDIRECT) {
1414 			l_ad = 0;	/* force EOF */
1415 			break;
1416 		}
1417 
1418 		/* advance to next extent */
1419 		extnr++;
1420 		if (extnr >= udf_node->num_extensions) {
1421 			l_ad = 0;	/* force EOF */
1422 			break;
1423 		}
1424 		offset = offset - l_ad;
1425 		ext  = udf_node->ext[extnr];
1426 		dscr_size  = sizeof(struct alloc_ext_entry) -1;
1427 		l_ad = udf_rw32(ext->l_ad);
1428 		data_pos = (uint8_t *) ext + dscr_size;
1429 	}
1430 
1431 	/* XXX l_ad == 0 should be enough to check */
1432 	*eof = (offset >= l_ad) || (l_ad == 0);
1433 	if (*eof) {
1434 		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
1435 			"l_ad %d\n", extnr, offset, l_ad));
1436 		memset(icb, 0, sizeof(struct long_ad));
1437 		return;
1438 	}
1439 
1440 	/* get the element */
1441 	if (addr_type == UDF_ICB_SHORT_ALLOC) {
1442 		short_ad = (struct short_ad *) (data_pos + offset);
1443 		icb->len          = short_ad->len;
1444 		icb->loc.part_num = udf_node->loc.loc.part_num;
1445 		icb->loc.lb_num   = short_ad->lb_num;
1446 	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
1447 		long_ad = (struct long_ad *) (data_pos + offset);
1448 		*icb = *long_ad;
1449 	}
1450 	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
1451 		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
1452 		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
1453 }
1454 
1455 /* --------------------------------------------------------------------- */
1456 
/*
 * Write allocation descriptor *icb at slot *slot of the node,
 * overwriting an existing entry or appending one past the current end.
 * When the current descriptor area runs out of room, a new allocation
 * extent entry (AED) is allocated and linked in via a redirect
 * descriptor; *slot is then incremented to compensate for the inserted
 * link so the caller's slot numbering stays consistent.  The
 * recorded-logical-blocks count and descriptor CRC lengths are
 * maintained on the fly.
 *
 * Returns 0 on success or the error from allocating a new extent.
 * Panics when called on a node using embedded (intern) allocation.
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr          *dscr, *extdscr;
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	vpart_num = udf_rw16(udf_node->loc.loc.part_num);

	/* determine what descriptor we are in */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr      = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea      = udf_rw32(fe->l_ea);
		l_ad_p    = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag    = &efe->icbtag;
		dscr      = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea      = udf_rw32(efe->l_ea);
		l_ad_p    = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	/* descriptor area starts after the extended attributes */
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	/* descriptor stride depends on the node's allocation scheme */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad since it can be a synthesized one */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num   = udf_rw32(0);
	}

	/* if offset too big, we go to the allocation extensions */
	l_ad   = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is adressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		offset = offset - l_ad;

		ext  = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad   = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len          = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num   = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		/* subtract the replaced extent's blocks; they are re-added
		 * below for the new descriptor when applicable */
		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/* have to append aed, see if we already have a spare one */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));

			/* allocate one logical block to hold the new AED */
			error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
					vpart_num, &lmapping);
			lb_num = lmapping;
			if (error)
				return error;

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num   = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
			ext = &extdscr->aee;

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size  = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw32(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr] = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len    = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		crclen = udf_rw32(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len    = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed (i.e. when we appended) */
	if (offset >= l_ad) {
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1695 
1696 /* --------------------------------------------------------------------- */
1697 
1698 static void
1699 udf_count_alloc_exts(struct udf_node *udf_node)
1700 {
1701 	struct long_ad s_ad;
1702 	uint32_t lb_num, len, flags;
1703 	uint16_t vpart_num;
1704 	int slot, eof;
1705 	int num_extents, extnr;
1706 	int lb_size;
1707 
1708 	if (udf_node->num_extensions == 0)
1709 		return;
1710 
1711 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1712 	/* count number of allocation extents in use */
1713 	num_extents = 0;
1714 	slot = 0;
1715 	for (;;) {
1716 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
1717 		if (eof)
1718 			break;
1719 		len   = udf_rw32(s_ad.len);
1720 		flags = UDF_EXT_FLAGS(len);
1721 
1722 		if (flags == UDF_EXT_REDIRECT)
1723 			num_extents++;
1724 
1725 		slot++;
1726 	}
1727 
1728 	DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1729 		num_extents));
1730 
1731 	/* XXX choice: we could delay freeing them on node writeout */
1732 	/* free excess entries */
1733 	extnr = num_extents;
1734 	for (;extnr < udf_node->num_extensions; extnr++) {
1735 		DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1736 		/* free dscriptor */
1737 		s_ad = udf_node->ext_loc[extnr];
1738 		udf_free_logvol_dscr(udf_node->ump, &s_ad,
1739 			udf_node->ext[extnr]);
1740 		udf_node->ext[extnr] = NULL;
1741 
1742 		/* free disc space */
1743 		lb_num    = udf_rw32(s_ad.loc.lb_num);
1744 		vpart_num = udf_rw16(s_ad.loc.part_num);
1745 		udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1746 
1747 		memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1748 	}
1749 
1750 	/* set our new number of allocation extents */
1751 	udf_node->num_extensions = num_extents;
1752 }
1753 
1754 
1755 /* --------------------------------------------------------------------- */
1756 
1757 /*
1758  * Adjust the node's allocation descriptors to reflect the new mapping; do
1759  * take note that we might glue to existing allocation descriptors.
1760  *
1761  * XXX Note there can only be one allocation being recorded/mount; maybe
1762  * explicit allocation in shedule thread?
1763  */
1764 
1765 static void
1766 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
1767 	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
1768 {
1769 	struct vnode    *vp = buf->b_vp;
1770 	struct udf_node *udf_node = VTOI(vp);
1771 	struct file_entry      *fe;
1772 	struct extfile_entry   *efe;
1773 	struct icb_tag  *icbtag;
1774 	struct long_ad   s_ad, c_ad;
1775 	uint64_t inflen, from, till;
1776 	uint64_t foffset, end_foffset, restart_foffset;
1777 	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
1778 	uint32_t num_lb, len, flags, lb_num;
1779 	uint32_t run_start;
1780 	uint32_t slot_offset, replace_len, replace;
1781 	int addr_type, icbflags;
1782 //	int udf_c_type = buf->b_udf_c_type;
1783 	int lb_size, run_length, eof;
1784 	int slot, cpy_slot, cpy_slots, restart_slot;
1785 	int error;
1786 
1787 	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
1788 
1789 #if 0
1790 	/* XXX disable sanity check for now */
1791 	/* sanity check ... should be panic ? */
1792 	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
1793 		return;
1794 #endif
1795 
1796 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1797 
1798 	/* do the job */
1799 	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */
1800 	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
1801 
1802 	fe  = udf_node->fe;
1803 	efe = udf_node->efe;
1804 	if (fe) {
1805 		icbtag = &fe->icbtag;
1806 		inflen = udf_rw64(fe->inf_len);
1807 	} else {
1808 		icbtag = &efe->icbtag;
1809 		inflen = udf_rw64(efe->inf_len);
1810 	}
1811 
1812 	/* do check if `till' is not past file information length */
1813 	from = buf->b_lblkno * lb_size;
1814 	till = MIN(inflen, from + buf->b_resid);
1815 
1816 	num_lb = (till - from + lb_size -1) / lb_size;
1817 
1818 	DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
1819 
1820 	icbflags  = udf_rw16(icbtag->flags);
1821 	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1822 
1823 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
1824 		/* nothing to do */
1825 		/* XXX clean up rest of node? just in case? */
1826 		UDF_UNLOCK_NODE(udf_node, 0);
1827 		return;
1828 	}
1829 
1830 	slot     = 0;
1831 	cpy_slot = 0;
1832 	foffset  = 0;
1833 
1834 	/* 1) copy till first overlap piece to the rewrite buffer */
1835 	for (;;) {
1836 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
1837 		if (eof) {
1838 			DPRINTF(WRITE,
1839 				("Record allocation in node "
1840 				 "failed: encountered EOF\n"));
1841 			UDF_UNLOCK_NODE(udf_node, 0);
1842 			buf->b_error = EINVAL;
1843 			return;
1844 		}
1845 		len   = udf_rw32(s_ad.len);
1846 		flags = UDF_EXT_FLAGS(len);
1847 		len   = UDF_EXT_LEN(len);
1848 
1849 		if (flags == UDF_EXT_REDIRECT) {
1850 			slot++;
1851 			continue;
1852 		}
1853 
1854 		end_foffset = foffset + len;
1855 		if (end_foffset > from)
1856 			break;	/* found */
1857 
1858 		node_ad_cpy[cpy_slot++] = s_ad;
1859 
1860 		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
1861 			"-> stack\n",
1862 			udf_rw16(s_ad.loc.part_num),
1863 			udf_rw32(s_ad.loc.lb_num),
1864 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
1865 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1866 
1867 		foffset = end_foffset;
1868 		slot++;
1869 	}
1870 	restart_slot    = slot;
1871 	restart_foffset = foffset;
1872 
1873 	/* 2) trunc overlapping slot at overlap and copy it */
1874 	slot_offset = from - foffset;
1875 	if (slot_offset > 0) {
1876 		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
1877 				slot_offset, flags >> 30, flags));
1878 
1879 		s_ad.len = udf_rw32(slot_offset | flags);
1880 		node_ad_cpy[cpy_slot++] = s_ad;
1881 
1882 		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
1883 			"-> stack\n",
1884 			udf_rw16(s_ad.loc.part_num),
1885 			udf_rw32(s_ad.loc.lb_num),
1886 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
1887 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1888 	}
1889 	foffset += slot_offset;
1890 
1891 	/* 3) insert new mappings */
1892 	memset(&s_ad, 0, sizeof(struct long_ad));
1893 	lb_num = 0;
1894 	for (lb_num = 0; lb_num < num_lb; lb_num++) {
1895 		run_start  = mapping[lb_num];
1896 		run_length = 1;
1897 		while (lb_num < num_lb-1) {
1898 			if (mapping[lb_num+1] != mapping[lb_num]+1)
1899 				if (mapping[lb_num+1] != mapping[lb_num])
1900 					break;
1901 			run_length++;
1902 			lb_num++;
1903 		}
1904 		/* insert slot for this mapping */
1905 		len = run_length * lb_size;
1906 
1907 		/* bounds checking */
1908 		if (foffset + len > till)
1909 			len = till - foffset;
1910 		KASSERT(foffset + len <= inflen);
1911 
1912 		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
1913 		s_ad.loc.part_num = udf_rw16(vpart_num);
1914 		s_ad.loc.lb_num   = udf_rw32(run_start);
1915 
1916 		foffset += len;
1917 
1918 		/* paranoia */
1919 		if (len == 0) {
1920 			DPRINTF(WRITE,
1921 				("Record allocation in node "
1922 				 "failed: insert failed\n"));
1923 			UDF_UNLOCK_NODE(udf_node, 0);
1924 			buf->b_error = EINVAL;
1925 			return;
1926 		}
1927 		node_ad_cpy[cpy_slot++] = s_ad;
1928 
1929 		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
1930 				"flags %d -> stack\n",
1931 			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
1932 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
1933 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1934 	}
1935 
1936 	/* 4) pop replaced length */
1937 	slot    = restart_slot;
1938 	foffset = restart_foffset;
1939 
1940 	replace_len = till - foffset;	/* total amount of bytes to pop */
1941 	slot_offset = from - foffset;	/* offset in first encounted slot */
1942 	KASSERT((slot_offset % lb_size) == 0);
1943 
1944 	for (;;) {
1945 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
1946 		if (eof)
1947 			break;
1948 
1949 		len    = udf_rw32(s_ad.len);
1950 		flags  = UDF_EXT_FLAGS(len);
1951 		len    = UDF_EXT_LEN(len);
1952 		lb_num = udf_rw32(s_ad.loc.lb_num);
1953 
1954 		if (flags == UDF_EXT_REDIRECT) {
1955 			slot++;
1956 			continue;
1957 		}
1958 
1959 		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
1960 				"replace_len %d, "
1961 				"vp %d, lb %d, len %d, flags %d\n",
1962 			slot, slot_offset, replace_len,
1963 			udf_rw16(s_ad.loc.part_num),
1964 			udf_rw32(s_ad.loc.lb_num),
1965 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
1966 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1967 
1968 		/* adjust for slot offset */
1969 		if (slot_offset) {
1970 			DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
1971 			lb_num += slot_offset / lb_size;
1972 			len    -= slot_offset;
1973 			foffset += slot_offset;
1974 			replace_len -= slot_offset;
1975 
1976 			/* mark adjusted */
1977 			slot_offset = 0;
1978 		}
1979 
1980 		/* advance for (the rest of) this slot */
1981 		replace = MIN(len, replace_len);
1982 		DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
1983 
1984 		/* advance for this slot */
1985 		if (replace) {
1986 			/* note: dont round DOWN on num_lb since we then
1987 			 * forget the last partial one */
1988 			num_lb = (replace + lb_size - 1) / lb_size;
1989 			if (flags != UDF_EXT_FREE) {
1990 				udf_free_allocated_space(ump, lb_num,
1991 					udf_rw16(s_ad.loc.part_num), num_lb);
1992 			}
1993 			lb_num      += num_lb;
1994 			len         -= replace;
1995 			foffset     += replace;
1996 			replace_len -= replace;
1997 		}
1998 
1999 		/* do we have a slot tail ? */
2000 		if (len) {
2001 			KASSERT(foffset % lb_size == 0);
2002 
2003 			/* we arrived at our point, push remainder */
2004 			s_ad.len        = udf_rw32(len | flags);
2005 			s_ad.loc.lb_num = udf_rw32(lb_num);
2006 			if (flags == UDF_EXT_FREE)
2007 				s_ad.loc.lb_num = udf_rw32(0);
2008 			node_ad_cpy[cpy_slot++] = s_ad;
2009 			foffset += len;
2010 			slot++;
2011 
2012 			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
2013 				"-> stack\n",
2014 				udf_rw16(s_ad.loc.part_num),
2015 				udf_rw32(s_ad.loc.lb_num),
2016 				UDF_EXT_LEN(udf_rw32(s_ad.len)),
2017 				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2018 			break;
2019 		}
2020 
2021 		slot++;
2022 	}
2023 
2024 	/* 5) copy remainder */
2025 	for (;;) {
2026 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
2027 		if (eof)
2028 			break;
2029 
2030 		len   = udf_rw32(s_ad.len);
2031 		flags = UDF_EXT_FLAGS(len);
2032 		len   = UDF_EXT_LEN(len);
2033 
2034 		if (flags == UDF_EXT_REDIRECT) {
2035 			slot++;
2036 			continue;
2037 		}
2038 
2039 		node_ad_cpy[cpy_slot++] = s_ad;
2040 
2041 		DPRINTF(ALLOC, ("\t5: insert new mapping "
2042 			"vp %d lb %d, len %d, flags %d "
2043 			"-> stack\n",
2044 		udf_rw16(s_ad.loc.part_num),
2045 		udf_rw32(s_ad.loc.lb_num),
2046 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2047 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2048 
2049 		slot++;
2050 	}
2051 
2052 	/* 6) reset node descriptors */
2053 	udf_wipe_adslots(udf_node);
2054 
2055 	/* 7) copy back extents; merge when possible. Recounting on the fly */
2056 	cpy_slots = cpy_slot;
2057 
2058 	c_ad = node_ad_cpy[0];
2059 	slot = 0;
2060 	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
2061 		"lb %d, len %d, flags %d\n",
2062 	udf_rw16(c_ad.loc.part_num),
2063 	udf_rw32(c_ad.loc.lb_num),
2064 	UDF_EXT_LEN(udf_rw32(c_ad.len)),
2065 	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2066 
2067 	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2068 		s_ad = node_ad_cpy[cpy_slot];
2069 
2070 		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
2071 			"lb %d, len %d, flags %d\n",
2072 		udf_rw16(s_ad.loc.part_num),
2073 		udf_rw32(s_ad.loc.lb_num),
2074 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2075 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2076 
2077 		/* see if we can merge */
2078 		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2079 			/* not mergable (anymore) */
2080 			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
2081 				"len %d, flags %d\n",
2082 			udf_rw16(c_ad.loc.part_num),
2083 			udf_rw32(c_ad.loc.lb_num),
2084 			UDF_EXT_LEN(udf_rw32(c_ad.len)),
2085 			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2086 
2087 			error = udf_append_adslot(udf_node, &slot, &c_ad);
2088 			if (error) {
2089 				buf->b_error = error;
2090 				goto out;
2091 			}
2092 			c_ad = s_ad;
2093 			slot++;
2094 		}
2095 	}
2096 
2097 	/* 8) push rest slot (if any) */
2098 	if (UDF_EXT_LEN(c_ad.len) > 0) {
2099 		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
2100 				"len %d, flags %d\n",
2101 		udf_rw16(c_ad.loc.part_num),
2102 		udf_rw32(c_ad.loc.lb_num),
2103 		UDF_EXT_LEN(udf_rw32(c_ad.len)),
2104 		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2105 
2106 		error = udf_append_adslot(udf_node, &slot, &c_ad);
2107 		if (error) {
2108 			buf->b_error = error;
2109 			goto out;
2110 		}
2111 	}
2112 
2113 out:
2114 	udf_count_alloc_exts(udf_node);
2115 
2116 	/* the node's descriptors should now be sane */
2117 	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2118 	UDF_UNLOCK_NODE(udf_node, 0);
2119 
2120 	KASSERT(orig_inflen == new_inflen);
2121 	KASSERT(new_lbrec >= orig_lbrec);
2122 
2123 	return;
2124 }
2125 
2126 /* --------------------------------------------------------------------- */
2127 
/*
 * Grow the file associated with udf_node to `new_size' bytes.
 *
 * For internally (in-descriptor) allocated nodes the data either stays
 * inline when it still fits, or the node is converted to short/long
 * allocation: the inline payload is evacuated through vn_rdwr(), the
 * descriptor slots are wiped and the data is written back afterwards.
 * For extent-based nodes the last extent is padded up to a logical block
 * boundary and the remaining growth is appended as UDF_EXT_FREE extents,
 * chunked to at most `max_len' each (UDF extent length field limit).
 *
 * Returns 0 on success or an errno from udf_append_adslot()/vn_rdwr().
 * The node is locked for the duration; interface matches the other
 * node-resizing helpers in this file.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag  *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t icbflags, len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t my_part, dst_part;
	uint8_t *data_pos, *evacuated_data;
	int addr_type;
	int slot, cpy_slot;
	int isdir, eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* max extent length, rounded down to a whole number of blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	/* pick the (extended) file entry that describes this node */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr       = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
	} else {
		dscr       = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* data lives inline in the descriptor; l_ad == data length */
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			/* internal allocation never records logical blocks */
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0,
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);
			/*
			 * NOTE(review): `error' is not checked here and is
			 * overwritten further down, so a failed evacuation
			 * read is silently ignored — TODO confirm intended.
			 */

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc and select type */
		isdir    = (vp->v_type == VDIR);
		my_part  = udf_rw16(udf_node->loc.loc.part_num);
		dst_part = isdir? ump->fids_part : ump->data_part;
		addr_type = UDF_ICB_SHORT_ALLOC;
		/* cross-partition extents need the long form */
		if (dst_part != my_part)
			addr_type = UDF_ICB_LONG_ALLOC;

		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= addr_type;
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* the old inline contents become one free (unallocated) extent */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			/* redirects link extent blocks; they carry no file data */
			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore); flush the accumulator */
			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* commit the new sizes to the node descriptor */
	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0,
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	udf_count_alloc_exts(udf_node);

	/* the node's descriptors should now be sane */
	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2397 
2398 /* --------------------------------------------------------------------- */
2399 
2400 int
2401 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2402 {
2403 	struct vnode *vp = udf_node->vnode;
2404 	struct udf_mount *ump = udf_node->ump;
2405 	struct file_entry    *fe;
2406 	struct extfile_entry *efe;
2407 	struct icb_tag  *icbtag;
2408 	struct long_ad c_ad, s_ad, *node_ad_cpy;
2409 	uint64_t size_diff, old_size, inflen, objsize;
2410 	uint64_t foffset, end_foffset;
2411 	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2412 	uint32_t lb_size, dscr_size, crclen;
2413 	uint32_t slot_offset;
2414 	uint32_t len, flags, max_len;
2415 	uint32_t num_lb, lb_num;
2416 	uint32_t max_l_ad, l_ad, l_ea;
2417 	uint16_t vpart_num;
2418 	uint8_t *data_pos;
2419 	int icbflags, addr_type;
2420 	int slot, cpy_slot, cpy_slots;
2421 	int eof, error;
2422 
2423 	DPRINTF(ALLOC, ("udf_shrink_node\n"));
2424 
2425 	UDF_LOCK_NODE(udf_node, 0);
2426 	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2427 
2428 	lb_size = udf_rw32(ump->logical_vol->lb_size);
2429 	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2430 
2431 	/* do the work */
2432 	fe  = udf_node->fe;
2433 	efe = udf_node->efe;
2434 	if (fe) {
2435 		icbtag  = &fe->icbtag;
2436 		inflen  = udf_rw64(fe->inf_len);
2437 		objsize = inflen;
2438 		dscr_size  = sizeof(struct file_entry) -1;
2439 		l_ea       = udf_rw32(fe->l_ea);
2440 		l_ad       = udf_rw32(fe->l_ad);
2441 		data_pos = (uint8_t *) fe + dscr_size + l_ea;
2442 	} else {
2443 		icbtag  = &efe->icbtag;
2444 		inflen  = udf_rw64(efe->inf_len);
2445 		objsize = udf_rw64(efe->obj_size);
2446 		dscr_size  = sizeof(struct extfile_entry) -1;
2447 		l_ea       = udf_rw32(efe->l_ea);
2448 		l_ad       = udf_rw32(efe->l_ad);
2449 		data_pos = (uint8_t *) efe + dscr_size + l_ea;
2450 	}
2451 	max_l_ad = lb_size - dscr_size - l_ea;
2452 
2453 	icbflags   = udf_rw16(icbtag->flags);
2454 	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2455 
2456 	old_size  = inflen;
2457 	size_diff = old_size - new_size;
2458 
2459 	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2460 
2461 	/* shrink the node to its new size */
2462 	if (addr_type == UDF_ICB_INTERN_ALLOC) {
2463 		/* only reflect size change directly in the node */
2464 		KASSERT(new_size <= max_l_ad);
2465 		inflen  -= size_diff;
2466 		objsize -= size_diff;
2467 		l_ad    -= size_diff;
2468 		crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2469 		if (fe) {
2470 			fe->inf_len   = udf_rw64(inflen);
2471 			fe->l_ad      = udf_rw32(l_ad);
2472 			fe->tag.desc_crc_len = udf_rw32(crclen);
2473 		} else {
2474 			efe->inf_len  = udf_rw64(inflen);
2475 			efe->obj_size = udf_rw64(objsize);
2476 			efe->l_ad     = udf_rw32(l_ad);
2477 			efe->tag.desc_crc_len = udf_rw32(crclen);
2478 		}
2479 		error = 0;
2480 
2481 		/* clear the space in the descriptor */
2482 		KASSERT(old_size > new_size);
2483 		memset(data_pos + new_size, 0, old_size - new_size);
2484 
2485 		/* TODO zero appened space in buffer! */
2486 		/* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2487 
2488 		/* set new size for uvm */
2489 		uvm_vnp_setsize(vp, new_size);
2490 
2491 		udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2492 		UDF_UNLOCK_NODE(udf_node, 0);
2493 
2494 		KASSERT(new_inflen == orig_inflen - size_diff);
2495 		KASSERT(new_lbrec == orig_lbrec);
2496 		KASSERT(new_lbrec == 0);
2497 
2498 		return 0;
2499 	}
2500 
2501 	/* setup node cleanup extents copy space */
2502 	node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2503 		M_UDFMNT, M_WAITOK);
2504 	memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2505 
2506 	/*
2507 	 * Shrink the node by releasing the allocations and truncate the last
2508 	 * allocation to the new size. If the new size fits into the
2509 	 * allocation descriptor itself, transform it into an
2510 	 * UDF_ICB_INTERN_ALLOC.
2511 	 */
2512 	slot     = 0;
2513 	cpy_slot = 0;
2514 	foffset  = 0;
2515 
2516 	/* 1) copy till first overlap piece to the rewrite buffer */
2517 	for (;;) {
2518 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
2519 		if (eof) {
2520 			DPRINTF(WRITE,
2521 				("Shrink node failed: "
2522 				 "encountered EOF\n"));
2523 			error = EINVAL;
2524 			goto errorout; /* panic? */
2525 		}
2526 		len   = udf_rw32(s_ad.len);
2527 		flags = UDF_EXT_FLAGS(len);
2528 		len   = UDF_EXT_LEN(len);
2529 
2530 		if (flags == UDF_EXT_REDIRECT) {
2531 			slot++;
2532 			continue;
2533 		}
2534 
2535 		end_foffset = foffset + len;
2536 		if (end_foffset > new_size)
2537 			break;	/* found */
2538 
2539 		node_ad_cpy[cpy_slot++] = s_ad;
2540 
2541 		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2542 			"-> stack\n",
2543 			udf_rw16(s_ad.loc.part_num),
2544 			udf_rw32(s_ad.loc.lb_num),
2545 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
2546 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2547 
2548 		foffset = end_foffset;
2549 		slot++;
2550 	}
2551 	slot_offset = new_size - foffset;
2552 
2553 	/* 2) trunc overlapping slot at overlap and copy it */
2554 	if (slot_offset > 0) {
2555 		lb_num    = udf_rw32(s_ad.loc.lb_num);
2556 		vpart_num = udf_rw16(s_ad.loc.part_num);
2557 
2558 		if (flags == UDF_EXT_ALLOCATED) {
2559 			/* note: round DOWN on num_lb */
2560 			lb_num += (slot_offset + lb_size -1) / lb_size;
2561 			num_lb  = (len - slot_offset) / lb_size;
2562 
2563 			udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2564 		}
2565 
2566 		s_ad.len = udf_rw32(slot_offset | flags);
2567 		node_ad_cpy[cpy_slot++] = s_ad;
2568 		slot++;
2569 
2570 		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2571 			"-> stack\n",
2572 			udf_rw16(s_ad.loc.part_num),
2573 			udf_rw32(s_ad.loc.lb_num),
2574 			UDF_EXT_LEN(udf_rw32(s_ad.len)),
2575 			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2576 	}
2577 
2578 	/* 3) delete remainder */
2579 	for (;;) {
2580 		udf_get_adslot(udf_node, slot, &s_ad, &eof);
2581 		if (eof)
2582 			break;
2583 
2584 		len       = udf_rw32(s_ad.len);
2585 		flags     = UDF_EXT_FLAGS(len);
2586 		len       = UDF_EXT_LEN(len);
2587 
2588 		if (flags == UDF_EXT_REDIRECT) {
2589 			slot++;
2590 			continue;
2591 		}
2592 
2593 		DPRINTF(ALLOC, ("\t3: delete remainder "
2594 			"vp %d lb %d, len %d, flags %d\n",
2595 		udf_rw16(s_ad.loc.part_num),
2596 		udf_rw32(s_ad.loc.lb_num),
2597 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2598 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2599 
2600 		if (flags == UDF_EXT_ALLOCATED) {
2601 			lb_num    = udf_rw32(s_ad.loc.lb_num);
2602 			vpart_num = udf_rw16(s_ad.loc.part_num);
2603 			num_lb    = (len + lb_size - 1) / lb_size;
2604 
2605 			udf_free_allocated_space(ump, lb_num, vpart_num,
2606 				num_lb);
2607 		}
2608 
2609 		slot++;
2610 	}
2611 
2612 	/* 4) if it will fit into the descriptor then convert */
2613 	if (new_size < max_l_ad) {
2614 		/*
2615 		 * resque/evacuate old piece by reading it in, and convert it
2616 		 * to internal alloc.
2617 		 */
2618 		if (new_size == 0) {
2619 			/* XXX/TODO only for zero sizing now */
2620 			udf_wipe_adslots(udf_node);
2621 
2622 			icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2623 			icbflags |=  UDF_ICB_INTERN_ALLOC;
2624 			icbtag->flags = udf_rw16(icbflags);
2625 
2626 			inflen  -= size_diff;	KASSERT(inflen == 0);
2627 			objsize -= size_diff;
2628 			l_ad     = new_size;
2629 			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2630 			if (fe) {
2631 				fe->inf_len   = udf_rw64(inflen);
2632 				fe->l_ad      = udf_rw32(l_ad);
2633 				fe->tag.desc_crc_len = udf_rw32(crclen);
2634 			} else {
2635 				efe->inf_len  = udf_rw64(inflen);
2636 				efe->obj_size = udf_rw64(objsize);
2637 				efe->l_ad     = udf_rw32(l_ad);
2638 				efe->tag.desc_crc_len = udf_rw32(crclen);
2639 			}
2640 			/* eventually copy in evacuated piece */
2641 			/* set new size for uvm */
2642 			uvm_vnp_setsize(vp, new_size);
2643 
2644 			free(node_ad_cpy, M_UDFMNT);
2645 			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2646 
2647 			UDF_UNLOCK_NODE(udf_node, 0);
2648 
2649 			KASSERT(new_inflen == orig_inflen - size_diff);
2650 			KASSERT(new_inflen == 0);
2651 			KASSERT(new_lbrec == 0);
2652 
2653 			return 0;
2654 		}
2655 
2656 		printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2657 	}
2658 
2659 	/* 5) reset node descriptors */
2660 	udf_wipe_adslots(udf_node);
2661 
2662 	/* 6) copy back extents; merge when possible. Recounting on the fly */
2663 	cpy_slots = cpy_slot;
2664 
2665 	c_ad = node_ad_cpy[0];
2666 	slot = 0;
2667 	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2668 		s_ad = node_ad_cpy[cpy_slot];
2669 
2670 		DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2671 			"lb %d, len %d, flags %d\n",
2672 		udf_rw16(s_ad.loc.part_num),
2673 		udf_rw32(s_ad.loc.lb_num),
2674 		UDF_EXT_LEN(udf_rw32(s_ad.len)),
2675 		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2676 
2677 		/* see if we can merge */
2678 		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2679 			/* not mergable (anymore) */
2680 			DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2681 				"len %d, flags %d\n",
2682 			udf_rw16(c_ad.loc.part_num),
2683 			udf_rw32(c_ad.loc.lb_num),
2684 			UDF_EXT_LEN(udf_rw32(c_ad.len)),
2685 			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2686 
2687 			error = udf_append_adslot(udf_node, &slot, &c_ad);
2688 			if (error)
2689 				goto errorout; /* panic? */
2690 			c_ad = s_ad;
2691 			slot++;
2692 		}
2693 	}
2694 
2695 	/* 7) push rest slot (if any) */
2696 	if (UDF_EXT_LEN(c_ad.len) > 0) {
2697 		DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2698 				"len %d, flags %d\n",
2699 		udf_rw16(c_ad.loc.part_num),
2700 		udf_rw32(c_ad.loc.lb_num),
2701 		UDF_EXT_LEN(udf_rw32(c_ad.len)),
2702 		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2703 
2704 		error = udf_append_adslot(udf_node, &slot, &c_ad);
2705 		if (error)
2706 			goto errorout; /* panic? */
2707 		;
2708 	}
2709 
2710 	inflen  -= size_diff;
2711 	objsize -= size_diff;
2712 	if (fe) {
2713 		fe->inf_len   = udf_rw64(inflen);
2714 	} else {
2715 		efe->inf_len  = udf_rw64(inflen);
2716 		efe->obj_size = udf_rw64(objsize);
2717 	}
2718 	error = 0;
2719 
2720 	/* set new size for uvm */
2721 	uvm_vnp_setsize(vp, new_size);
2722 
2723 errorout:
2724 	free(node_ad_cpy, M_UDFMNT);
2725 
2726 	udf_count_alloc_exts(udf_node);
2727 
2728 	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2729 	UDF_UNLOCK_NODE(udf_node, 0);
2730 
2731 	KASSERT(new_inflen == orig_inflen - size_diff);
2732 
2733 	return error;
2734 }
2735 
2736