1 /* $NetBSD: udf_allocation.c,v 1.21 2009/04/21 16:19:00 reinoud Exp $ */ 2 3 /* 4 * Copyright (c) 2006, 2008 Reinoud Zandijk 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26 * 27 */ 28 29 #include <sys/cdefs.h> 30 #ifndef lint 31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.21 2009/04/21 16:19:00 reinoud Exp $"); 32 #endif /* not lint */ 33 34 35 #if defined(_KERNEL_OPT) 36 #include "opt_quota.h" 37 #include "opt_compat_netbsd.h" 38 #endif 39 40 /* TODO strip */ 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/sysctl.h> 44 #include <sys/namei.h> 45 #include <sys/proc.h> 46 #include <sys/kernel.h> 47 #include <sys/vnode.h> 48 #include <miscfs/genfs/genfs_node.h> 49 #include <sys/mount.h> 50 #include <sys/buf.h> 51 #include <sys/file.h> 52 #include <sys/device.h> 53 #include <sys/disklabel.h> 54 #include <sys/ioctl.h> 55 #include <sys/malloc.h> 56 #include <sys/dirent.h> 57 #include <sys/stat.h> 58 #include <sys/conf.h> 59 #include <sys/kauth.h> 60 #include <sys/kthread.h> 61 #include <dev/clock_subr.h> 62 63 #include <fs/udf/ecma167-udf.h> 64 #include <fs/udf/udf_mount.h> 65 66 #include "udf.h" 67 #include "udf_subr.h" 68 #include "udf_bswap.h" 69 70 71 #define VTOI(vnode) ((struct udf_node *) vnode->v_data) 72 73 static void udf_record_allocation_in_node(struct udf_mount *ump, 74 struct buf *buf, uint16_t vpart_num, uint64_t *mapping, 75 struct long_ad *node_ad_cpy); 76 77 /* 78 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations; 79 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality 80 * since actions are most likely sequencial and thus seeking doesn't need 81 * searching for the same or adjacent position again. 
 */

/* --------------------------------------------------------------------- */

#if 0
#if 1
/*
 * Debug helper: dump a node's allocation descriptors to the console.
 * Only active when UDF_DEBUG_NODEDUMP is set in udf_verbose.
 */
static void
udf_node_dump(struct udf_node *udf_node) {
        struct file_entry *fe;
        struct extfile_entry *efe;
        struct icb_tag *icbtag;
        struct long_ad s_ad;
        uint64_t inflen;
        uint32_t icbflags, addr_type;
        uint32_t len, lb_num;
        uint32_t flags;
        int part_num;
        int lb_size, eof, slot;

        if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
                return;

        lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

        /* a node is backed by either a file entry or an extended file entry */
        fe = udf_node->fe;
        efe = udf_node->efe;
        if (fe) {
                icbtag = &fe->icbtag;
                inflen = udf_rw64(fe->inf_len);
        } else {
                icbtag = &efe->icbtag;
                inflen = udf_rw64(efe->inf_len);
        }

        icbflags = udf_rw16(icbtag->flags);
        addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

        printf("udf_node_dump %p :\n", udf_node);

        if (addr_type == UDF_ICB_INTERN_ALLOC) {
                /* data embedded in the node itself; no extents to walk */
                printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
                return;
        }

        printf("\tInflen = %"PRIu64"\n", inflen);
        printf("\t\t");

        /* walk all allocation descriptor slots until end-of-file marker */
        slot = 0;
        for (;;) {
                udf_get_adslot(udf_node, slot, &s_ad, &eof);
                if (eof)
                        break;
                part_num = udf_rw16(s_ad.loc.part_num);
                lb_num = udf_rw32(s_ad.loc.lb_num);
                len = udf_rw32(s_ad.len);
                /* top two bits of the length word carry the extent flags */
                flags = UDF_EXT_FLAGS(len);
                len = UDF_EXT_LEN(len);

                printf("[");
                if (part_num >= 0)
                        printf("part %d, ", part_num);
                printf("lb_num %d, len %d", lb_num, len);
                if (flags)
                        printf(", flags %d", flags>>30);
                printf("] ");

                if (flags == UDF_EXT_REDIRECT) {
                        /* slot redirects into a new allocation extent */
                        printf("\n\textent END\n\tallocation extent\n\t\t");
                }

                slot++;
        }
        printf("\n\tl_ad END\n\n");
}
#else
#define udf_node_dump(a)
#endif


/*
 * Paranoia check: verify that the given run of logical blocks is marked
 * allocated (bit cleared) in the partition's unallocated-space bitmap.
 * Check-only: mismatches are printed, never asserted (KASSERT commented out).
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
        uint32_t lb_num, uint32_t num_lb)
{
        struct udf_bitmap *bitmap;
        struct part_desc *pdesc;
        uint32_t ptov;
        uint32_t bitval;
        uint8_t *bpos;
        int bit;
        int phys_part;
        int ok;

        DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
                "part %d + %d sect\n", lb_num, vpart_num, num_lb));

        /* get partition backing up this vpart_num */
        pdesc = ump->partitions[ump->vtop[vpart_num]];

        switch (ump->vtop_tp[vpart_num]) {
        case UDF_VTOP_TYPE_PHYS :
        case UDF_VTOP_TYPE_SPARABLE :
                /* free space to freed or unallocated space bitmap */
                ptov = udf_rw32(pdesc->start_loc);
                phys_part = ump->vtop[vpart_num];

                /* use unallocated bitmap */
                bitmap = &ump->part_unalloc_bits[phys_part];

                /* if no bitmaps are defined, bail out */
                if (bitmap->bits == NULL)
                        break;

                /* check bits; a SET bit means the block is free */
                KASSERT(bitmap->bits);
                ok = 1;
                bpos = bitmap->bits + lb_num/8;
                bit = lb_num % 8;
                while (num_lb > 0) {
                        bitval = (1 << bit);
                        DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
                                lb_num, bpos, bit));
                        KASSERT(bitmap->bits + lb_num/8 == bpos);
                        if (*bpos & bitval) {
                                printf("\tlb_num %d is NOT marked busy\n",
                                        lb_num);
                                ok = 0;
                        }
                        lb_num++; num_lb--;
                        bit = (bit + 1) % 8;
                        if (bit == 0)
                                bpos++;
                }
                if (!ok) {
                        /* KASSERT(0); */
                }

                break;
        case UDF_VTOP_TYPE_VIRT :
                /* TODO check space */
                KASSERT(num_lb == 1);
                break;
        case UDF_VTOP_TYPE_META :
                /* TODO check space in the metadata bitmap */
        default:
                /* not implemented */
                break;
        }
}


/*
 * Sanity check a node: recount information length and recorded logical
 * blocks by walking all allocation descriptors, verify the descriptor tail
 * is zeroed, and assert the counts match the on-descriptor values.
 * Results are also returned via cnt_inflen / cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
        uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
        union dscrptr *dscr;
        struct file_entry *fe;
        struct extfile_entry *efe;
        struct icb_tag *icbtag;
        struct long_ad s_ad;
        uint64_t inflen, logblksrec;
        uint32_t icbflags, addr_type;
        uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
        uint16_t part_num;
        uint8_t *data_pos;
        int dscr_size, lb_size, flags, whole_lb;
        int i, slot, eof;

//      KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

        if (1)
                udf_node_dump(udf_node);

        lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

        fe = udf_node->fe;
        efe = udf_node->efe;
        if (fe) {
                dscr = (union dscrptr *) fe;
                icbtag = &fe->icbtag;
                inflen = udf_rw64(fe->inf_len);
                dscr_size = sizeof(struct file_entry) -1;
                logblksrec = udf_rw64(fe->logblks_rec);
                l_ad = udf_rw32(fe->l_ad);
                l_ea = udf_rw32(fe->l_ea);
        } else {
                dscr = (union dscrptr *) efe;
                icbtag = &efe->icbtag;
                inflen = udf_rw64(efe->inf_len);
                dscr_size = sizeof(struct extfile_entry) -1;
                logblksrec = udf_rw64(efe->logblks_rec);
                l_ad = udf_rw32(efe->l_ad);
                l_ea = udf_rw32(efe->l_ea);
        }
        /* allocation descriptors live after the extended attributes area */
        data_pos = (uint8_t *) dscr + dscr_size + l_ea;
        max_l_ad = lb_size - dscr_size - l_ea;
        icbflags = udf_rw16(icbtag->flags);
        addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

        /* check if tail is zero */
        DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
        for (i = l_ad; i < max_l_ad; i++) {
                if (data_pos[i] != 0)
                        printf( "sanity_check: violation: node byte %d "
                                "has value %d\n", i, data_pos[i]);
        }

        /* reset counters */
        *cnt_inflen = 0;
        *cnt_logblksrec = 0;

        if (addr_type == UDF_ICB_INTERN_ALLOC) {
                /* embedded data: l_ad is the byte length of the data itself */
                KASSERT(l_ad <= max_l_ad);
                KASSERT(l_ad == inflen);
                *cnt_inflen = inflen;
                return;
        }

        /* start counting */
        whole_lb = 1;
        slot = 0;
        for (;;) {
                udf_get_adslot(udf_node, slot, &s_ad, &eof);
                if (eof)
                        break;
                /* only the last extent may be a partial logical block */
                KASSERT(whole_lb == 1);

                part_num = udf_rw16(s_ad.loc.part_num);
                lb_num = udf_rw32(s_ad.loc.lb_num);
                len = udf_rw32(s_ad.len);
                flags = UDF_EXT_FLAGS(len);
                len = UDF_EXT_LEN(len);

                if (flags != UDF_EXT_REDIRECT) {
                        *cnt_inflen += len;
                        if (flags == UDF_EXT_ALLOCATED) {
                                *cnt_logblksrec += (len + lb_size -1) / lb_size;
                        }
                } else {
                        KASSERT(len == lb_size);
                }
                /* check allocation */
                if (flags == UDF_EXT_ALLOCATED)
                        udf_assert_allocated(udf_node->ump, part_num, lb_num,
                                (len + lb_size - 1) / lb_size);

                /* check whole lb */
                whole_lb = ((len % lb_size) == 0);

                slot++;
        }
        /* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

        KASSERT(*cnt_inflen == inflen);
        KASSERT(*cnt_logblksrec == logblksrec);

//      KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
#else
/*
 * Non-debug variant: no recounting, just report the descriptor's own
 * information length and recorded-blocks values.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
        uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
        struct file_entry *fe;
        struct extfile_entry *efe;
        struct icb_tag *icbtag;
        uint64_t inflen, logblksrec;
        int dscr_size, lb_size;

        lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

        fe = udf_node->fe;
        efe = udf_node->efe;
        if (fe) {
                icbtag = &fe->icbtag;
                inflen = udf_rw64(fe->inf_len);
                dscr_size = sizeof(struct file_entry) -1;
                logblksrec = udf_rw64(fe->logblks_rec);
        } else {
                icbtag = &efe->icbtag;
                inflen = udf_rw64(efe->inf_len);
                dscr_size = sizeof(struct extfile_entry) -1;
                logblksrec = udf_rw64(efe->logblks_rec);
        }
        *cnt_logblksrec = logblksrec;
        *cnt_inflen = inflen;
}
#endif

/* --------------------------------------------------------------------- */

/*
 * Translate a virtual partition address (icb_loc) to a physical disc
 * logical block number in *lb_numres, and in *extres the number of
 * consecutive blocks this translation remains valid for.
 * Returns 0 on success, EINVAL (or a VAT read error) otherwise.
 */
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
        uint32_t *lb_numres, uint32_t *extres)
{
        struct part_desc *pdesc;
        struct spare_map_entry *sme;
        struct long_ad s_icb_loc;
        uint64_t foffset, end_foffset;
        uint32_t lb_size, len;
        uint32_t lb_num, lb_rel, lb_packet;
        uint32_t udf_rw32_lbmap, ext_offset;
        uint16_t vpart;
        int rel, part, error, eof, slot, flags;

        assert(ump && icb_loc && lb_numres);

        vpart = udf_rw16(icb_loc->loc.part_num);
        lb_num = udf_rw32(icb_loc->loc.lb_num);
        if (vpart > UDF_VTOP_RAWPART)
                return EINVAL;

translate_again:
        part = ump->vtop[vpart];
        pdesc = ump->partitions[part];

        switch (ump->vtop_tp[vpart]) {
        case UDF_VTOP_TYPE_RAW :
                /* 1:1 to the end of the device */
                *lb_numres = lb_num;
                *extres = INT_MAX;
                return 0;
        case UDF_VTOP_TYPE_PHYS :
                /* transform into its disc logical block */
                if (lb_num > udf_rw32(pdesc->part_len))
                        return EINVAL;
                *lb_numres = lb_num + udf_rw32(pdesc->start_loc);

                /* extent from here to the end of the partition */
                *extres = udf_rw32(pdesc->part_len) - lb_num;
                return 0;
        case UDF_VTOP_TYPE_VIRT :
                /* only maps one logical block, lookup in VAT */
                if (lb_num >= ump->vat_entries)         /* XXX > or >= ? */
                        return EINVAL;

                /* lookup in virtual allocation table file */
                mutex_enter(&ump->allocate_mutex);
                error = udf_vat_read(ump->vat_node,
                                (uint8_t *) &udf_rw32_lbmap, 4,
                                ump->vat_offset + lb_num * 4);
                mutex_exit(&ump->allocate_mutex);

                if (error)
                        return error;

                lb_num = udf_rw32(udf_rw32_lbmap);

                /* transform into its disc logical block */
                if (lb_num > udf_rw32(pdesc->part_len))
                        return EINVAL;
                *lb_numres = lb_num + udf_rw32(pdesc->start_loc);

                /* just one logical block */
                *extres = 1;
                return 0;
        case UDF_VTOP_TYPE_SPARABLE :
                /* check if the packet containing the lb_num is remapped */
                lb_packet = lb_num / ump->sparable_packet_size;
                lb_rel = lb_num % ump->sparable_packet_size;

                for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
                        sme = &ump->sparing_table->entries[rel];
                        if (lb_packet == udf_rw32(sme->org)) {
                                /* NOTE maps to absolute disc logical block! */
                                *lb_numres = udf_rw32(sme->map) + lb_rel;
                                *extres = ump->sparable_packet_size - lb_rel;
                                return 0;
                        }
                }

                /* not remapped; transform into its disc logical block */
                if (lb_num > udf_rw32(pdesc->part_len))
                        return EINVAL;
                *lb_numres = lb_num + udf_rw32(pdesc->start_loc);

                /* rest of block */
                *extres = ump->sparable_packet_size - lb_rel;
                return 0;
        case UDF_VTOP_TYPE_META :
                /* we have to look into the file's allocation descriptors */

                /* use metadatafile allocation mutex */
                lb_size = udf_rw32(ump->logical_vol->lb_size);

                UDF_LOCK_NODE(ump->metadata_node, 0);

                /* get first overlapping extent */
                foffset = 0;
                slot = 0;
                for (;;) {
                        udf_get_adslot(ump->metadata_node,
                                slot, &s_icb_loc, &eof);
                        DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
                                "len = %d, lb_num = %d, part = %d\n",
                                slot, eof,
                                UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
                                UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
                                udf_rw32(s_icb_loc.loc.lb_num),
                                udf_rw16(s_icb_loc.loc.part_num)));
                        if (eof) {
                                DPRINTF(TRANSLATE,
                                        ("Meta partition translation "
                                         "failed: can't seek location\n"));
                                UDF_UNLOCK_NODE(ump->metadata_node, 0);
                                return EINVAL;
                        }
                        len = udf_rw32(s_icb_loc.len);
                        flags = UDF_EXT_FLAGS(len);
                        len = UDF_EXT_LEN(len);

                        if (flags == UDF_EXT_REDIRECT) {
                                slot++;
                                continue;
                        }

                        end_foffset = foffset + len;

                        if (end_foffset > lb_num * lb_size)
                                break;  /* found */
                        foffset = end_foffset;
                        slot++;
                }
                /* found overlapping slot */
                ext_offset = lb_num * lb_size - foffset;

                /* process extent offset */
                lb_num = udf_rw32(s_icb_loc.loc.lb_num);
                vpart = udf_rw16(s_icb_loc.loc.part_num);
                lb_num += (ext_offset + lb_size -1) / lb_size;
                ext_offset = 0;

                UDF_UNLOCK_NODE(ump->metadata_node, 0);
                if (flags != UDF_EXT_ALLOCATED) {
                        DPRINTF(TRANSLATE, ("Metadata partition translation "
                                            "failed: not allocated\n"));
                        return EINVAL;
                }

                /*
                 * vpart and lb_num are updated, translate again since we
                 * might be mapped on sparable media
                 */
                goto translate_again;
        default:
                printf("UDF vtop translation scheme %d unimplemented yet\n",
                        ump->vtop_tp[vpart]);
        }

        return EINVAL;
}


/* XXX provisional primitive braindead version */
/* TODO use ext_res */
/*
 * Translate a list of `sectors' logical block numbers (lmapping) on the
 * given virtual partition to physical block numbers (pmapping), one call
 * to udf_translate_vtop() per sector.
 * NOTE(review): the return value of udf_translate_vtop() is ignored here,
 * so a failed translation leaves *pmapping stale — confirm intended.
 */
void
udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
        uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
{
        struct long_ad loc;
        uint32_t lb_numres, ext_res;
        int sector;

        for (sector = 0; sector < sectors; sector++) {
                memset(&loc, 0, sizeof(struct long_ad));
                loc.loc.part_num = udf_rw16(vpart_num);
                loc.loc.lb_num = udf_rw32(*lmapping);
                udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
                *pmapping = lb_numres;
                lmapping++; pmapping++;
        }
}


/* --------------------------------------------------------------------- */

/*
 * Translate an extent (in logical_blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */

/*
 * Translate `num_lb' logical blocks of a file starting at file block `from'
 * into physical disc block numbers written to `map'. Intern-allocated nodes
 * get the single marker UDF_TRANS_INTERN; free/unrecorded extents map to
 * UDF_TRANS_ZERO. Returns 0 or an errno.
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
                          uint32_t from, uint32_t num_lb,
                          uint64_t *map)
{
        struct udf_mount *ump;
        struct icb_tag *icbtag;
        struct long_ad t_ad, s_ad;
        uint64_t transsec;
        uint64_t foffset, end_foffset;
        uint32_t transsec32;
        uint32_t lb_size;
        uint32_t ext_offset;
        uint32_t lb_num, len;
        uint32_t overlap, translen;
        uint16_t vpart_num;
        int eof, error, flags;
        int slot, addr_type, icbflags;

        if (!udf_node)
                return ENOENT;

        KASSERT(num_lb > 0);

        UDF_LOCK_NODE(udf_node, 0);

        /* initialise derivative vars */
        ump = udf_node->ump;
        lb_size = udf_rw32(ump->logical_vol->lb_size);

        if (udf_node->fe) {
                icbtag = &udf_node->fe->icbtag;
        } else {
                icbtag = &udf_node->efe->icbtag;
        }
        icbflags = udf_rw16(icbtag->flags);
        addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

        /* do the work */
        if (addr_type == UDF_ICB_INTERN_ALLOC) {
                *map = UDF_TRANS_INTERN;
                UDF_UNLOCK_NODE(udf_node, 0);
                return 0;
        }

        /* find first overlapping extent */
        foffset = 0;
        slot = 0;
        for (;;) {
                udf_get_adslot(udf_node, slot, &s_ad, &eof);
                DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
                        "lb_num = %d, part = %d\n", slot, eof,
                        UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
                        UDF_EXT_LEN(udf_rw32(s_ad.len)),
                        udf_rw32(s_ad.loc.lb_num),
                        udf_rw16(s_ad.loc.part_num)));
                if (eof) {
                        DPRINTF(TRANSLATE,
                                ("Translate file extent "
                                 "failed: can't seek location\n"));
                        UDF_UNLOCK_NODE(udf_node, 0);
                        return EINVAL;
                }
                len = udf_rw32(s_ad.len);
                flags = UDF_EXT_FLAGS(len);
                len = UDF_EXT_LEN(len);
                lb_num = udf_rw32(s_ad.loc.lb_num);

                if (flags == UDF_EXT_REDIRECT) {
                        slot++;
                        continue;
                }

                end_foffset = foffset + len;

                if (end_foffset > from * lb_size)
                        break;  /* found */
                foffset = end_foffset;
                slot++;
        }
        /* found overlapping slot */
        ext_offset = from * lb_size - foffset;

        for (;;) {
                udf_get_adslot(udf_node, slot, &s_ad, &eof);
                DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
                        "lb_num = %d, part = %d\n", slot, eof,
                        UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
                        UDF_EXT_LEN(udf_rw32(s_ad.len)),
                        udf_rw32(s_ad.loc.lb_num),
                        udf_rw16(s_ad.loc.part_num)));
                if (eof) {
                        DPRINTF(TRANSLATE,
                                ("Translate file extent "
                                 "failed: past eof\n"));
                        UDF_UNLOCK_NODE(udf_node, 0);
                        return EINVAL;
                }

                len = udf_rw32(s_ad.len);
                flags = UDF_EXT_FLAGS(len);
                len = UDF_EXT_LEN(len);

                lb_num = udf_rw32(s_ad.loc.lb_num);
                vpart_num = udf_rw16(s_ad.loc.part_num);

                end_foffset = foffset + len;

                /* process extent, don't forget to advance on ext_offset! */
                lb_num += (ext_offset + lb_size -1) / lb_size;
                overlap = (len - ext_offset + lb_size -1) / lb_size;
                ext_offset = 0;

                /*
                 * note that the while(){} is necessary since the extent
                 * udf_translate_vtop() returns doesn't have to span the
                 * whole extent.
                 */

                overlap = MIN(overlap, num_lb);
                while (overlap && (flags != UDF_EXT_REDIRECT)) {
                        switch (flags) {
                        case UDF_EXT_FREE :
                        case UDF_EXT_ALLOCATED_BUT_NOT_USED :
                                /* sparse/unrecorded: reads return zeroes */
                                transsec = UDF_TRANS_ZERO;
                                translen = overlap;
                                while (overlap && num_lb && translen) {
                                        *map++ = transsec;
                                        lb_num++;
                                        overlap--; num_lb--; translen--;
                                }
                                break;
                        case UDF_EXT_ALLOCATED :
                                t_ad.loc.lb_num = udf_rw32(lb_num);
                                t_ad.loc.part_num = udf_rw16(vpart_num);
                                error = udf_translate_vtop(ump,
                                                &t_ad, &transsec32, &translen);
                                transsec = transsec32;
                                if (error) {
                                        UDF_UNLOCK_NODE(udf_node, 0);
                                        return error;
                                }
                                while (overlap && num_lb && translen) {
                                        *map++ = transsec;
                                        lb_num++; transsec++;
                                        overlap--; num_lb--; translen--;
                                }
                                break;
                        default:
                                DPRINTF(TRANSLATE,
                                        ("Translate file extent "
                                         "failed: bad flags %x\n", flags));
                                UDF_UNLOCK_NODE(udf_node, 0);
                                return EINVAL;
                        }
                }
                if (num_lb == 0)
                        break;

                if (flags != UDF_EXT_REDIRECT)
                        foffset = end_foffset;
                slot++;
        }
        UDF_UNLOCK_NODE(udf_node, 0);

        return 0;
}

/* --------------------------------------------------------------------- */

/*
 * Find a free slot (entry 0xffffffff) in the VAT, extending the VAT when
 * none is found, and mark it with the initialiser value 0xfffffffe.
 * NOTE(review): always returns 0; a failed udf_vat_read() is only logged
 * and then falls through to extending the VAT — confirm intended.
 */
static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
        uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
        uint8_t *blob;
        int entry, chunk, found, error;

        KASSERT(ump);
        KASSERT(ump->logical_vol);

        lb_size = udf_rw32(ump->logical_vol->lb_size);
        blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

        /* TODO static allocation of search chunk */

        /* resume searching at the last known free slot */
        lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
        found = 0;
        error = 0;
        entry = 0;
        do {
                chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
                if (chunk <= 0)
                        break;
                /* load in chunk */
                error = udf_vat_read(ump->vat_node, blob, chunk,
                                ump->vat_offset + lb_num * 4);

                if (error)
                        break;

                /* search this chunk */
                for (entry=0; entry < chunk /4; entry++, lb_num++) {
                        udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
                        lb_map = udf_rw32(udf_rw32_lbmap);
                        if (lb_map == 0xffffffff) {
                                found = 1;
                                break;
                        }
                }
        } while (!found);
        if (error) {
                printf("udf_search_free_vatloc: error reading in vat chunk "
                        "(lb %d, size %d)\n", lb_num, chunk);
        }

        if (!found) {
                /* extend VAT */
                DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
                lb_num = ump->vat_entries;
                ump->vat_entries++;
        }

        /* mark entry with initialiser just in case */
        lb_map = udf_rw32(0xfffffffe);
        udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
                ump->vat_offset + lb_num *4);
        ump->vat_last_free_lb = lb_num;

        free(blob, M_UDFTEMP);
        *lbnumres = lb_num;
        return 0;
}


/*
 * Allocate up to *num_lb blocks from a space bitmap (set bit == free),
 * writing the chosen block numbers to lmappos. *num_lb is decremented per
 * allocated block; a non-zero remainder on return means the bitmap is full.
 * data_pos/metadata_pos are rolling search start positions; two passes
 * allow wrap-around from the saved position.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
        uint32_t *num_lb, uint64_t *lmappos)
{
        uint32_t offset, lb_num, bit;
        int32_t diff;
        uint8_t *bpos;
        int pass;

        if (!ismetadata) {
                /* heuristic to keep the two pointers not too close */
                diff = bitmap->data_pos - bitmap->metadata_pos;
                if ((diff >= 0) && (diff < 1024))
                        bitmap->data_pos = bitmap->metadata_pos + 1024;
        }
        offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
        offset &= ~7;
        for (pass = 0; pass < 2; pass++) {
                if (offset >= bitmap->max_offset)
                        offset = 0;

                while (offset < bitmap->max_offset) {
                        if (*num_lb == 0)
                                break;

                        /* use first free (set) bit in this byte */
                        bpos = bitmap->bits + offset/8;
                        bit = ffs(*bpos);       /* returns 0 or 1..8 */
                        if (bit == 0) {
                                offset += 8;
                                continue;
                        }

                        /* check for ffs overshoot */
                        if (offset + bit-1 >= bitmap->max_offset) {
                                offset = bitmap->max_offset;
                                break;
                        }

                        DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
                                offset + bit -1, bpos, bit-1));
                        /* clear the bit: mark block allocated */
                        *bpos &= ~(1 << (bit-1));
                        lb_num = offset + bit-1;
                        *lmappos++ = lb_num;
                        *num_lb = *num_lb - 1;
                        // offset = (offset & ~7);
                }
        }

        /* remember where we got to for the next allocation */
        if (ismetadata) {
                bitmap->metadata_pos = offset;
        } else {
                bitmap->data_pos = offset;
        }
}


/*
 * Return `num_lb' blocks starting at lb_num to a space bitmap by setting
 * their bits (set bit == free). Handled in three phases: leading partial
 * byte, whole bytes, trailing partial byte. KASSERTs guard double-free.
 */
static void
udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
{
        uint32_t offset;
        uint32_t bit, bitval;
        uint8_t *bpos;

        offset = lb_num;

        /* starter bits */
        bpos = bitmap->bits + offset/8;
        bit = offset % 8;
        while ((bit != 0) && (num_lb > 0)) {
                bitval = (1 << bit);
                KASSERT((*bpos & bitval) == 0);
                DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
                        offset, bpos, bit));
                *bpos |= bitval;
                offset++; num_lb--;
                bit = (bit + 1) % 8;
        }
        if (num_lb == 0)
                return;

        /* whole bytes */
        KASSERT(bit == 0);
        bpos = bitmap->bits + offset / 8;
        while (num_lb >= 8) {
                KASSERT((*bpos == 0));
                DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
                *bpos = 255;
                offset += 8; num_lb -= 8;
                bpos++;
        }

        /* stop bits */
        KASSERT(num_lb < 8);
        bit = 0;
        while (num_lb > 0) {
                bitval = (1 << bit);
                KASSERT((*bpos & bitval) == 0);
                DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
                        offset, bpos, bit));
                *bpos |= bitval;
                offset++; num_lb--;
                bit = (bit + 1) % 8;
        }
}


/*
 * Allocate a contiguous sequence of sectornumbers on the given virtual
 * partition, using the allocation scheme (VAT, sequential, space map,
 * metadata bitmap) configured for it. Results go into lmapping; returns 0
 * or ENOSPC. Takes and releases the mount's allocate_mutex.
 */
static int
udf_allocate_space(struct udf_mount *ump, int udf_c_type,
        uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
        struct mmc_trackinfo *alloc_track, *other_track;
        struct udf_bitmap *bitmap;
        struct part_desc *pdesc;
        struct logvol_int_desc *lvid;
        uint64_t *lmappos;
        uint32_t ptov, lb_num, *freepos, free_lbs;
        int lb_size, alloc_num_lb;
        int alloc_type, error;
        int is_node;

        DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
                udf_c_type, vpart_num, num_lb));
        mutex_enter(&ump->allocate_mutex);

        lb_size = udf_rw32(ump->logical_vol->lb_size);
        KASSERT(lb_size == ump->discinfo.sector_size);

        /* XXX TODO check disc space */

        alloc_type = ump->vtop_alloc[vpart_num];
        is_node = (udf_c_type == UDF_C_NODE);

        lmappos = lmapping;
        error = 0;
        switch (alloc_type) {
        case UDF_ALLOC_VAT :
                /* search empty slot in VAT file */
                KASSERT(num_lb == 1);
                error = udf_search_free_vatloc(ump, &lb_num);
                if (!error)
                        *lmappos = lb_num;
                break;
        case UDF_ALLOC_SEQUENTIAL :
                /* sequential allocation on recordable media */
                /* get partition backing up this vpart_num_num */
                pdesc = ump->partitions[ump->vtop[vpart_num]];

                /* calculate offset from physical base partition */
                ptov = udf_rw32(pdesc->start_loc);

                /* get our track descriptors */
                if (vpart_num == ump->node_part) {
                        alloc_track = &ump->metadata_track;
                        other_track = &ump->data_track;
                } else {
                        alloc_track = &ump->data_track;
                        other_track = &ump->metadata_track;
                }

                /* allocate */
                for (lb_num = 0; lb_num < num_lb; lb_num++) {
                        *lmappos++ = alloc_track->next_writable - ptov;
                        alloc_track->next_writable++;
                        alloc_track->free_blocks--;
                }

                /* keep other track up-to-date */
                if (alloc_track->tracknr == other_track->tracknr)
                        memcpy(other_track, alloc_track,
                                sizeof(struct mmc_trackinfo));
                break;
        case UDF_ALLOC_SPACEMAP :
                /* try to allocate on unallocated bits */
                alloc_num_lb = num_lb;
                bitmap = &ump->part_unalloc_bits[vpart_num];
                udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
                ump->lvclose |= UDF_WRITE_PART_BITMAPS;

                /* have we allocated all? */
                if (alloc_num_lb) {
                        /* TODO convert freed to unalloc and try again */
                        /* free allocated piece for now */
                        lmappos = lmapping;
                        for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
                                udf_bitmap_free(bitmap, *lmappos++, 1);
                        }
                        error = ENOSPC;
                }
                if (!error) {
                        /* adjust freecount */
                        lvid = ump->logvol_integrity;
                        freepos = &lvid->tables[0] + vpart_num;
                        free_lbs = udf_rw32(*freepos);
                        *freepos = udf_rw32(free_lbs - num_lb);
                }
                break;
        case UDF_ALLOC_METABITMAP :             /* UDF 2.50, 2.60 BluRay-RE */
                /* allocate on metadata unallocated bits */
                alloc_num_lb = num_lb;
                bitmap = &ump->metadata_unalloc_bits;
                udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
                ump->lvclose |= UDF_WRITE_PART_BITMAPS;

                /* have we allocated all? */
                if (alloc_num_lb) {
                        /* YIKES! TODO we need to extend the metadata partition */
                        /* free allocated piece for now */
                        lmappos = lmapping;
                        for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
                                udf_bitmap_free(bitmap, *lmappos++, 1);
                        }
                        error = ENOSPC;
                }
                if (!error) {
                        /* adjust freecount */
                        lvid = ump->logvol_integrity;
                        freepos = &lvid->tables[0] + vpart_num;
                        free_lbs = udf_rw32(*freepos);
                        *freepos = udf_rw32(free_lbs - num_lb);
                }
                break;
        case UDF_ALLOC_METASEQUENTIAL :         /* UDF 2.60       BluRay-R  */
        case UDF_ALLOC_RELAXEDSEQUENTIAL :      /* UDF 2.50/~meta BluRay-R  */
                printf("ALERT: udf_allocate_space : allocation %d "
                        "not implemented yet!\n", alloc_type);
                /* TODO implement, doesn't have to be contiguous */
                error = ENOSPC;
                break;
        }

#ifdef DEBUG
        if (udf_verbose & UDF_DEBUG_ALLOC) {
                lmappos = lmapping;
                printf("udf_allocate_space, allocated logical lba :\n");
                for (lb_num = 0; lb_num < num_lb; lb_num++) {
                        printf("%s %"PRIu64",", (lb_num > 0)?",":"",
                                *lmappos++);
                }
                printf("\n");
        }
#endif
        mutex_exit(&ump->allocate_mutex);

        return error;
}

/* --------------------------------------------------------------------- */

/*
 * Return `num_lb' blocks starting at virtual lb_num on the given partition
 * to free space: the freed/unallocated bitmap for physical and sparable
 * partitions, the VAT for virtual partitions, or the metadata bitmap.
 * Adjusts the logical volume integrity free-space counter where relevant.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
        uint16_t vpart_num, uint32_t num_lb)
{
        struct udf_bitmap *bitmap;
        struct part_desc *pdesc;
        struct logvol_int_desc *lvid;
        uint32_t ptov, lb_map, udf_rw32_lbmap;
        uint32_t *freepos, free_lbs;
        int phys_part;
        int error;

        DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
                "part %d + %d sect\n", lb_num, vpart_num, num_lb));

        /* no use freeing zero length */
        if (num_lb == 0)
                return;

        mutex_enter(&ump->allocate_mutex);

        /* get partition backing up this vpart_num */
        pdesc = ump->partitions[ump->vtop[vpart_num]];

        switch (ump->vtop_tp[vpart_num]) {
        case UDF_VTOP_TYPE_PHYS :
        case UDF_VTOP_TYPE_SPARABLE :
                /* free space to freed or unallocated space bitmap */
                ptov = udf_rw32(pdesc->start_loc);
                phys_part = ump->vtop[vpart_num];

                /* first try freed space bitmap */
                bitmap = &ump->part_freed_bits[phys_part];

                /* if not defined, use unallocated bitmap */
                if (bitmap->bits == NULL)
                        bitmap = &ump->part_unalloc_bits[phys_part];

                /* if no bitmaps are defined, bail out; XXX OK? */
                if (bitmap->bits == NULL)
                        break;

                /* free bits if its defined */
                KASSERT(bitmap->bits);
                ump->lvclose |= UDF_WRITE_PART_BITMAPS;
                udf_bitmap_free(bitmap, lb_num, num_lb);

                /* adjust freecount */
                lvid = ump->logvol_integrity;
                freepos = &lvid->tables[0] + vpart_num;
                free_lbs = udf_rw32(*freepos);
                *freepos = udf_rw32(free_lbs + num_lb);
                break;
        case UDF_VTOP_TYPE_VIRT :
                /* free this VAT entry */
                KASSERT(num_lb == 1);

                /* 0xffffffff marks a free VAT slot */
                lb_map = 0xffffffff;
                udf_rw32_lbmap = udf_rw32(lb_map);
                error = udf_vat_write(ump->vat_node,
                        (uint8_t *) &udf_rw32_lbmap, 4,
                        ump->vat_offset + lb_num * 4);
                KASSERT(error == 0);
                ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
                break;
        case UDF_VTOP_TYPE_META :
                /* free space in the metadata bitmap */
                bitmap = &ump->metadata_unalloc_bits;
                KASSERT(bitmap->bits);

                ump->lvclose |= UDF_WRITE_PART_BITMAPS;
                udf_bitmap_free(bitmap, lb_num, num_lb);

                /* adjust freecount */
                lvid = ump->logvol_integrity;
                freepos = &lvid->tables[0] + vpart_num;
                free_lbs = udf_rw32(*freepos);
                *freepos = udf_rw32(free_lbs + num_lb);
                break;
        default:
                printf("ALERT: udf_free_allocated_space : allocation %d "
                        "not implemented yet!\n", ump->vtop_tp[vpart_num]);
                break;
        }

        mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Pre-allocate space: on VAT-backed partitions only reserves the count in
 * uncomitted_lb (settled later in udf_late_allocate_buf()); then performs
 * the actual allocation.
 */
int
udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type,
        uint32_t num_lb, uint16_t vpartnr, uint64_t *lmapping)
{
        /* TODO properly maintain uncomitted_lb per partition */

        /* reserve size for VAT allocated data */
        if (ump->vtop_alloc[vpartnr] == UDF_ALLOC_VAT) {
                mutex_enter(&ump->allocate_mutex);
                ump->uncomitted_lb += num_lb;
                mutex_exit(&ump->allocate_mutex);
        }

        return udf_allocate_space(ump, udf_c_type, vpartnr, num_lb, lmapping);
}

/* --------------------------------------------------------------------- */

/*
 * Allocate a buf on disc for direct write out. The space doesn't have to be
 * contiguous as the caller takes care of this.
 */

void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
        uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
{
        struct udf_node *udf_node = VTOI(buf->b_vp);
        int lb_size, blks, udf_c_type;
        int vpart_num, num_lb;
        int error, s;

        /*
         * for each sector in the buf, allocate a sector on disc and record
         * its position in the provided mapping array.
         *
         * If its userdata or FIDs, record its location in its node.
         */

        lb_size = udf_rw32(ump->logical_vol->lb_size);
        num_lb = (buf->b_bcount + lb_size -1) / lb_size;
        blks = lb_size / DEV_BSIZE;     /* NOTE(review): computed but unused here */
        udf_c_type = buf->b_udf_c_type;

        KASSERT(lb_size == ump->discinfo.sector_size);

        /* select partition to record the buffer on */
        vpart_num = ump->data_part;
        if (udf_c_type == UDF_C_NODE)
                vpart_num = ump->node_part;
        if (udf_c_type == UDF_C_FIDS)
                vpart_num = ump->fids_part;
        *vpart_nump = vpart_num;

        if (udf_c_type == UDF_C_NODE) {
                /* if not VAT, its allready allocated */
                if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
                        return;

                /* allocate on its backing sequential partition */
                vpart_num = ump->data_part;
        }

        /* do allocation on the selected partition */
        error = udf_allocate_space(ump, udf_c_type,
                        vpart_num, num_lb, lmapping);
        if (error) {
                /*
                 * ARGH! we've not done our accounting right! Space was
                 * supposed to have been reserved in udf_pre_allocate_space.
                 */
                panic("UDF disc allocation accounting gone wrong");
        }

        /* commit our sector count */
        mutex_enter(&ump->allocate_mutex);
        if (num_lb > ump->uncomitted_lb) {
                ump->uncomitted_lb = 0;
        } else {
                ump->uncomitted_lb -= num_lb;
        }
        mutex_exit(&ump->allocate_mutex);

        /* If its userdata or FIDs, record its allocation in its node. */
        if ((udf_c_type == UDF_C_USERDATA) ||
            (udf_c_type == UDF_C_FIDS) ||
            (udf_c_type == UDF_C_METADATA_SBM))
        {
                udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
                        node_ad_cpy);
                /* decrement our outstanding bufs counter */
                s = splbio();
                udf_node->outstanding_bufs--;
                splx(s);
        }
}

/* --------------------------------------------------------------------- */

/*
 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
 * possible (anymore); a2 returns the rest piece.
1234 */ 1235 1236 static int 1237 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2) 1238 { 1239 uint32_t max_len, merge_len; 1240 uint32_t a1_len, a2_len; 1241 uint32_t a1_flags, a2_flags; 1242 uint32_t a1_lbnum, a2_lbnum; 1243 uint16_t a1_part, a2_part; 1244 1245 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size); 1246 1247 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len)); 1248 a1_len = UDF_EXT_LEN(udf_rw32(a1->len)); 1249 a1_lbnum = udf_rw32(a1->loc.lb_num); 1250 a1_part = udf_rw16(a1->loc.part_num); 1251 1252 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len)); 1253 a2_len = UDF_EXT_LEN(udf_rw32(a2->len)); 1254 a2_lbnum = udf_rw32(a2->loc.lb_num); 1255 a2_part = udf_rw16(a2->loc.part_num); 1256 1257 /* defines same space */ 1258 if (a1_flags != a2_flags) 1259 return 1; 1260 1261 if (a1_flags != UDF_EXT_FREE) { 1262 /* the same partition */ 1263 if (a1_part != a2_part) 1264 return 1; 1265 1266 /* a2 is successor of a1 */ 1267 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size) 1268 return 1; 1269 } 1270 1271 /* merge as most from a2 if possible */ 1272 merge_len = MIN(a2_len, max_len - a1_len); 1273 a1_len += merge_len; 1274 a2_len -= merge_len; 1275 a2_lbnum += merge_len/lb_size; 1276 1277 a1->len = udf_rw32(a1_len | a1_flags); 1278 a2->len = udf_rw32(a2_len | a2_flags); 1279 a2->loc.lb_num = udf_rw32(a2_lbnum); 1280 1281 if (a2_len > 0) 1282 return 1; 1283 1284 /* there is space over to merge */ 1285 return 0; 1286 } 1287 1288 /* --------------------------------------------------------------------- */ 1289 1290 static void 1291 udf_wipe_adslots(struct udf_node *udf_node) 1292 { 1293 struct file_entry *fe; 1294 struct extfile_entry *efe; 1295 struct alloc_ext_entry *ext; 1296 uint64_t inflen, objsize; 1297 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen; 1298 uint8_t *data_pos; 1299 int extnr; 1300 1301 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size); 1302 1303 fe = udf_node->fe; 1304 efe = udf_node->efe; 1305 if (fe) { 1306 inflen = 
udf_rw64(fe->inf_len); 1307 objsize = inflen; 1308 dscr_size = sizeof(struct file_entry) -1; 1309 l_ea = udf_rw32(fe->l_ea); 1310 l_ad = udf_rw32(fe->l_ad); 1311 data_pos = (uint8_t *) fe + dscr_size + l_ea; 1312 } else { 1313 inflen = udf_rw64(efe->inf_len); 1314 objsize = udf_rw64(efe->obj_size); 1315 dscr_size = sizeof(struct extfile_entry) -1; 1316 l_ea = udf_rw32(efe->l_ea); 1317 l_ad = udf_rw32(efe->l_ad); 1318 data_pos = (uint8_t *) efe + dscr_size + l_ea; 1319 } 1320 max_l_ad = lb_size - dscr_size - l_ea; 1321 1322 /* wipe fe/efe */ 1323 memset(data_pos, 0, max_l_ad); 1324 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea; 1325 if (fe) { 1326 fe->l_ad = udf_rw32(0); 1327 fe->logblks_rec = udf_rw64(0); 1328 fe->tag.desc_crc_len = udf_rw32(crclen); 1329 } else { 1330 efe->l_ad = udf_rw32(0); 1331 efe->logblks_rec = udf_rw64(0); 1332 efe->tag.desc_crc_len = udf_rw32(crclen); 1333 } 1334 1335 /* wipe all allocation extent entries */ 1336 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) { 1337 ext = udf_node->ext[extnr]; 1338 dscr_size = sizeof(struct alloc_ext_entry) -1; 1339 data_pos = (uint8_t *) ext->data; 1340 max_l_ad = lb_size - dscr_size; 1341 memset(data_pos, 0, max_l_ad); 1342 ext->l_ad = udf_rw32(0); 1343 1344 crclen = dscr_size - UDF_DESC_TAG_LENGTH; 1345 ext->tag.desc_crc_len = udf_rw32(crclen); 1346 } 1347 udf_node->i_flags |= IN_NODE_REBUILD; 1348 } 1349 1350 /* --------------------------------------------------------------------- */ 1351 1352 void 1353 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb, 1354 int *eof) { 1355 struct file_entry *fe; 1356 struct extfile_entry *efe; 1357 struct alloc_ext_entry *ext; 1358 struct icb_tag *icbtag; 1359 struct short_ad *short_ad; 1360 struct long_ad *long_ad, l_icb; 1361 uint32_t offset; 1362 uint32_t lb_size, dscr_size, l_ea, l_ad, flags; 1363 uint8_t *data_pos; 1364 int icbflags, addr_type, adlen, extnr; 1365 1366 /* determine what descriptor we are in */ 1367 lb_size = 
udf_rw32(udf_node->ump->logical_vol->lb_size); 1368 1369 fe = udf_node->fe; 1370 efe = udf_node->efe; 1371 if (fe) { 1372 icbtag = &fe->icbtag; 1373 dscr_size = sizeof(struct file_entry) -1; 1374 l_ea = udf_rw32(fe->l_ea); 1375 l_ad = udf_rw32(fe->l_ad); 1376 data_pos = (uint8_t *) fe + dscr_size + l_ea; 1377 } else { 1378 icbtag = &efe->icbtag; 1379 dscr_size = sizeof(struct extfile_entry) -1; 1380 l_ea = udf_rw32(efe->l_ea); 1381 l_ad = udf_rw32(efe->l_ad); 1382 data_pos = (uint8_t *) efe + dscr_size + l_ea; 1383 } 1384 1385 icbflags = udf_rw16(icbtag->flags); 1386 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK; 1387 1388 /* just in case we're called on an intern, its EOF */ 1389 if (addr_type == UDF_ICB_INTERN_ALLOC) { 1390 memset(icb, 0, sizeof(struct long_ad)); 1391 *eof = 1; 1392 return; 1393 } 1394 1395 adlen = 0; 1396 if (addr_type == UDF_ICB_SHORT_ALLOC) { 1397 adlen = sizeof(struct short_ad); 1398 } else if (addr_type == UDF_ICB_LONG_ALLOC) { 1399 adlen = sizeof(struct long_ad); 1400 } 1401 1402 /* if offset too big, we go to the allocation extensions */ 1403 offset = slot * adlen; 1404 extnr = -1; 1405 while (offset >= l_ad) { 1406 /* check if our last entry is a redirect */ 1407 if (addr_type == UDF_ICB_SHORT_ALLOC) { 1408 short_ad = (struct short_ad *) (data_pos + l_ad-adlen); 1409 l_icb.len = short_ad->len; 1410 l_icb.loc.part_num = udf_node->loc.loc.part_num; 1411 l_icb.loc.lb_num = short_ad->lb_num; 1412 } else { 1413 KASSERT(addr_type == UDF_ICB_LONG_ALLOC); 1414 long_ad = (struct long_ad *) (data_pos + l_ad-adlen); 1415 l_icb = *long_ad; 1416 } 1417 flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len)); 1418 if (flags != UDF_EXT_REDIRECT) { 1419 l_ad = 0; /* force EOF */ 1420 break; 1421 } 1422 1423 /* advance to next extent */ 1424 extnr++; 1425 if (extnr >= udf_node->num_extensions) { 1426 l_ad = 0; /* force EOF */ 1427 break; 1428 } 1429 offset = offset - l_ad; 1430 ext = udf_node->ext[extnr]; 1431 dscr_size = sizeof(struct alloc_ext_entry) -1; 1432 
l_ad = udf_rw32(ext->l_ad); 1433 data_pos = (uint8_t *) ext + dscr_size; 1434 } 1435 1436 /* XXX l_ad == 0 should be enough to check */ 1437 *eof = (offset >= l_ad) || (l_ad == 0); 1438 if (*eof) { 1439 DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, " 1440 "l_ad %d\n", extnr, offset, l_ad)); 1441 memset(icb, 0, sizeof(struct long_ad)); 1442 return; 1443 } 1444 1445 /* get the element */ 1446 if (addr_type == UDF_ICB_SHORT_ALLOC) { 1447 short_ad = (struct short_ad *) (data_pos + offset); 1448 icb->len = short_ad->len; 1449 icb->loc.part_num = udf_node->loc.loc.part_num; 1450 icb->loc.lb_num = short_ad->lb_num; 1451 } else if (addr_type == UDF_ICB_LONG_ALLOC) { 1452 long_ad = (struct long_ad *) (data_pos + offset); 1453 *icb = *long_ad; 1454 } 1455 DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, " 1456 "flags %d\n", icb->loc.part_num, icb->loc.lb_num, 1457 UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len))); 1458 } 1459 1460 /* --------------------------------------------------------------------- */ 1461 1462 int 1463 udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) { 1464 struct udf_mount *ump = udf_node->ump; 1465 union dscrptr *dscr, *extdscr; 1466 struct file_entry *fe; 1467 struct extfile_entry *efe; 1468 struct alloc_ext_entry *ext; 1469 struct icb_tag *icbtag; 1470 struct short_ad *short_ad; 1471 struct long_ad *long_ad, o_icb, l_icb; 1472 uint64_t logblks_rec, *logblks_rec_p; 1473 uint64_t lmapping; 1474 uint32_t offset, rest, len, lb_num; 1475 uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen; 1476 uint32_t flags; 1477 uint16_t vpart_num; 1478 uint8_t *data_pos; 1479 int icbflags, addr_type, adlen, extnr; 1480 int error; 1481 1482 lb_size = udf_rw32(ump->logical_vol->lb_size); 1483 vpart_num = udf_rw16(udf_node->loc.loc.part_num); 1484 1485 /* determine what descriptor we are in */ 1486 fe = udf_node->fe; 1487 efe = udf_node->efe; 1488 if (fe) { 1489 icbtag = &fe->icbtag; 1490 dscr = 
(union dscrptr *) fe; 1491 dscr_size = sizeof(struct file_entry) -1; 1492 1493 l_ea = udf_rw32(fe->l_ea); 1494 l_ad_p = &fe->l_ad; 1495 logblks_rec_p = &fe->logblks_rec; 1496 } else { 1497 icbtag = &efe->icbtag; 1498 dscr = (union dscrptr *) efe; 1499 dscr_size = sizeof(struct extfile_entry) -1; 1500 1501 l_ea = udf_rw32(efe->l_ea); 1502 l_ad_p = &efe->l_ad; 1503 logblks_rec_p = &efe->logblks_rec; 1504 } 1505 data_pos = (uint8_t *) dscr + dscr_size + l_ea; 1506 max_l_ad = lb_size - dscr_size - l_ea; 1507 1508 icbflags = udf_rw16(icbtag->flags); 1509 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK; 1510 1511 /* just in case we're called on an intern, its EOF */ 1512 if (addr_type == UDF_ICB_INTERN_ALLOC) { 1513 panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n"); 1514 } 1515 1516 adlen = 0; 1517 if (addr_type == UDF_ICB_SHORT_ALLOC) { 1518 adlen = sizeof(struct short_ad); 1519 } else if (addr_type == UDF_ICB_LONG_ALLOC) { 1520 adlen = sizeof(struct long_ad); 1521 } 1522 1523 /* clean up given long_ad since it can be a synthesized one */ 1524 flags = UDF_EXT_FLAGS(udf_rw32(icb->len)); 1525 if (flags == UDF_EXT_FREE) { 1526 icb->loc.part_num = udf_rw16(0); 1527 icb->loc.lb_num = udf_rw32(0); 1528 } 1529 1530 /* if offset too big, we go to the allocation extensions */ 1531 l_ad = udf_rw32(*l_ad_p); 1532 offset = (*slot) * adlen; 1533 extnr = -1; 1534 while (offset >= l_ad) { 1535 /* check if our last entry is a redirect */ 1536 if (addr_type == UDF_ICB_SHORT_ALLOC) { 1537 short_ad = (struct short_ad *) (data_pos + l_ad-adlen); 1538 l_icb.len = short_ad->len; 1539 l_icb.loc.part_num = udf_node->loc.loc.part_num; 1540 l_icb.loc.lb_num = short_ad->lb_num; 1541 } else { 1542 KASSERT(addr_type == UDF_ICB_LONG_ALLOC); 1543 long_ad = (struct long_ad *) (data_pos + l_ad-adlen); 1544 l_icb = *long_ad; 1545 } 1546 flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len)); 1547 if (flags != UDF_EXT_REDIRECT) { 1548 /* only one past the last one is adressable */ 1549 break; 1550 } 1551 1552 
/* advance to next extent */ 1553 extnr++; 1554 KASSERT(extnr < udf_node->num_extensions); 1555 offset = offset - l_ad; 1556 1557 ext = udf_node->ext[extnr]; 1558 dscr = (union dscrptr *) ext; 1559 dscr_size = sizeof(struct alloc_ext_entry) -1; 1560 max_l_ad = lb_size - dscr_size; 1561 l_ad_p = &ext->l_ad; 1562 l_ad = udf_rw32(*l_ad_p); 1563 data_pos = (uint8_t *) ext + dscr_size; 1564 } 1565 DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n", 1566 extnr, offset, udf_rw32(*l_ad_p))); 1567 KASSERT(l_ad == udf_rw32(*l_ad_p)); 1568 1569 /* offset is offset within the current (E)FE/AED */ 1570 l_ad = udf_rw32(*l_ad_p); 1571 crclen = udf_rw32(dscr->tag.desc_crc_len); 1572 logblks_rec = udf_rw64(*logblks_rec_p); 1573 1574 /* overwriting old piece? */ 1575 if (offset < l_ad) { 1576 /* overwrite entry; compensate for the old element */ 1577 if (addr_type == UDF_ICB_SHORT_ALLOC) { 1578 short_ad = (struct short_ad *) (data_pos + offset); 1579 o_icb.len = short_ad->len; 1580 o_icb.loc.part_num = udf_rw16(0); /* ignore */ 1581 o_icb.loc.lb_num = short_ad->lb_num; 1582 } else if (addr_type == UDF_ICB_LONG_ALLOC) { 1583 long_ad = (struct long_ad *) (data_pos + offset); 1584 o_icb = *long_ad; 1585 } else { 1586 panic("Invalid address type in udf_append_adslot\n"); 1587 } 1588 1589 len = udf_rw32(o_icb.len); 1590 if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) { 1591 /* adjust counts */ 1592 len = UDF_EXT_LEN(len); 1593 logblks_rec -= (len + lb_size -1) / lb_size; 1594 } 1595 } 1596 1597 /* check if we're not appending a redirection */ 1598 flags = UDF_EXT_FLAGS(udf_rw32(icb->len)); 1599 KASSERT(flags != UDF_EXT_REDIRECT); 1600 1601 /* round down available space */ 1602 rest = adlen * ((max_l_ad - offset) / adlen); 1603 if (rest <= adlen) { 1604 /* have to append aed, see if we already have a spare one */ 1605 extnr++; 1606 ext = udf_node->ext[extnr]; 1607 l_icb = udf_node->ext_loc[extnr]; 1608 if (ext == NULL) { 1609 DPRINTF(ALLOC,("adding allocation extent %d\n", 
extnr)); 1610 1611 error = udf_pre_allocate_space(ump, UDF_C_NODE, 1, 1612 vpart_num, &lmapping); 1613 lb_num = lmapping; 1614 if (error) 1615 return error; 1616 1617 /* initialise pointer to location */ 1618 memset(&l_icb, 0, sizeof(struct long_ad)); 1619 l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT); 1620 l_icb.loc.lb_num = udf_rw32(lb_num); 1621 l_icb.loc.part_num = udf_rw16(vpart_num); 1622 1623 /* create new aed descriptor */ 1624 udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr); 1625 ext = &extdscr->aee; 1626 1627 udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num); 1628 dscr_size = sizeof(struct alloc_ext_entry) -1; 1629 max_l_ad = lb_size - dscr_size; 1630 memset(ext->data, 0, max_l_ad); 1631 ext->l_ad = udf_rw32(0); 1632 ext->tag.desc_crc_len = 1633 udf_rw32(dscr_size - UDF_DESC_TAG_LENGTH); 1634 1635 /* declare aed */ 1636 udf_node->num_extensions++; 1637 udf_node->ext_loc[extnr] = l_icb; 1638 udf_node->ext[extnr] = ext; 1639 } 1640 /* add redirect and adjust l_ad and crclen for old descr */ 1641 if (addr_type == UDF_ICB_SHORT_ALLOC) { 1642 short_ad = (struct short_ad *) (data_pos + offset); 1643 short_ad->len = l_icb.len; 1644 short_ad->lb_num = l_icb.loc.lb_num; 1645 } else if (addr_type == UDF_ICB_LONG_ALLOC) { 1646 long_ad = (struct long_ad *) (data_pos + offset); 1647 *long_ad = l_icb; 1648 } 1649 l_ad += adlen; 1650 crclen += adlen; 1651 dscr->tag.desc_crc_len = udf_rw32(crclen); 1652 *l_ad_p = udf_rw32(l_ad); 1653 1654 /* advance to the new extension */ 1655 KASSERT(ext != NULL); 1656 dscr = (union dscrptr *) ext; 1657 dscr_size = sizeof(struct alloc_ext_entry) -1; 1658 max_l_ad = lb_size - dscr_size; 1659 data_pos = (uint8_t *) dscr + dscr_size; 1660 1661 l_ad_p = &ext->l_ad; 1662 l_ad = udf_rw32(*l_ad_p); 1663 crclen = udf_rw32(dscr->tag.desc_crc_len); 1664 offset = 0; 1665 1666 /* adjust callees slot count for link insert */ 1667 *slot += 1; 1668 } 1669 1670 /* write out the element */ 1671 DPRINTF(PARANOIDADWLK, ("adding element : 
%p : v %d, lb %d, " 1672 "len %d, flags %d\n", data_pos + offset, 1673 icb->loc.part_num, icb->loc.lb_num, 1674 UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len))); 1675 if (addr_type == UDF_ICB_SHORT_ALLOC) { 1676 short_ad = (struct short_ad *) (data_pos + offset); 1677 short_ad->len = icb->len; 1678 short_ad->lb_num = icb->loc.lb_num; 1679 } else if (addr_type == UDF_ICB_LONG_ALLOC) { 1680 long_ad = (struct long_ad *) (data_pos + offset); 1681 *long_ad = *icb; 1682 } 1683 1684 /* adjust logblks recorded count */ 1685 flags = UDF_EXT_FLAGS(udf_rw32(icb->len)); 1686 if (flags == UDF_EXT_ALLOCATED) 1687 logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size; 1688 *logblks_rec_p = udf_rw64(logblks_rec); 1689 1690 /* adjust l_ad and crclen when needed */ 1691 if (offset >= l_ad) { 1692 l_ad += adlen; 1693 crclen += adlen; 1694 dscr->tag.desc_crc_len = udf_rw32(crclen); 1695 *l_ad_p = udf_rw32(l_ad); 1696 } 1697 1698 return 0; 1699 } 1700 1701 /* --------------------------------------------------------------------- */ 1702 1703 static void 1704 udf_count_alloc_exts(struct udf_node *udf_node) 1705 { 1706 struct long_ad s_ad; 1707 uint32_t lb_num, len, flags; 1708 uint16_t vpart_num; 1709 int slot, eof; 1710 int num_extents, extnr; 1711 int lb_size; 1712 1713 if (udf_node->num_extensions == 0) 1714 return; 1715 1716 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size); 1717 /* count number of allocation extents in use */ 1718 num_extents = 0; 1719 slot = 0; 1720 for (;;) { 1721 udf_get_adslot(udf_node, slot, &s_ad, &eof); 1722 if (eof) 1723 break; 1724 len = udf_rw32(s_ad.len); 1725 flags = UDF_EXT_FLAGS(len); 1726 1727 if (flags == UDF_EXT_REDIRECT) 1728 num_extents++; 1729 1730 slot++; 1731 } 1732 1733 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n", 1734 num_extents)); 1735 1736 /* XXX choice: we could delay freeing them on node writeout */ 1737 /* free excess entries */ 1738 extnr = num_extents; 1739 for (;extnr < udf_node->num_extensions; 
extnr++) { 1740 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr)); 1741 /* free dscriptor */ 1742 s_ad = udf_node->ext_loc[extnr]; 1743 udf_free_logvol_dscr(udf_node->ump, &s_ad, 1744 udf_node->ext[extnr]); 1745 udf_node->ext[extnr] = NULL; 1746 1747 /* free disc space */ 1748 lb_num = udf_rw32(s_ad.loc.lb_num); 1749 vpart_num = udf_rw16(s_ad.loc.part_num); 1750 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1); 1751 1752 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad)); 1753 } 1754 1755 /* set our new number of allocation extents */ 1756 udf_node->num_extensions = num_extents; 1757 } 1758 1759 1760 /* --------------------------------------------------------------------- */ 1761 1762 /* 1763 * Adjust the node's allocation descriptors to reflect the new mapping; do 1764 * take note that we might glue to existing allocation descriptors. 1765 * 1766 * XXX Note there can only be one allocation being recorded/mount; maybe 1767 * explicit allocation in shedule thread? 1768 */ 1769 1770 static void 1771 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf, 1772 uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy) 1773 { 1774 struct vnode *vp = buf->b_vp; 1775 struct udf_node *udf_node = VTOI(vp); 1776 struct file_entry *fe; 1777 struct extfile_entry *efe; 1778 struct icb_tag *icbtag; 1779 struct long_ad s_ad, c_ad; 1780 uint64_t inflen, from, till; 1781 uint64_t foffset, end_foffset, restart_foffset; 1782 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec; 1783 uint32_t num_lb, len, flags, lb_num; 1784 uint32_t run_start; 1785 uint32_t slot_offset, replace_len, replace; 1786 int addr_type, icbflags; 1787 // int udf_c_type = buf->b_udf_c_type; 1788 int lb_size, run_length, eof; 1789 int slot, cpy_slot, cpy_slots, restart_slot; 1790 int error; 1791 1792 DPRINTF(ALLOC, ("udf_record_allocation_in_node\n")); 1793 1794 #if 0 1795 /* XXX disable sanity check for now */ 1796 /* sanity check ... should be panic ? 
*/ 1797 if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS)) 1798 return; 1799 #endif 1800 1801 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size); 1802 1803 /* do the job */ 1804 UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */ 1805 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec); 1806 1807 fe = udf_node->fe; 1808 efe = udf_node->efe; 1809 if (fe) { 1810 icbtag = &fe->icbtag; 1811 inflen = udf_rw64(fe->inf_len); 1812 } else { 1813 icbtag = &efe->icbtag; 1814 inflen = udf_rw64(efe->inf_len); 1815 } 1816 1817 /* do check if `till' is not past file information length */ 1818 from = buf->b_lblkno * lb_size; 1819 till = MIN(inflen, from + buf->b_resid); 1820 1821 num_lb = (till - from + lb_size -1) / lb_size; 1822 1823 DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount)); 1824 1825 icbflags = udf_rw16(icbtag->flags); 1826 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK; 1827 1828 if (addr_type == UDF_ICB_INTERN_ALLOC) { 1829 /* nothing to do */ 1830 /* XXX clean up rest of node? just in case? 
*/ 1831 UDF_UNLOCK_NODE(udf_node, 0); 1832 return; 1833 } 1834 1835 slot = 0; 1836 cpy_slot = 0; 1837 foffset = 0; 1838 1839 /* 1) copy till first overlap piece to the rewrite buffer */ 1840 for (;;) { 1841 udf_get_adslot(udf_node, slot, &s_ad, &eof); 1842 if (eof) { 1843 DPRINTF(WRITE, 1844 ("Record allocation in node " 1845 "failed: encountered EOF\n")); 1846 UDF_UNLOCK_NODE(udf_node, 0); 1847 buf->b_error = EINVAL; 1848 return; 1849 } 1850 len = udf_rw32(s_ad.len); 1851 flags = UDF_EXT_FLAGS(len); 1852 len = UDF_EXT_LEN(len); 1853 1854 if (flags == UDF_EXT_REDIRECT) { 1855 slot++; 1856 continue; 1857 } 1858 1859 end_foffset = foffset + len; 1860 if (end_foffset > from) 1861 break; /* found */ 1862 1863 node_ad_cpy[cpy_slot++] = s_ad; 1864 1865 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d " 1866 "-> stack\n", 1867 udf_rw16(s_ad.loc.part_num), 1868 udf_rw32(s_ad.loc.lb_num), 1869 UDF_EXT_LEN(udf_rw32(s_ad.len)), 1870 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 1871 1872 foffset = end_foffset; 1873 slot++; 1874 } 1875 restart_slot = slot; 1876 restart_foffset = foffset; 1877 1878 /* 2) trunc overlapping slot at overlap and copy it */ 1879 slot_offset = from - foffset; 1880 if (slot_offset > 0) { 1881 DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n", 1882 slot_offset, flags >> 30, flags)); 1883 1884 s_ad.len = udf_rw32(slot_offset | flags); 1885 node_ad_cpy[cpy_slot++] = s_ad; 1886 1887 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d " 1888 "-> stack\n", 1889 udf_rw16(s_ad.loc.part_num), 1890 udf_rw32(s_ad.loc.lb_num), 1891 UDF_EXT_LEN(udf_rw32(s_ad.len)), 1892 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 1893 } 1894 foffset += slot_offset; 1895 1896 /* 3) insert new mappings */ 1897 memset(&s_ad, 0, sizeof(struct long_ad)); 1898 lb_num = 0; 1899 for (lb_num = 0; lb_num < num_lb; lb_num++) { 1900 run_start = mapping[lb_num]; 1901 run_length = 1; 1902 while (lb_num < num_lb-1) { 1903 if (mapping[lb_num+1] != mapping[lb_num]+1) 1904 if 
(mapping[lb_num+1] != mapping[lb_num]) 1905 break; 1906 run_length++; 1907 lb_num++; 1908 } 1909 /* insert slot for this mapping */ 1910 len = run_length * lb_size; 1911 1912 /* bounds checking */ 1913 if (foffset + len > till) 1914 len = till - foffset; 1915 KASSERT(foffset + len <= inflen); 1916 1917 s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED); 1918 s_ad.loc.part_num = udf_rw16(vpart_num); 1919 s_ad.loc.lb_num = udf_rw32(run_start); 1920 1921 foffset += len; 1922 1923 /* paranoia */ 1924 if (len == 0) { 1925 DPRINTF(WRITE, 1926 ("Record allocation in node " 1927 "failed: insert failed\n")); 1928 UDF_UNLOCK_NODE(udf_node, 0); 1929 buf->b_error = EINVAL; 1930 return; 1931 } 1932 node_ad_cpy[cpy_slot++] = s_ad; 1933 1934 DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, " 1935 "flags %d -> stack\n", 1936 udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num), 1937 UDF_EXT_LEN(udf_rw32(s_ad.len)), 1938 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 1939 } 1940 1941 /* 4) pop replaced length */ 1942 slot = restart_slot; 1943 foffset = restart_foffset; 1944 1945 replace_len = till - foffset; /* total amount of bytes to pop */ 1946 slot_offset = from - foffset; /* offset in first encounted slot */ 1947 KASSERT((slot_offset % lb_size) == 0); 1948 1949 for (;;) { 1950 udf_get_adslot(udf_node, slot, &s_ad, &eof); 1951 if (eof) 1952 break; 1953 1954 len = udf_rw32(s_ad.len); 1955 flags = UDF_EXT_FLAGS(len); 1956 len = UDF_EXT_LEN(len); 1957 lb_num = udf_rw32(s_ad.loc.lb_num); 1958 1959 if (flags == UDF_EXT_REDIRECT) { 1960 slot++; 1961 continue; 1962 } 1963 1964 DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, " 1965 "replace_len %d, " 1966 "vp %d, lb %d, len %d, flags %d\n", 1967 slot, slot_offset, replace_len, 1968 udf_rw16(s_ad.loc.part_num), 1969 udf_rw32(s_ad.loc.lb_num), 1970 UDF_EXT_LEN(udf_rw32(s_ad.len)), 1971 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 1972 1973 /* adjust for slot offset */ 1974 if (slot_offset) { 1975 DPRINTF(ALLOC, ("\t4s: skipping 
%d\n", slot_offset)); 1976 lb_num += slot_offset / lb_size; 1977 len -= slot_offset; 1978 foffset += slot_offset; 1979 replace_len -= slot_offset; 1980 1981 /* mark adjusted */ 1982 slot_offset = 0; 1983 } 1984 1985 /* advance for (the rest of) this slot */ 1986 replace = MIN(len, replace_len); 1987 DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace)); 1988 1989 /* advance for this slot */ 1990 if (replace) { 1991 /* note: dont round DOWN on num_lb since we then 1992 * forget the last partial one */ 1993 num_lb = (replace + lb_size - 1) / lb_size; 1994 if (flags != UDF_EXT_FREE) { 1995 udf_free_allocated_space(ump, lb_num, 1996 udf_rw16(s_ad.loc.part_num), num_lb); 1997 } 1998 lb_num += num_lb; 1999 len -= replace; 2000 foffset += replace; 2001 replace_len -= replace; 2002 } 2003 2004 /* do we have a slot tail ? */ 2005 if (len) { 2006 KASSERT(foffset % lb_size == 0); 2007 2008 /* we arrived at our point, push remainder */ 2009 s_ad.len = udf_rw32(len | flags); 2010 s_ad.loc.lb_num = udf_rw32(lb_num); 2011 if (flags == UDF_EXT_FREE) 2012 s_ad.loc.lb_num = udf_rw32(0); 2013 node_ad_cpy[cpy_slot++] = s_ad; 2014 foffset += len; 2015 slot++; 2016 2017 DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d " 2018 "-> stack\n", 2019 udf_rw16(s_ad.loc.part_num), 2020 udf_rw32(s_ad.loc.lb_num), 2021 UDF_EXT_LEN(udf_rw32(s_ad.len)), 2022 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 2023 break; 2024 } 2025 2026 slot++; 2027 } 2028 2029 /* 5) copy remainder */ 2030 for (;;) { 2031 udf_get_adslot(udf_node, slot, &s_ad, &eof); 2032 if (eof) 2033 break; 2034 2035 len = udf_rw32(s_ad.len); 2036 flags = UDF_EXT_FLAGS(len); 2037 len = UDF_EXT_LEN(len); 2038 2039 if (flags == UDF_EXT_REDIRECT) { 2040 slot++; 2041 continue; 2042 } 2043 2044 node_ad_cpy[cpy_slot++] = s_ad; 2045 2046 DPRINTF(ALLOC, ("\t5: insert new mapping " 2047 "vp %d lb %d, len %d, flags %d " 2048 "-> stack\n", 2049 udf_rw16(s_ad.loc.part_num), 2050 udf_rw32(s_ad.loc.lb_num), 2051 UDF_EXT_LEN(udf_rw32(s_ad.len)), 2052 
UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 2053 2054 slot++; 2055 } 2056 2057 /* 6) reset node descriptors */ 2058 udf_wipe_adslots(udf_node); 2059 2060 /* 7) copy back extents; merge when possible. Recounting on the fly */ 2061 cpy_slots = cpy_slot; 2062 2063 c_ad = node_ad_cpy[0]; 2064 slot = 0; 2065 DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d " 2066 "lb %d, len %d, flags %d\n", 2067 udf_rw16(c_ad.loc.part_num), 2068 udf_rw32(c_ad.loc.lb_num), 2069 UDF_EXT_LEN(udf_rw32(c_ad.len)), 2070 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30)); 2071 2072 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) { 2073 s_ad = node_ad_cpy[cpy_slot]; 2074 2075 DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d " 2076 "lb %d, len %d, flags %d\n", 2077 udf_rw16(s_ad.loc.part_num), 2078 udf_rw32(s_ad.loc.lb_num), 2079 UDF_EXT_LEN(udf_rw32(s_ad.len)), 2080 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 2081 2082 /* see if we can merge */ 2083 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) { 2084 /* not mergable (anymore) */ 2085 DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, " 2086 "len %d, flags %d\n", 2087 udf_rw16(c_ad.loc.part_num), 2088 udf_rw32(c_ad.loc.lb_num), 2089 UDF_EXT_LEN(udf_rw32(c_ad.len)), 2090 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30)); 2091 2092 error = udf_append_adslot(udf_node, &slot, &c_ad); 2093 if (error) { 2094 buf->b_error = error; 2095 goto out; 2096 } 2097 c_ad = s_ad; 2098 slot++; 2099 } 2100 } 2101 2102 /* 8) push rest slot (if any) */ 2103 if (UDF_EXT_LEN(c_ad.len) > 0) { 2104 DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, " 2105 "len %d, flags %d\n", 2106 udf_rw16(c_ad.loc.part_num), 2107 udf_rw32(c_ad.loc.lb_num), 2108 UDF_EXT_LEN(udf_rw32(c_ad.len)), 2109 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30)); 2110 2111 error = udf_append_adslot(udf_node, &slot, &c_ad); 2112 if (error) { 2113 buf->b_error = error; 2114 goto out; 2115 } 2116 } 2117 2118 out: 2119 udf_count_alloc_exts(udf_node); 2120 2121 /* the node's descriptors should now be sane */ 2122 
udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec); 2123 UDF_UNLOCK_NODE(udf_node, 0); 2124 2125 KASSERT(orig_inflen == new_inflen); 2126 KASSERT(new_lbrec >= orig_lbrec); 2127 2128 return; 2129 } 2130 2131 /* --------------------------------------------------------------------- */ 2132 2133 int 2134 udf_grow_node(struct udf_node *udf_node, uint64_t new_size) 2135 { 2136 union dscrptr *dscr; 2137 struct vnode *vp = udf_node->vnode; 2138 struct udf_mount *ump = udf_node->ump; 2139 struct file_entry *fe; 2140 struct extfile_entry *efe; 2141 struct icb_tag *icbtag; 2142 struct long_ad c_ad, s_ad; 2143 uint64_t size_diff, old_size, inflen, objsize, chunk, append_len; 2144 uint64_t foffset, end_foffset; 2145 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec; 2146 uint32_t lb_size, dscr_size, crclen, lastblock_grow; 2147 uint32_t icbflags, len, flags, max_len; 2148 uint32_t max_l_ad, l_ad, l_ea; 2149 uint16_t my_part, dst_part; 2150 uint8_t *data_pos, *evacuated_data; 2151 int addr_type; 2152 int slot, cpy_slot; 2153 int isdir, eof, error; 2154 2155 DPRINTF(ALLOC, ("udf_grow_node\n")); 2156 2157 UDF_LOCK_NODE(udf_node, 0); 2158 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec); 2159 2160 lb_size = udf_rw32(ump->logical_vol->lb_size); 2161 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size); 2162 2163 fe = udf_node->fe; 2164 efe = udf_node->efe; 2165 if (fe) { 2166 dscr = (union dscrptr *) fe; 2167 icbtag = &fe->icbtag; 2168 inflen = udf_rw64(fe->inf_len); 2169 objsize = inflen; 2170 dscr_size = sizeof(struct file_entry) -1; 2171 l_ea = udf_rw32(fe->l_ea); 2172 l_ad = udf_rw32(fe->l_ad); 2173 } else { 2174 dscr = (union dscrptr *) efe; 2175 icbtag = &efe->icbtag; 2176 inflen = udf_rw64(efe->inf_len); 2177 objsize = udf_rw64(efe->obj_size); 2178 dscr_size = sizeof(struct extfile_entry) -1; 2179 l_ea = udf_rw32(efe->l_ea); 2180 l_ad = udf_rw32(efe->l_ad); 2181 } 2182 data_pos = (uint8_t *) dscr + dscr_size + l_ea; 2183 max_l_ad = lb_size - dscr_size - 
l_ea; 2184 2185 icbflags = udf_rw16(icbtag->flags); 2186 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK; 2187 2188 old_size = inflen; 2189 size_diff = new_size - old_size; 2190 2191 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size)); 2192 2193 evacuated_data = NULL; 2194 if (addr_type == UDF_ICB_INTERN_ALLOC) { 2195 if (l_ad + size_diff <= max_l_ad) { 2196 /* only reflect size change directly in the node */ 2197 inflen += size_diff; 2198 objsize += size_diff; 2199 l_ad += size_diff; 2200 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad; 2201 if (fe) { 2202 fe->inf_len = udf_rw64(inflen); 2203 fe->l_ad = udf_rw32(l_ad); 2204 fe->tag.desc_crc_len = udf_rw32(crclen); 2205 } else { 2206 efe->inf_len = udf_rw64(inflen); 2207 efe->obj_size = udf_rw64(objsize); 2208 efe->l_ad = udf_rw32(l_ad); 2209 efe->tag.desc_crc_len = udf_rw32(crclen); 2210 } 2211 error = 0; 2212 2213 /* set new size for uvm */ 2214 uvm_vnp_setsize(vp, old_size); 2215 uvm_vnp_setwritesize(vp, new_size); 2216 2217 #if 0 2218 /* zero append space in buffer */ 2219 uvm_vnp_zerorange(vp, old_size, new_size - old_size); 2220 #endif 2221 2222 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec); 2223 2224 /* unlock */ 2225 UDF_UNLOCK_NODE(udf_node, 0); 2226 2227 KASSERT(new_inflen == orig_inflen + size_diff); 2228 KASSERT(new_lbrec == orig_lbrec); 2229 KASSERT(new_lbrec == 0); 2230 return 0; 2231 } 2232 2233 DPRINTF(ALLOC, ("\tCONVERT from internal\n")); 2234 2235 if (old_size > 0) { 2236 /* allocate some space and copy in the stuff to keep */ 2237 evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK); 2238 memset(evacuated_data, 0, lb_size); 2239 2240 /* node is locked, so safe to exit mutex */ 2241 UDF_UNLOCK_NODE(udf_node, 0); 2242 2243 /* read in using the `normal' vn_rdwr() */ 2244 error = vn_rdwr(UIO_READ, udf_node->vnode, 2245 evacuated_data, old_size, 0, 2246 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, 2247 FSCRED, NULL, NULL); 2248 2249 /* enter again */ 2250 
UDF_LOCK_NODE(udf_node, 0); 2251 } 2252 2253 /* convert to a normal alloc and select type */ 2254 isdir = (vp->v_type == VDIR); 2255 my_part = udf_rw16(udf_node->loc.loc.part_num); 2256 dst_part = isdir? ump->fids_part : ump->data_part; 2257 addr_type = UDF_ICB_SHORT_ALLOC; 2258 if (dst_part != my_part) 2259 addr_type = UDF_ICB_LONG_ALLOC; 2260 2261 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK; 2262 icbflags |= addr_type; 2263 icbtag->flags = udf_rw16(icbflags); 2264 2265 /* wipe old descriptor space */ 2266 udf_wipe_adslots(udf_node); 2267 2268 memset(&c_ad, 0, sizeof(struct long_ad)); 2269 c_ad.len = udf_rw32(old_size | UDF_EXT_FREE); 2270 c_ad.loc.part_num = udf_rw16(0); /* not relevant */ 2271 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */ 2272 2273 slot = 0; 2274 } else { 2275 /* goto the last entry (if any) */ 2276 slot = 0; 2277 cpy_slot = 0; 2278 foffset = 0; 2279 memset(&c_ad, 0, sizeof(struct long_ad)); 2280 for (;;) { 2281 udf_get_adslot(udf_node, slot, &c_ad, &eof); 2282 if (eof) 2283 break; 2284 2285 len = udf_rw32(c_ad.len); 2286 flags = UDF_EXT_FLAGS(len); 2287 len = UDF_EXT_LEN(len); 2288 2289 end_foffset = foffset + len; 2290 if (flags != UDF_EXT_REDIRECT) 2291 foffset = end_foffset; 2292 2293 slot++; 2294 } 2295 /* at end of adslots */ 2296 2297 /* special case if the old size was zero, then there is no last slot */ 2298 if (old_size == 0) { 2299 c_ad.len = udf_rw32(0 | UDF_EXT_FREE); 2300 c_ad.loc.part_num = udf_rw16(0); /* not relevant */ 2301 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */ 2302 } else { 2303 /* refetch last slot */ 2304 slot--; 2305 udf_get_adslot(udf_node, slot, &c_ad, &eof); 2306 } 2307 } 2308 2309 /* 2310 * If the length of the last slot is not a multiple of lb_size, adjust 2311 * length so that it is; don't forget to adjust `append_len'! 
relevant for 2312 * extending existing files 2313 */ 2314 len = udf_rw32(c_ad.len); 2315 flags = UDF_EXT_FLAGS(len); 2316 len = UDF_EXT_LEN(len); 2317 2318 lastblock_grow = 0; 2319 if (len % lb_size > 0) { 2320 lastblock_grow = lb_size - (len % lb_size); 2321 lastblock_grow = MIN(size_diff, lastblock_grow); 2322 len += lastblock_grow; 2323 c_ad.len = udf_rw32(len | flags); 2324 2325 /* TODO zero appened space in buffer! */ 2326 /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */ 2327 } 2328 memset(&s_ad, 0, sizeof(struct long_ad)); 2329 2330 /* size_diff can be bigger than allowed, so grow in chunks */ 2331 append_len = size_diff - lastblock_grow; 2332 while (append_len > 0) { 2333 chunk = MIN(append_len, max_len); 2334 s_ad.len = udf_rw32(chunk | UDF_EXT_FREE); 2335 s_ad.loc.part_num = udf_rw16(0); 2336 s_ad.loc.lb_num = udf_rw32(0); 2337 2338 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) { 2339 /* not mergable (anymore) */ 2340 error = udf_append_adslot(udf_node, &slot, &c_ad); 2341 if (error) 2342 goto errorout; 2343 slot++; 2344 c_ad = s_ad; 2345 memset(&s_ad, 0, sizeof(struct long_ad)); 2346 } 2347 append_len -= chunk; 2348 } 2349 2350 /* if there is a rest piece in the accumulator, append it */ 2351 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) { 2352 error = udf_append_adslot(udf_node, &slot, &c_ad); 2353 if (error) 2354 goto errorout; 2355 slot++; 2356 } 2357 2358 /* if there is a rest piece that didn't fit, append it */ 2359 if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) { 2360 error = udf_append_adslot(udf_node, &slot, &s_ad); 2361 if (error) 2362 goto errorout; 2363 slot++; 2364 } 2365 2366 inflen += size_diff; 2367 objsize += size_diff; 2368 if (fe) { 2369 fe->inf_len = udf_rw64(inflen); 2370 } else { 2371 efe->inf_len = udf_rw64(inflen); 2372 efe->obj_size = udf_rw64(objsize); 2373 } 2374 error = 0; 2375 2376 if (evacuated_data) { 2377 /* set new write size for uvm */ 2378 uvm_vnp_setwritesize(vp, old_size); 2379 2380 /* write out evacuated data */ 
2381 error = vn_rdwr(UIO_WRITE, udf_node->vnode, 2382 evacuated_data, old_size, 0, 2383 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, 2384 FSCRED, NULL, NULL); 2385 uvm_vnp_setsize(vp, old_size); 2386 } 2387 2388 errorout: 2389 if (evacuated_data) 2390 free(evacuated_data, M_UDFTEMP); 2391 2392 udf_count_alloc_exts(udf_node); 2393 2394 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec); 2395 UDF_UNLOCK_NODE(udf_node, 0); 2396 2397 KASSERT(new_inflen == orig_inflen + size_diff); 2398 KASSERT(new_lbrec == orig_lbrec); 2399 2400 return error; 2401 } 2402 2403 /* --------------------------------------------------------------------- */ 2404 2405 int 2406 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size) 2407 { 2408 struct vnode *vp = udf_node->vnode; 2409 struct udf_mount *ump = udf_node->ump; 2410 struct file_entry *fe; 2411 struct extfile_entry *efe; 2412 struct icb_tag *icbtag; 2413 struct long_ad c_ad, s_ad, *node_ad_cpy; 2414 uint64_t size_diff, old_size, inflen, objsize; 2415 uint64_t foffset, end_foffset; 2416 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec; 2417 uint32_t lb_size, dscr_size, crclen; 2418 uint32_t slot_offset; 2419 uint32_t len, flags, max_len; 2420 uint32_t num_lb, lb_num; 2421 uint32_t max_l_ad, l_ad, l_ea; 2422 uint16_t vpart_num; 2423 uint8_t *data_pos; 2424 int icbflags, addr_type; 2425 int slot, cpy_slot, cpy_slots; 2426 int eof, error; 2427 2428 DPRINTF(ALLOC, ("udf_shrink_node\n")); 2429 2430 UDF_LOCK_NODE(udf_node, 0); 2431 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec); 2432 2433 lb_size = udf_rw32(ump->logical_vol->lb_size); 2434 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size); 2435 2436 /* do the work */ 2437 fe = udf_node->fe; 2438 efe = udf_node->efe; 2439 if (fe) { 2440 icbtag = &fe->icbtag; 2441 inflen = udf_rw64(fe->inf_len); 2442 objsize = inflen; 2443 dscr_size = sizeof(struct file_entry) -1; 2444 l_ea = udf_rw32(fe->l_ea); 2445 l_ad = udf_rw32(fe->l_ad); 2446 data_pos = (uint8_t *) fe + 
dscr_size + l_ea; 2447 } else { 2448 icbtag = &efe->icbtag; 2449 inflen = udf_rw64(efe->inf_len); 2450 objsize = udf_rw64(efe->obj_size); 2451 dscr_size = sizeof(struct extfile_entry) -1; 2452 l_ea = udf_rw32(efe->l_ea); 2453 l_ad = udf_rw32(efe->l_ad); 2454 data_pos = (uint8_t *) efe + dscr_size + l_ea; 2455 } 2456 max_l_ad = lb_size - dscr_size - l_ea; 2457 2458 icbflags = udf_rw16(icbtag->flags); 2459 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK; 2460 2461 old_size = inflen; 2462 size_diff = old_size - new_size; 2463 2464 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size)); 2465 2466 /* shrink the node to its new size */ 2467 if (addr_type == UDF_ICB_INTERN_ALLOC) { 2468 /* only reflect size change directly in the node */ 2469 KASSERT(new_size <= max_l_ad); 2470 inflen -= size_diff; 2471 objsize -= size_diff; 2472 l_ad -= size_diff; 2473 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad; 2474 if (fe) { 2475 fe->inf_len = udf_rw64(inflen); 2476 fe->l_ad = udf_rw32(l_ad); 2477 fe->tag.desc_crc_len = udf_rw32(crclen); 2478 } else { 2479 efe->inf_len = udf_rw64(inflen); 2480 efe->obj_size = udf_rw64(objsize); 2481 efe->l_ad = udf_rw32(l_ad); 2482 efe->tag.desc_crc_len = udf_rw32(crclen); 2483 } 2484 error = 0; 2485 2486 /* clear the space in the descriptor */ 2487 KASSERT(old_size > new_size); 2488 memset(data_pos + new_size, 0, old_size - new_size); 2489 2490 /* TODO zero appened space in buffer! */ 2491 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? 
*/ 2492 2493 /* set new size for uvm */ 2494 uvm_vnp_setsize(vp, new_size); 2495 2496 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec); 2497 UDF_UNLOCK_NODE(udf_node, 0); 2498 2499 KASSERT(new_inflen == orig_inflen - size_diff); 2500 KASSERT(new_lbrec == orig_lbrec); 2501 KASSERT(new_lbrec == 0); 2502 2503 return 0; 2504 } 2505 2506 /* setup node cleanup extents copy space */ 2507 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS, 2508 M_UDFMNT, M_WAITOK); 2509 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS); 2510 2511 /* 2512 * Shrink the node by releasing the allocations and truncate the last 2513 * allocation to the new size. If the new size fits into the 2514 * allocation descriptor itself, transform it into an 2515 * UDF_ICB_INTERN_ALLOC. 2516 */ 2517 slot = 0; 2518 cpy_slot = 0; 2519 foffset = 0; 2520 2521 /* 1) copy till first overlap piece to the rewrite buffer */ 2522 for (;;) { 2523 udf_get_adslot(udf_node, slot, &s_ad, &eof); 2524 if (eof) { 2525 DPRINTF(WRITE, 2526 ("Shrink node failed: " 2527 "encountered EOF\n")); 2528 error = EINVAL; 2529 goto errorout; /* panic? 
*/ 2530 } 2531 len = udf_rw32(s_ad.len); 2532 flags = UDF_EXT_FLAGS(len); 2533 len = UDF_EXT_LEN(len); 2534 2535 if (flags == UDF_EXT_REDIRECT) { 2536 slot++; 2537 continue; 2538 } 2539 2540 end_foffset = foffset + len; 2541 if (end_foffset > new_size) 2542 break; /* found */ 2543 2544 node_ad_cpy[cpy_slot++] = s_ad; 2545 2546 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d " 2547 "-> stack\n", 2548 udf_rw16(s_ad.loc.part_num), 2549 udf_rw32(s_ad.loc.lb_num), 2550 UDF_EXT_LEN(udf_rw32(s_ad.len)), 2551 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 2552 2553 foffset = end_foffset; 2554 slot++; 2555 } 2556 slot_offset = new_size - foffset; 2557 2558 /* 2) trunc overlapping slot at overlap and copy it */ 2559 if (slot_offset > 0) { 2560 lb_num = udf_rw32(s_ad.loc.lb_num); 2561 vpart_num = udf_rw16(s_ad.loc.part_num); 2562 2563 if (flags == UDF_EXT_ALLOCATED) { 2564 /* note: round DOWN on num_lb */ 2565 lb_num += (slot_offset + lb_size -1) / lb_size; 2566 num_lb = (len - slot_offset) / lb_size; 2567 2568 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb); 2569 } 2570 2571 s_ad.len = udf_rw32(slot_offset | flags); 2572 node_ad_cpy[cpy_slot++] = s_ad; 2573 slot++; 2574 2575 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d " 2576 "-> stack\n", 2577 udf_rw16(s_ad.loc.part_num), 2578 udf_rw32(s_ad.loc.lb_num), 2579 UDF_EXT_LEN(udf_rw32(s_ad.len)), 2580 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 2581 } 2582 2583 /* 3) delete remainder */ 2584 for (;;) { 2585 udf_get_adslot(udf_node, slot, &s_ad, &eof); 2586 if (eof) 2587 break; 2588 2589 len = udf_rw32(s_ad.len); 2590 flags = UDF_EXT_FLAGS(len); 2591 len = UDF_EXT_LEN(len); 2592 2593 if (flags == UDF_EXT_REDIRECT) { 2594 slot++; 2595 continue; 2596 } 2597 2598 DPRINTF(ALLOC, ("\t3: delete remainder " 2599 "vp %d lb %d, len %d, flags %d\n", 2600 udf_rw16(s_ad.loc.part_num), 2601 udf_rw32(s_ad.loc.lb_num), 2602 UDF_EXT_LEN(udf_rw32(s_ad.len)), 2603 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 2604 2605 if (flags == 
UDF_EXT_ALLOCATED) { 2606 lb_num = udf_rw32(s_ad.loc.lb_num); 2607 vpart_num = udf_rw16(s_ad.loc.part_num); 2608 num_lb = (len + lb_size - 1) / lb_size; 2609 2610 udf_free_allocated_space(ump, lb_num, vpart_num, 2611 num_lb); 2612 } 2613 2614 slot++; 2615 } 2616 2617 /* 4) if it will fit into the descriptor then convert */ 2618 if (new_size < max_l_ad) { 2619 /* 2620 * resque/evacuate old piece by reading it in, and convert it 2621 * to internal alloc. 2622 */ 2623 if (new_size == 0) { 2624 /* XXX/TODO only for zero sizing now */ 2625 udf_wipe_adslots(udf_node); 2626 2627 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK; 2628 icbflags |= UDF_ICB_INTERN_ALLOC; 2629 icbtag->flags = udf_rw16(icbflags); 2630 2631 inflen -= size_diff; KASSERT(inflen == 0); 2632 objsize -= size_diff; 2633 l_ad = new_size; 2634 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad; 2635 if (fe) { 2636 fe->inf_len = udf_rw64(inflen); 2637 fe->l_ad = udf_rw32(l_ad); 2638 fe->tag.desc_crc_len = udf_rw32(crclen); 2639 } else { 2640 efe->inf_len = udf_rw64(inflen); 2641 efe->obj_size = udf_rw64(objsize); 2642 efe->l_ad = udf_rw32(l_ad); 2643 efe->tag.desc_crc_len = udf_rw32(crclen); 2644 } 2645 /* eventually copy in evacuated piece */ 2646 /* set new size for uvm */ 2647 uvm_vnp_setsize(vp, new_size); 2648 2649 free(node_ad_cpy, M_UDFMNT); 2650 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec); 2651 2652 UDF_UNLOCK_NODE(udf_node, 0); 2653 2654 KASSERT(new_inflen == orig_inflen - size_diff); 2655 KASSERT(new_inflen == 0); 2656 KASSERT(new_lbrec == 0); 2657 2658 return 0; 2659 } 2660 2661 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n"); 2662 } 2663 2664 /* 5) reset node descriptors */ 2665 udf_wipe_adslots(udf_node); 2666 2667 /* 6) copy back extents; merge when possible. 
Recounting on the fly */ 2668 cpy_slots = cpy_slot; 2669 2670 c_ad = node_ad_cpy[0]; 2671 slot = 0; 2672 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) { 2673 s_ad = node_ad_cpy[cpy_slot]; 2674 2675 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d " 2676 "lb %d, len %d, flags %d\n", 2677 udf_rw16(s_ad.loc.part_num), 2678 udf_rw32(s_ad.loc.lb_num), 2679 UDF_EXT_LEN(udf_rw32(s_ad.len)), 2680 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30)); 2681 2682 /* see if we can merge */ 2683 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) { 2684 /* not mergable (anymore) */ 2685 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, " 2686 "len %d, flags %d\n", 2687 udf_rw16(c_ad.loc.part_num), 2688 udf_rw32(c_ad.loc.lb_num), 2689 UDF_EXT_LEN(udf_rw32(c_ad.len)), 2690 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30)); 2691 2692 error = udf_append_adslot(udf_node, &slot, &c_ad); 2693 if (error) 2694 goto errorout; /* panic? */ 2695 c_ad = s_ad; 2696 slot++; 2697 } 2698 } 2699 2700 /* 7) push rest slot (if any) */ 2701 if (UDF_EXT_LEN(c_ad.len) > 0) { 2702 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, " 2703 "len %d, flags %d\n", 2704 udf_rw16(c_ad.loc.part_num), 2705 udf_rw32(c_ad.loc.lb_num), 2706 UDF_EXT_LEN(udf_rw32(c_ad.len)), 2707 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30)); 2708 2709 error = udf_append_adslot(udf_node, &slot, &c_ad); 2710 if (error) 2711 goto errorout; /* panic? */ 2712 ; 2713 } 2714 2715 inflen -= size_diff; 2716 objsize -= size_diff; 2717 if (fe) { 2718 fe->inf_len = udf_rw64(inflen); 2719 } else { 2720 efe->inf_len = udf_rw64(inflen); 2721 efe->obj_size = udf_rw64(objsize); 2722 } 2723 error = 0; 2724 2725 /* set new size for uvm */ 2726 uvm_vnp_setsize(vp, new_size); 2727 2728 errorout: 2729 free(node_ad_cpy, M_UDFMNT); 2730 2731 udf_count_alloc_exts(udf_node); 2732 2733 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec); 2734 UDF_UNLOCK_NODE(udf_node, 0); 2735 2736 KASSERT(new_inflen == orig_inflen - size_diff); 2737 2738 return error; 2739 } 2740 2741