/* $NetBSD: udf_subr.c,v 1.70 2008/07/28 19:41:13 reinoud Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */


#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_subr.c,v 1.70 2008/07/28 19:41:13 reinoud Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <fs/unicode.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#if defined(_KERNEL_OPT)
#include "opt_udf.h"
#endif

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)

#define UDF_SET_SYSTEMFILE(vp) \
	/* XXXAD Is the vnode locked? */	\
	(vp)->v_vflag |= VV_SYSTEM;		\
	vref(vp);				\
	vput(vp);				\

extern int syncer_maxdelay;	/* maximum delay time */
extern int (**udf_vnodeop_p)(void *);

/* --------------------------------------------------------------------- */

//#ifdef DEBUG
#if 1

#if 0
static void
udf_dumpblob(uint8_t *blob, uint32_t dlen)
{
	int i, j;

	printf("blob = %p\n", blob);
	printf("dump of %d bytes\n", dlen);

	for (i = 0; i < dlen; i += 16) {
		printf("%04x ", i);
		for (j = 0; j < 16; j++) {
			if (i+j < dlen) {
				printf("%02x ", blob[i+j]);
			} else {
				printf(" ");
			}
		}
		for (j = 0; j < 16; j++) {
			if (i+j < dlen) {
				if (blob[i+j] > 32 && blob[i+j] != 127) {
					printf("%c", blob[i+j]);
				} else {
					printf(".");
				}
			}
		}
		printf("\n");
	}
	printf("\n");
	Debugger();
}
#endif

static void
udf_dump_discinfo(struct udf_mount *ump)
{
	char bits[128];
	struct mmc_discinfo *di = &ump->discinfo;

	if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
		return;

	printf("Device/media info :\n");
	printf("\tMMC profile 0x%02x\n", di->mmc_profile);
	printf("\tderived class %d\n", di->mmc_class);
	printf("\tsector size %d\n", di->sector_size);
	printf("\tdisc state %d\n", di->disc_state);
	printf("\tlast ses state %d\n", di->last_session_state);
	printf("\tbg format state %d\n", di->bg_format_state);
	printf("\tfrst track %d\n", di->first_track);
	printf("\tfst on last ses %d\n", di->first_track_last_session);
	printf("\tlst on last ses %d\n", di->last_track_last_session);
	printf("\tlink block penalty %d\n", di->link_block_penalty);
	bitmask_snprintf(di->disc_flags, MMC_DFLAGS_FLAGBITS, bits,
		sizeof(bits));
	printf("\tdisc flags %s\n", bits);
	printf("\tdisc id %x\n", di->disc_id);
	printf("\tdisc barcode %"PRIx64"\n", di->disc_barcode);

	printf("\tnum sessions %d\n", di->num_sessions);
	printf("\tnum tracks %d\n", di->num_tracks);

	bitmask_snprintf(di->mmc_cur, MMC_CAP_FLAGBITS, bits, sizeof(bits));
	printf("\tcapabilities cur %s\n", bits);
	bitmask_snprintf(di->mmc_cap, MMC_CAP_FLAGBITS, bits, sizeof(bits));
	printf("\tcapabilities cap %s\n", bits);
}
#else
#define udf_dump_discinfo(a);
#endif


/* --------------------------------------------------------------------- */

/* not called often */
int
udf_update_discinfo(struct udf_mount *ump)
{
	struct vnode *devvp = ump->devvp;
	struct partinfo dpart;
	struct mmc_discinfo *di;
	int error;

	DPRINTF(VOLUMES, ("read/update disc info\n"));
	di = &ump->discinfo;
	memset(di, 0, sizeof(struct mmc_discinfo));

	/* check if we're on an MMC capable device, i.e.
	   CD/DVD */
	error = VOP_IOCTL(devvp, MMCGETDISCINFO, di, FKIOCTL, NOCRED);
	if (error == 0) {
		udf_dump_discinfo(ump);
		return 0;
	}

	/* disc partition support */
	error = VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED);
	if (error)
		return ENODEV;

	/* set up a disc info profile for partitions */
	di->mmc_profile = 0x01;	/* disc type */
	di->mmc_class = MMC_CLASS_DISC;
	di->disc_state = MMC_STATE_CLOSED;
	di->last_session_state = MMC_STATE_CLOSED;
	di->bg_format_state = MMC_BGFSTATE_COMPLETED;
	di->link_block_penalty = 0;

	di->mmc_cur = MMC_CAP_RECORDABLE | MMC_CAP_REWRITABLE |
		MMC_CAP_ZEROLINKBLK | MMC_CAP_HW_DEFECTFREE;
	di->mmc_cap = di->mmc_cur;
	di->disc_flags = MMC_DFLAGS_UNRESTRICTED;

	/* TODO problem with last_possible_lba on resizable VND; request */
	di->last_possible_lba = dpart.part->p_size;
	di->sector_size = dpart.disklab->d_secsize;

	di->num_sessions = 1;
	di->num_tracks = 1;

	di->first_track = 1;
	di->first_track_last_session = di->last_track_last_session = 1;

	udf_dump_discinfo(ump);
	return 0;
}


int
udf_update_trackinfo(struct udf_mount *ump, struct mmc_trackinfo *ti)
{
	struct vnode *devvp = ump->devvp;
	struct mmc_discinfo *di = &ump->discinfo;
	int error, class;

	DPRINTF(VOLUMES, ("read track info\n"));

	class = di->mmc_class;
	if (class != MMC_CLASS_DISC) {
		/* tracknr specified in struct ti */
		error = VOP_IOCTL(devvp, MMCGETTRACKINFO, ti, FKIOCTL, NOCRED);
		return error;
	}

	/* disc partition support */
	if (ti->tracknr != 1)
		return EIO;

	/* create fake ti (TODO check for resized vnds) */
	ti->sessionnr = 1;

	ti->track_mode = 0;	/* XXX */
	ti->data_mode = 0;	/* XXX */
	ti->flags = MMC_TRACKINFO_LRA_VALID | MMC_TRACKINFO_NWA_VALID;

	ti->track_start = 0;
	ti->packet_size = 1;

	/* TODO support for resizable vnd */
	ti->track_size = di->last_possible_lba;
	ti->next_writable = di->last_possible_lba;
	ti->last_recorded = ti->next_writable;
	ti->free_blocks = 0;

	return 0;
}


int
udf_setup_writeparams(struct udf_mount *ump)
{
	struct mmc_writeparams mmc_writeparams;
	int error;

	if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
		return 0;

	/*
	 * only CD burning normally needs setting up, but other disc types
	 * might need other settings to be made. The MMC framework will set up
	 * the necessary recording parameters according to the disc
	 * characteristics read in. Modifications can be made in the discinfo
	 * structure passed to change the nature of the disc.
	 */

	memset(&mmc_writeparams, 0, sizeof(struct mmc_writeparams));
	mmc_writeparams.mmc_class = ump->discinfo.mmc_class;
	mmc_writeparams.mmc_cur = ump->discinfo.mmc_cur;

	/*
	 * UDF dictates first track to determine track mode for the whole
	 * disc. [UDF 1.50/6.10.1.1, UDF 1.50/6.10.2.1]
	 * To prevent problems with a `reserved' track in front we start with
	 * the 2nd track and if that is not valid, go for the 1st.
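	 * (the code below first tries MMCSETUPWRITEPARAMS on tracknr 2 and,
	 * when that ioctl fails, retries with tracknr 1)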
285 */ 286 mmc_writeparams.tracknr = 2; 287 mmc_writeparams.data_mode = MMC_DATAMODE_DEFAULT; /* XA disc */ 288 mmc_writeparams.track_mode = MMC_TRACKMODE_DEFAULT; /* data */ 289 290 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS, &mmc_writeparams, 291 FKIOCTL, NOCRED); 292 if (error) { 293 mmc_writeparams.tracknr = 1; 294 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS, 295 &mmc_writeparams, FKIOCTL, NOCRED); 296 } 297 return error; 298 } 299 300 301 int 302 udf_synchronise_caches(struct udf_mount *ump) 303 { 304 struct mmc_op mmc_op; 305 306 DPRINTF(CALL, ("udf_synchronise_caches()\n")); 307 308 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) 309 return 0; 310 311 /* discs are done now */ 312 if (ump->discinfo.mmc_class == MMC_CLASS_DISC) 313 return 0; 314 315 bzero(&mmc_op, sizeof(struct mmc_op)); 316 mmc_op.operation = MMC_OP_SYNCHRONISECACHE; 317 318 /* ignore return code */ 319 (void) VOP_IOCTL(ump->devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED); 320 321 return 0; 322 } 323 324 /* --------------------------------------------------------------------- */ 325 326 /* track/session searching for mounting */ 327 int 328 udf_search_tracks(struct udf_mount *ump, struct udf_args *args, 329 int *first_tracknr, int *last_tracknr) 330 { 331 struct mmc_trackinfo trackinfo; 332 uint32_t tracknr, start_track, num_tracks; 333 int error; 334 335 /* if negative, sessionnr is relative to last session */ 336 if (args->sessionnr < 0) { 337 args->sessionnr += ump->discinfo.num_sessions; 338 } 339 340 /* sanity */ 341 if (args->sessionnr < 0) 342 args->sessionnr = 0; 343 if (args->sessionnr > ump->discinfo.num_sessions) 344 args->sessionnr = ump->discinfo.num_sessions; 345 346 /* search the tracks for this session, zero session nr indicates last */ 347 if (args->sessionnr == 0) 348 args->sessionnr = ump->discinfo.num_sessions; 349 if (ump->discinfo.last_session_state == MMC_STATE_EMPTY) 350 args->sessionnr--; 351 352 /* sanity again */ 353 if (args->sessionnr < 0) 354 args->sessionnr = 0; 355 356 /* search the first and last track of the specified session */ 357 num_tracks = ump->discinfo.num_tracks; 358 start_track = ump->discinfo.first_track; 359 360 /* search for first track of this session */ 361 for (tracknr = start_track; tracknr <= num_tracks; tracknr++) { 362 /* get track info */ 363 trackinfo.tracknr = tracknr; 364 error = udf_update_trackinfo(ump, &trackinfo); 365 if (error) 366 return error; 367 368 if (trackinfo.sessionnr == args->sessionnr) 369 break; 370 } 371 *first_tracknr = tracknr; 372 373 /* search for last track of this session */ 374 for (;tracknr <= num_tracks; tracknr++) { 375 /* get track info */ 376 trackinfo.tracknr = tracknr; 377 error = udf_update_trackinfo(ump, &trackinfo); 378 if (error || (trackinfo.sessionnr != args->sessionnr)) { 379 tracknr--; 380 break; 381 } 382 } 383 if (tracknr > num_tracks) 384 tracknr--; 385 386 *last_tracknr = tracknr; 387 388 if (*last_tracknr < *first_tracknr) { 389 printf( "udf_search_tracks: sanity check on drive+disc failed, " 390 "drive returned garbage\n"); 391 return EINVAL; 392 } 393 394 assert(*last_tracknr >= *first_tracknr); 395 return 0; 396 } 397 398 399 /* 400 * NOTE: this is the only routine in this file that directly peeks into the 401 * metadata file but since its at a larval state of the mount it can't hurt. 402 * 403 * XXX candidate for udf_allocation.c 404 * XXX clean me up!, change to new node reading code. 
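 *
 * It walks the short_ad/long_ad extents recorded in the metadata file's
 * (e)fe and, when one of them lies completely inside the given track,
 * remembers that track in ump->metadata_track.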
405 */ 406 407 static void 408 udf_check_track_metadata_overlap(struct udf_mount *ump, 409 struct mmc_trackinfo *trackinfo) 410 { 411 struct part_desc *part; 412 struct file_entry *fe; 413 struct extfile_entry *efe; 414 struct short_ad *s_ad; 415 struct long_ad *l_ad; 416 uint32_t track_start, track_end; 417 uint32_t phys_part_start, phys_part_end, part_start, part_end; 418 uint32_t sector_size, len, alloclen, plb_num; 419 uint8_t *pos; 420 int addr_type, icblen, icbflags, flags; 421 422 /* get our track extents */ 423 track_start = trackinfo->track_start; 424 track_end = track_start + trackinfo->track_size; 425 426 /* get our base partition extent */ 427 part = ump->partitions[ump->metadata_part]; 428 phys_part_start = udf_rw32(part->start_loc); 429 phys_part_end = phys_part_start + udf_rw32(part->part_len); 430 431 /* no use if its outside the physical partition */ 432 if ((phys_part_start >= track_end) || (phys_part_end < track_start)) 433 return; 434 435 /* 436 * now follow all extents in the fe/efe to see if they refer to this 437 * track 438 */ 439 440 sector_size = ump->discinfo.sector_size; 441 442 /* XXX should we claim exclusive access to the metafile ? */ 443 /* TODO: move to new node read code */ 444 fe = ump->metadata_node->fe; 445 efe = ump->metadata_node->efe; 446 if (fe) { 447 alloclen = udf_rw32(fe->l_ad); 448 pos = &fe->data[0] + udf_rw32(fe->l_ea); 449 icbflags = udf_rw16(fe->icbtag.flags); 450 } else { 451 assert(efe); 452 alloclen = udf_rw32(efe->l_ad); 453 pos = &efe->data[0] + udf_rw32(efe->l_ea); 454 icbflags = udf_rw16(efe->icbtag.flags); 455 } 456 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK; 457 458 while (alloclen) { 459 if (addr_type == UDF_ICB_SHORT_ALLOC) { 460 icblen = sizeof(struct short_ad); 461 s_ad = (struct short_ad *) pos; 462 len = udf_rw32(s_ad->len); 463 plb_num = udf_rw32(s_ad->lb_num); 464 } else { 465 /* should not be present, but why not */ 466 icblen = sizeof(struct long_ad); 467 l_ad = (struct long_ad *) pos; 468 len = udf_rw32(l_ad->len); 469 plb_num = udf_rw32(l_ad->loc.lb_num); 470 /* pvpart_num = udf_rw16(l_ad->loc.part_num); */ 471 } 472 /* process extent */ 473 flags = UDF_EXT_FLAGS(len); 474 len = UDF_EXT_LEN(len); 475 476 part_start = phys_part_start + plb_num; 477 part_end = part_start + (len / sector_size); 478 479 if ((part_start >= track_start) && (part_end <= track_end)) { 480 /* extent is enclosed within this track */ 481 ump->metadata_track = *trackinfo; 482 return; 483 } 484 485 pos += icblen; 486 alloclen -= icblen; 487 } 488 } 489 490 491 int 492 udf_search_writing_tracks(struct udf_mount *ump) 493 { 494 struct mmc_trackinfo trackinfo; 495 struct part_desc *part; 496 uint32_t tracknr, start_track, num_tracks; 497 uint32_t track_start, track_end, part_start, part_end; 498 int error; 499 500 /* 501 * in the CD/(HD)DVD/BD recordable device model a few tracks within 502 * the last session might be open but in the UDF device model at most 503 * three tracks can be open: a reserved track for delayed ISO VRS 504 * writing, a data track and a metadata track. We search here for the 505 * data track and the metadata track. Note that the reserved track is 506 * troublesome but can be detected by its small size of < 512 sectors. 
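	 * Tracks without a valid next writable address (NWA) are skipped in
	 * the scan below.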
507 */ 508 509 num_tracks = ump->discinfo.num_tracks; 510 start_track = ump->discinfo.first_track; 511 512 /* fetch info on first and possibly only track */ 513 trackinfo.tracknr = start_track; 514 error = udf_update_trackinfo(ump, &trackinfo); 515 if (error) 516 return error; 517 518 /* copy results to our mount point */ 519 ump->data_track = trackinfo; 520 ump->metadata_track = trackinfo; 521 522 /* if not sequential, we're done */ 523 if (num_tracks == 1) 524 return 0; 525 526 for (tracknr = start_track;tracknr <= num_tracks; tracknr++) { 527 /* get track info */ 528 trackinfo.tracknr = tracknr; 529 error = udf_update_trackinfo(ump, &trackinfo); 530 if (error) 531 return error; 532 533 if ((trackinfo.flags & MMC_TRACKINFO_NWA_VALID) == 0) 534 continue; 535 536 track_start = trackinfo.track_start; 537 track_end = track_start + trackinfo.track_size; 538 539 /* check for overlap on data partition */ 540 part = ump->partitions[ump->data_part]; 541 part_start = udf_rw32(part->start_loc); 542 part_end = part_start + udf_rw32(part->part_len); 543 if ((part_start < track_end) && (part_end > track_start)) { 544 ump->data_track = trackinfo; 545 /* TODO check if UDF partition data_part is writable */ 546 } 547 548 /* check for overlap on metadata partition */ 549 if ((ump->meta_alloc == UDF_ALLOC_METASEQUENTIAL) || 550 (ump->meta_alloc == UDF_ALLOC_METABITMAP)) { 551 udf_check_track_metadata_overlap(ump, &trackinfo); 552 } else { 553 ump->metadata_track = trackinfo; 554 } 555 } 556 557 if ((ump->data_track.flags & MMC_TRACKINFO_NWA_VALID) == 0) 558 return EROFS; 559 560 if ((ump->metadata_track.flags & MMC_TRACKINFO_NWA_VALID) == 0) 561 return EROFS; 562 563 return 0; 564 } 565 566 /* --------------------------------------------------------------------- */ 567 568 /* 569 * Check if the blob starts with a good UDF tag. Tags are protected by a 570 * checksum over the reader except one byte at position 4 that is the checksum 571 * itself. 572 */ 573 574 int 575 udf_check_tag(void *blob) 576 { 577 struct desc_tag *tag = blob; 578 uint8_t *pos, sum, cnt; 579 580 /* check TAG header checksum */ 581 pos = (uint8_t *) tag; 582 sum = 0; 583 584 for(cnt = 0; cnt < 16; cnt++) { 585 if (cnt != 4) 586 sum += *pos; 587 pos++; 588 } 589 if (sum != tag->cksum) { 590 /* bad tag header checksum; this is not a valid tag */ 591 return EINVAL; 592 } 593 594 return 0; 595 } 596 597 598 /* 599 * check tag payload will check descriptor CRC as specified. 600 * If the descriptor is too long, it will return EIO otherwise EINVAL. 
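 * The CRC covers desc_crc_len bytes starting directly after the descriptor
 * tag itself (UDF_DESC_TAG_LENGTH) and is calculated below with udf_cksum().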
601 */ 602 603 int 604 udf_check_tag_payload(void *blob, uint32_t max_length) 605 { 606 struct desc_tag *tag = blob; 607 uint16_t crc, crc_len; 608 609 crc_len = udf_rw16(tag->desc_crc_len); 610 611 /* check payload CRC if applicable */ 612 if (crc_len == 0) 613 return 0; 614 615 if (crc_len > max_length) 616 return EIO; 617 618 crc = udf_cksum(((uint8_t *) tag) + UDF_DESC_TAG_LENGTH, crc_len); 619 if (crc != udf_rw16(tag->desc_crc)) { 620 /* bad payload CRC; this is a broken tag */ 621 return EINVAL; 622 } 623 624 return 0; 625 } 626 627 628 void 629 udf_validate_tag_sum(void *blob) 630 { 631 struct desc_tag *tag = blob; 632 uint8_t *pos, sum, cnt; 633 634 /* calculate TAG header checksum */ 635 pos = (uint8_t *) tag; 636 sum = 0; 637 638 for(cnt = 0; cnt < 16; cnt++) { 639 if (cnt != 4) sum += *pos; 640 pos++; 641 } 642 tag->cksum = sum; /* 8 bit */ 643 } 644 645 646 /* assumes sector number of descriptor to be saved already present */ 647 void 648 udf_validate_tag_and_crc_sums(void *blob) 649 { 650 struct desc_tag *tag = blob; 651 uint8_t *btag = (uint8_t *) tag; 652 uint16_t crc, crc_len; 653 654 crc_len = udf_rw16(tag->desc_crc_len); 655 656 /* check payload CRC if applicable */ 657 if (crc_len > 0) { 658 crc = udf_cksum(btag + UDF_DESC_TAG_LENGTH, crc_len); 659 tag->desc_crc = udf_rw16(crc); 660 } 661 662 /* calculate TAG header checksum */ 663 udf_validate_tag_sum(blob); 664 } 665 666 /* --------------------------------------------------------------------- */ 667 668 /* 669 * XXX note the different semantics from udfclient: for FIDs it still rounds 670 * up to sectors. Use udf_fidsize() for a correct length. 671 */ 672 673 int 674 udf_tagsize(union dscrptr *dscr, uint32_t lb_size) 675 { 676 uint32_t size, tag_id, num_lb, elmsz; 677 678 tag_id = udf_rw16(dscr->tag.id); 679 680 switch (tag_id) { 681 case TAGID_LOGVOL : 682 size = sizeof(struct logvol_desc) - 1; 683 size += udf_rw32(dscr->lvd.mt_l); 684 break; 685 case TAGID_UNALLOC_SPACE : 686 elmsz = sizeof(struct extent_ad); 687 size = sizeof(struct unalloc_sp_desc) - elmsz; 688 size += udf_rw32(dscr->usd.alloc_desc_num) * elmsz; 689 break; 690 case TAGID_FID : 691 size = UDF_FID_SIZE + dscr->fid.l_fi + udf_rw16(dscr->fid.l_iu); 692 size = (size + 3) & ~3; 693 break; 694 case TAGID_LOGVOL_INTEGRITY : 695 size = sizeof(struct logvol_int_desc) - sizeof(uint32_t); 696 size += udf_rw32(dscr->lvid.l_iu); 697 size += (2 * udf_rw32(dscr->lvid.num_part) * sizeof(uint32_t)); 698 break; 699 case TAGID_SPACE_BITMAP : 700 size = sizeof(struct space_bitmap_desc) - 1; 701 size += udf_rw32(dscr->sbd.num_bytes); 702 break; 703 case TAGID_SPARING_TABLE : 704 elmsz = sizeof(struct spare_map_entry); 705 size = sizeof(struct udf_sparing_table) - elmsz; 706 size += udf_rw16(dscr->spt.rt_l) * elmsz; 707 break; 708 case TAGID_FENTRY : 709 size = sizeof(struct file_entry); 710 size += udf_rw32(dscr->fe.l_ea) + udf_rw32(dscr->fe.l_ad)-1; 711 break; 712 case TAGID_EXTFENTRY : 713 size = sizeof(struct extfile_entry); 714 size += udf_rw32(dscr->efe.l_ea) + udf_rw32(dscr->efe.l_ad)-1; 715 break; 716 case TAGID_FSD : 717 size = sizeof(struct fileset_desc); 718 break; 719 default : 720 size = sizeof(union dscrptr); 721 break; 722 } 723 724 if ((size == 0) || (lb_size == 0)) 725 return 0; 726 727 if (lb_size == 1) 728 return size; 729 730 /* round up in sectors */ 731 num_lb = (size + lb_size -1) / lb_size; 732 return num_lb * lb_size; 733 } 734 735 736 int 737 udf_fidsize(struct fileid_desc *fid) 738 { 739 uint32_t size; 740 741 if (udf_rw16(fid->tag.id) != 
TAGID_FID)
		panic("got udf_fidsize on non FID\n");

	size = UDF_FID_SIZE + fid->l_fi + udf_rw16(fid->l_iu);
	size = (size + 3) & ~3;

	return size;
}

/* --------------------------------------------------------------------- */

void
udf_lock_node(struct udf_node *udf_node, int flag, char const *fname, const int lineno)
{
	int ret;

	mutex_enter(&udf_node->node_mutex);
	/* wait until free */
	while (udf_node->i_flags & IN_LOCKED) {
		ret = cv_timedwait(&udf_node->node_lock, &udf_node->node_mutex, hz/8);
		/* TODO check if we should return error; abort */
		if (ret == EWOULDBLOCK) {
			DPRINTF(LOCKING, ("udf_lock_node: udf_node %p would block "
				"wanted at %s:%d, previously locked at %s:%d\n",
				udf_node, fname, lineno,
				udf_node->lock_fname, udf_node->lock_lineno));
		}
	}
	/* grab */
	udf_node->i_flags |= IN_LOCKED | flag;
	/* debug */
	udf_node->lock_fname = fname;
	udf_node->lock_lineno = lineno;

	mutex_exit(&udf_node->node_mutex);
}


void
udf_unlock_node(struct udf_node *udf_node, int flag)
{
	mutex_enter(&udf_node->node_mutex);
	udf_node->i_flags &= ~(IN_LOCKED | flag);
	cv_broadcast(&udf_node->node_lock);
	mutex_exit(&udf_node->node_mutex);
}


/* --------------------------------------------------------------------- */

static int
udf_read_anchor(struct udf_mount *ump, uint32_t sector, struct anchor_vdp **dst)
{
	int error;

	error = udf_read_phys_dscr(ump, sector, M_UDFVOLD,
			(union dscrptr **) dst);
	if (!error) {
		/* blank terminator blocks are not allowed here */
		if (*dst == NULL)
			return ENOENT;
		if (udf_rw16((*dst)->tag.id) != TAGID_ANCHOR) {
			error = ENOENT;
			free(*dst, M_UDFVOLD);
			*dst = NULL;
			DPRINTF(VOLUMES, ("Not an anchor\n"));
		}
	}

	return error;
}


int
udf_read_anchors(struct udf_mount *ump)
{
	struct udf_args *args = &ump->mount_args;
	struct mmc_trackinfo first_track;
	struct mmc_trackinfo second_track;
	struct mmc_trackinfo last_track;
	struct anchor_vdp **anchorsp;
	uint32_t track_start;
	uint32_t track_end;
	uint32_t positions[4];
	int first_tracknr, last_tracknr;
	int error, anch, ok, first_anchor;

	/* search the first and last track of the specified session */
	error = udf_search_tracks(ump, args, &first_tracknr, &last_tracknr);
	if (!error) {
		first_track.tracknr = first_tracknr;
		error = udf_update_trackinfo(ump, &first_track);
	}
	if (!error) {
		last_track.tracknr = last_tracknr;
		error = udf_update_trackinfo(ump, &last_track);
	}
	if ((!error) && (first_tracknr != last_tracknr)) {
		second_track.tracknr = first_tracknr+1;
		error = udf_update_trackinfo(ump, &second_track);
	}
	if (error) {
		printf("UDF mount: reading disc geometry failed\n");
		return 0;
	}

	track_start = first_track.track_start;

	/* `end' is not as straightforward as start. */
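	/*
	 * For sequential media the code below prefers the last recorded
	 * address and otherwise the next writable address minus the link
	 * block penalty.
	 */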
	track_end = last_track.track_start
		+ last_track.track_size - last_track.free_blocks - 1;

	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* end of track is not straightforward here */
		if (last_track.flags & MMC_TRACKINFO_LRA_VALID)
			track_end = last_track.last_recorded;
		else if (last_track.flags & MMC_TRACKINFO_NWA_VALID)
			track_end = last_track.next_writable
				- ump->discinfo.link_block_penalty;
	}

	/* it's no use reading a blank track */
	first_anchor = 0;
	if (first_track.flags & MMC_TRACKINFO_BLANK)
		first_anchor = 1;

	/* get our packet size */
	ump->packet_size = first_track.packet_size;
	if (first_track.flags & MMC_TRACKINFO_BLANK)
		ump->packet_size = second_track.packet_size;

	if (ump->packet_size <= 1) {
		/* take max, but not bigger than 64 */
		ump->packet_size = MAXPHYS / ump->discinfo.sector_size;
		ump->packet_size = MIN(ump->packet_size, 64);
	}
	KASSERT(ump->packet_size >= 1);

	/* read anchors start+256, start+512, end-256, end */
	positions[0] = track_start+256;
	positions[1] = track_end-256;
	positions[2] = track_end;
	positions[3] = track_start+512;	/* [UDF 2.60/6.11.2] */
	/* XXX shouldn't +512 be preferred above +256 for compat with Roxio CD */

	ok = 0;
	anchorsp = ump->anchors;
	for (anch = first_anchor; anch < 4; anch++) {
		DPRINTF(VOLUMES, ("Read anchor %d at sector %d\n", anch,
			positions[anch]));
		error = udf_read_anchor(ump, positions[anch], anchorsp);
		if (!error) {
			anchorsp++;
			ok++;
		}
	}

	/* VATs are only recorded on sequential media, but initialise */
	ump->first_possible_vat_location = track_start + 2;
	ump->last_possible_vat_location = track_end + last_track.packet_size;

	return ok;
}

/* --------------------------------------------------------------------- */

/* we don't try to be smart; we just record the parts */
#define UDF_UPDATE_DSCR(name, dscr) \
	if (name) \
		free(name, M_UDFVOLD); \
	name = dscr;

static int
udf_process_vds_descriptor(struct udf_mount *ump, union dscrptr *dscr)
{
	struct part_desc *part;
	uint16_t phys_part, raw_phys_part;

	DPRINTF(VOLUMES, ("\tprocessing VDS descr %d\n",
		udf_rw16(dscr->tag.id)));
	switch (udf_rw16(dscr->tag.id)) {
	case TAGID_PRI_VOL :	/* primary partition */
		UDF_UPDATE_DSCR(ump->primary_vol, &dscr->pvd);
		break;
	case TAGID_LOGVOL :	/* logical volume */
		UDF_UPDATE_DSCR(ump->logical_vol, &dscr->lvd);
		break;
	case TAGID_UNALLOC_SPACE :	/* unallocated space */
		UDF_UPDATE_DSCR(ump->unallocated, &dscr->usd);
		break;
	case TAGID_IMP_VOL :	/* implementation */
		/* XXX do we care about multiple impl. descr ? */
		UDF_UPDATE_DSCR(ump->implementation, &dscr->ivd);
		break;
	case TAGID_PARTITION :	/* physical partition */
		/* not much use if it's not allocated */
		if ((udf_rw16(dscr->pd.flags) & UDF_PART_FLAG_ALLOCATED) == 0) {
			free(dscr, M_UDFVOLD);
			break;
		}

		/*
		 * BUGALERT: some rogue implementations use random physical
		 * partition numbers to break other implementations so lookup
		 * the number.
946 */ 947 raw_phys_part = udf_rw16(dscr->pd.part_num); 948 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) { 949 part = ump->partitions[phys_part]; 950 if (part == NULL) 951 break; 952 if (udf_rw16(part->part_num) == raw_phys_part) 953 break; 954 } 955 if (phys_part == UDF_PARTITIONS) { 956 free(dscr, M_UDFVOLD); 957 return EINVAL; 958 } 959 960 UDF_UPDATE_DSCR(ump->partitions[phys_part], &dscr->pd); 961 break; 962 case TAGID_VOL : /* volume space extender; rare */ 963 DPRINTF(VOLUMES, ("VDS extender ignored\n")); 964 free(dscr, M_UDFVOLD); 965 break; 966 default : 967 DPRINTF(VOLUMES, ("Unhandled VDS type %d\n", 968 udf_rw16(dscr->tag.id))); 969 free(dscr, M_UDFVOLD); 970 } 971 972 return 0; 973 } 974 #undef UDF_UPDATE_DSCR 975 976 /* --------------------------------------------------------------------- */ 977 978 static int 979 udf_read_vds_extent(struct udf_mount *ump, uint32_t loc, uint32_t len) 980 { 981 union dscrptr *dscr; 982 uint32_t sector_size, dscr_size; 983 int error; 984 985 sector_size = ump->discinfo.sector_size; 986 987 /* loc is sectornr, len is in bytes */ 988 error = EIO; 989 while (len) { 990 error = udf_read_phys_dscr(ump, loc, M_UDFVOLD, &dscr); 991 if (error) 992 return error; 993 994 /* blank block is a terminator */ 995 if (dscr == NULL) 996 return 0; 997 998 /* TERM descriptor is a terminator */ 999 if (udf_rw16(dscr->tag.id) == TAGID_TERM) { 1000 free(dscr, M_UDFVOLD); 1001 return 0; 1002 } 1003 1004 /* process all others */ 1005 dscr_size = udf_tagsize(dscr, sector_size); 1006 error = udf_process_vds_descriptor(ump, dscr); 1007 if (error) { 1008 free(dscr, M_UDFVOLD); 1009 break; 1010 } 1011 assert((dscr_size % sector_size) == 0); 1012 1013 len -= dscr_size; 1014 loc += dscr_size / sector_size; 1015 } 1016 1017 return error; 1018 } 1019 1020 1021 int 1022 udf_read_vds_space(struct udf_mount *ump) 1023 { 1024 /* struct udf_args *args = &ump->mount_args; */ 1025 struct anchor_vdp *anchor, *anchor2; 1026 size_t size; 1027 uint32_t main_loc, main_len; 1028 uint32_t reserve_loc, reserve_len; 1029 int error; 1030 1031 /* 1032 * read in VDS space provided by the anchors; if one descriptor read 1033 * fails, try the mirror sector. 1034 * 1035 * check if 2nd anchor is different from 1st; if so, go for 2nd. This 1036 * avoids the `compatibility features' of DirectCD that may confuse 1037 * stuff completely. 1038 */ 1039 1040 anchor = ump->anchors[0]; 1041 anchor2 = ump->anchors[1]; 1042 assert(anchor); 1043 1044 if (anchor2) { 1045 size = sizeof(struct extent_ad); 1046 if (memcmp(&anchor->main_vds_ex, &anchor2->main_vds_ex, size)) 1047 anchor = anchor2; 1048 /* reserve is specified to be a literal copy of main */ 1049 } 1050 1051 main_loc = udf_rw32(anchor->main_vds_ex.loc); 1052 main_len = udf_rw32(anchor->main_vds_ex.len); 1053 1054 reserve_loc = udf_rw32(anchor->reserve_vds_ex.loc); 1055 reserve_len = udf_rw32(anchor->reserve_vds_ex.len); 1056 1057 error = udf_read_vds_extent(ump, main_loc, main_len); 1058 if (error) { 1059 printf("UDF mount: reading in reserve VDS extent\n"); 1060 error = udf_read_vds_extent(ump, reserve_loc, reserve_len); 1061 } 1062 1063 return error; 1064 } 1065 1066 /* --------------------------------------------------------------------- */ 1067 1068 /* 1069 * Read in the logical volume integrity sequence pointed to by our logical 1070 * volume descriptor. Its a sequence that can be extended using fields in the 1071 * integrity descriptor itself. 
On sequential media only one is found, on 1072 * rewritable media a sequence of descriptors can be found as a form of 1073 * history keeping and on non sequential write-once media the chain is vital 1074 * to allow more and more descriptors to be written. The last descriptor 1075 * written in an extent needs to claim space for a new extent. 1076 */ 1077 1078 static int 1079 udf_retrieve_lvint(struct udf_mount *ump) 1080 { 1081 union dscrptr *dscr; 1082 struct logvol_int_desc *lvint; 1083 struct udf_lvintq *trace; 1084 uint32_t lb_size, lbnum, len; 1085 int dscr_type, error, trace_len; 1086 1087 lb_size = udf_rw32(ump->logical_vol->lb_size); 1088 len = udf_rw32(ump->logical_vol->integrity_seq_loc.len); 1089 lbnum = udf_rw32(ump->logical_vol->integrity_seq_loc.loc); 1090 1091 /* clean trace */ 1092 memset(ump->lvint_trace, 0, 1093 UDF_LVDINT_SEGMENTS * sizeof(struct udf_lvintq)); 1094 1095 trace_len = 0; 1096 trace = ump->lvint_trace; 1097 trace->start = lbnum; 1098 trace->end = lbnum + len/lb_size; 1099 trace->pos = 0; 1100 trace->wpos = 0; 1101 1102 lvint = NULL; 1103 dscr = NULL; 1104 error = 0; 1105 while (len) { 1106 trace->pos = lbnum - trace->start; 1107 trace->wpos = trace->pos + 1; 1108 1109 /* read in our integrity descriptor */ 1110 error = udf_read_phys_dscr(ump, lbnum, M_UDFVOLD, &dscr); 1111 if (!error) { 1112 if (dscr == NULL) { 1113 trace->wpos = trace->pos; 1114 break; /* empty terminates */ 1115 } 1116 dscr_type = udf_rw16(dscr->tag.id); 1117 if (dscr_type == TAGID_TERM) { 1118 trace->wpos = trace->pos; 1119 break; /* clean terminator */ 1120 } 1121 if (dscr_type != TAGID_LOGVOL_INTEGRITY) { 1122 /* fatal... corrupt disc */ 1123 error = ENOENT; 1124 break; 1125 } 1126 if (lvint) 1127 free(lvint, M_UDFVOLD); 1128 lvint = &dscr->lvid; 1129 dscr = NULL; 1130 } /* else hope for the best... maybe the next is ok */ 1131 1132 DPRINTFIF(VOLUMES, lvint, ("logvol integrity read, state %s\n", 1133 udf_rw32(lvint->integrity_type) ? "CLOSED" : "OPEN")); 1134 1135 /* proceed sequential */ 1136 lbnum += 1; 1137 len -= lb_size; 1138 1139 /* are we linking to a new piece? */ 1140 if (dscr && lvint->next_extent.len) { 1141 len = udf_rw32(lvint->next_extent.len); 1142 lbnum = udf_rw32(lvint->next_extent.loc); 1143 1144 if (trace_len >= UDF_LVDINT_SEGMENTS-1) { 1145 /* IEK! segment link full... */ 1146 DPRINTF(VOLUMES, ("lvdint segments full\n")); 1147 error = EINVAL; 1148 } else { 1149 trace++; 1150 trace_len++; 1151 1152 trace->start = lbnum; 1153 trace->end = lbnum + len/lb_size; 1154 trace->pos = 0; 1155 trace->wpos = 0; 1156 } 1157 } 1158 } 1159 1160 /* clean up the mess, esp. 
when there is an error */ 1161 if (dscr) 1162 free(dscr, M_UDFVOLD); 1163 1164 if (error && lvint) { 1165 free(lvint, M_UDFVOLD); 1166 lvint = NULL; 1167 } 1168 1169 if (!lvint) 1170 error = ENOENT; 1171 1172 ump->logvol_integrity = lvint; 1173 return error; 1174 } 1175 1176 1177 static int 1178 udf_loose_lvint_history(struct udf_mount *ump) 1179 { 1180 union dscrptr **bufs, *dscr, *last_dscr; 1181 struct udf_lvintq *trace, *in_trace, *out_trace; 1182 struct logvol_int_desc *lvint; 1183 uint32_t in_ext, in_pos, in_len; 1184 uint32_t out_ext, out_wpos, out_len; 1185 uint32_t lb_size, packet_size, lb_num; 1186 uint32_t len, start; 1187 int ext, minext, extlen, cnt, cpy_len, dscr_type; 1188 int losing; 1189 int error; 1190 1191 DPRINTF(VOLUMES, ("need to lose some lvint history\n")); 1192 1193 lb_size = udf_rw32(ump->logical_vol->lb_size); 1194 packet_size = ump->data_track.packet_size; /* XXX data track */ 1195 1196 /* search smallest extent */ 1197 trace = &ump->lvint_trace[0]; 1198 minext = trace->end - trace->start; 1199 for (ext = 1; ext < UDF_LVDINT_SEGMENTS; ext++) { 1200 trace = &ump->lvint_trace[ext]; 1201 extlen = trace->end - trace->start; 1202 if (extlen == 0) 1203 break; 1204 minext = MIN(minext, extlen); 1205 } 1206 losing = MIN(minext, UDF_LVINT_LOSSAGE); 1207 /* no sense wiping all */ 1208 if (losing == minext) 1209 losing--; 1210 1211 DPRINTF(VOLUMES, ("\tlosing %d entries\n", losing)); 1212 1213 /* get buffer for pieces */ 1214 bufs = malloc(UDF_LVDINT_SEGMENTS * sizeof(void *), M_TEMP, M_WAITOK); 1215 1216 in_ext = 0; 1217 in_pos = losing; 1218 in_trace = &ump->lvint_trace[in_ext]; 1219 in_len = in_trace->end - in_trace->start; 1220 out_ext = 0; 1221 out_wpos = 0; 1222 out_trace = &ump->lvint_trace[out_ext]; 1223 out_len = out_trace->end - out_trace->start; 1224 1225 last_dscr = NULL; 1226 for(;;) { 1227 out_trace->pos = out_wpos; 1228 out_trace->wpos = out_trace->pos; 1229 if (in_pos >= in_len) { 1230 in_ext++; 1231 in_pos = 0; 1232 in_trace = &ump->lvint_trace[in_ext]; 1233 in_len = in_trace->end - in_trace->start; 1234 } 1235 if (out_wpos >= out_len) { 1236 out_ext++; 1237 out_wpos = 0; 1238 out_trace = &ump->lvint_trace[out_ext]; 1239 out_len = out_trace->end - out_trace->start; 1240 } 1241 /* copy overlap contents */ 1242 cpy_len = MIN(in_len - in_pos, out_len - out_wpos); 1243 cpy_len = MIN(cpy_len, in_len - in_trace->pos); 1244 if (cpy_len == 0) 1245 break; 1246 1247 /* copy */ 1248 DPRINTF(VOLUMES, ("\treading %d lvid descriptors\n", cpy_len)); 1249 for (cnt = 0; cnt < cpy_len; cnt++) { 1250 /* read in our integrity descriptor */ 1251 lb_num = in_trace->start + in_pos + cnt; 1252 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, 1253 &dscr); 1254 if (error) { 1255 /* copy last one */ 1256 dscr = last_dscr; 1257 } 1258 bufs[cnt] = dscr; 1259 if (!error) { 1260 if (dscr == NULL) { 1261 out_trace->pos = out_wpos + cnt; 1262 out_trace->wpos = out_trace->pos; 1263 break; /* empty terminates */ 1264 } 1265 dscr_type = udf_rw16(dscr->tag.id); 1266 if (dscr_type == TAGID_TERM) { 1267 out_trace->pos = out_wpos + cnt; 1268 out_trace->wpos = out_trace->pos; 1269 break; /* clean terminator */ 1270 } 1271 if (dscr_type != TAGID_LOGVOL_INTEGRITY) { 1272 panic( "UDF integrity sequence " 1273 "corrupted while mounted!\n"); 1274 } 1275 last_dscr = dscr; 1276 } 1277 } 1278 1279 /* patch up if first entry was on error */ 1280 if (bufs[0] == NULL) { 1281 for (cnt = 0; cnt < cpy_len; cnt++) 1282 if (bufs[cnt] != NULL) 1283 break; 1284 last_dscr = bufs[cnt]; 1285 for (; cnt > 0; cnt--) 
{ 1286 bufs[cnt] = last_dscr; 1287 } 1288 } 1289 1290 /* glue + write out */ 1291 DPRINTF(VOLUMES, ("\twriting %d lvid descriptors\n", cpy_len)); 1292 for (cnt = 0; cnt < cpy_len; cnt++) { 1293 lb_num = out_trace->start + out_wpos + cnt; 1294 lvint = &bufs[cnt]->lvid; 1295 1296 /* set continuation */ 1297 len = 0; 1298 start = 0; 1299 if (out_wpos + cnt == out_len) { 1300 /* get continuation */ 1301 trace = &ump->lvint_trace[out_ext+1]; 1302 len = trace->end - trace->start; 1303 start = trace->start; 1304 } 1305 lvint->next_extent.len = udf_rw32(len); 1306 lvint->next_extent.loc = udf_rw32(start); 1307 1308 lb_num = trace->start + trace->wpos; 1309 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR, 1310 bufs[cnt], lb_num, lb_num); 1311 DPRINTFIF(VOLUMES, error, 1312 ("error writing lvint lb_num\n")); 1313 } 1314 1315 /* free non repeating descriptors */ 1316 last_dscr = NULL; 1317 for (cnt = 0; cnt < cpy_len; cnt++) { 1318 if (bufs[cnt] != last_dscr) 1319 free(bufs[cnt], M_UDFVOLD); 1320 last_dscr = bufs[cnt]; 1321 } 1322 1323 /* advance */ 1324 in_pos += cpy_len; 1325 out_wpos += cpy_len; 1326 } 1327 1328 free(bufs, M_TEMP); 1329 1330 return 0; 1331 } 1332 1333 1334 static int 1335 udf_writeout_lvint(struct udf_mount *ump, int lvflag) 1336 { 1337 struct udf_lvintq *trace; 1338 struct timeval now_v; 1339 struct timespec now_s; 1340 uint32_t sector; 1341 int logvol_integrity; 1342 int space, error; 1343 1344 DPRINTF(VOLUMES, ("writing out logvol integrity descriptor\n")); 1345 1346 again: 1347 /* get free space in last chunk */ 1348 trace = ump->lvint_trace; 1349 while (trace->wpos > (trace->end - trace->start)) { 1350 DPRINTF(VOLUMES, ("skip : start = %d, end = %d, pos = %d, " 1351 "wpos = %d\n", trace->start, trace->end, 1352 trace->pos, trace->wpos)); 1353 trace++; 1354 } 1355 1356 /* check if there is space to append */ 1357 space = (trace->end - trace->start) - trace->wpos; 1358 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, " 1359 "space = %d\n", trace->start, trace->end, trace->pos, 1360 trace->wpos, space)); 1361 1362 /* get state */ 1363 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type); 1364 if (logvol_integrity == UDF_INTEGRITY_CLOSED) { 1365 if ((space < 3) && (lvflag & UDF_APPENDONLY_LVINT)) { 1366 /* don't allow this logvol to be opened */ 1367 /* TODO extent LVINT space if possible */ 1368 return EROFS; 1369 } 1370 } 1371 1372 if (space < 1) { 1373 if (lvflag & UDF_APPENDONLY_LVINT) 1374 return EROFS; 1375 /* loose history by re-writing extents */ 1376 error = udf_loose_lvint_history(ump); 1377 if (error) 1378 return error; 1379 goto again; 1380 } 1381 1382 /* update our integrity descriptor to identify us and timestamp it */ 1383 DPRINTF(VOLUMES, ("updating integrity descriptor\n")); 1384 microtime(&now_v); 1385 TIMEVAL_TO_TIMESPEC(&now_v, &now_s); 1386 udf_timespec_to_timestamp(&now_s, &ump->logvol_integrity->time); 1387 udf_set_regid(&ump->logvol_info->impl_id, IMPL_NAME); 1388 udf_add_impl_regid(ump, &ump->logvol_info->impl_id); 1389 1390 /* writeout integrity descriptor */ 1391 sector = trace->start + trace->wpos; 1392 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR, 1393 (union dscrptr *) ump->logvol_integrity, 1394 sector, sector); 1395 DPRINTF(VOLUMES, ("writeout lvint : error = %d\n", error)); 1396 if (error) 1397 return error; 1398 1399 /* advance write position */ 1400 trace->wpos++; space--; 1401 if (space >= 1) { 1402 /* append terminator */ 1403 sector = trace->start + trace->wpos; 1404 error = 
udf_write_terminator(ump, sector); 1405 1406 DPRINTF(VOLUMES, ("write terminator : error = %d\n", error)); 1407 } 1408 1409 space = (trace->end - trace->start) - trace->wpos; 1410 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, " 1411 "space = %d\n", trace->start, trace->end, trace->pos, 1412 trace->wpos, space)); 1413 DPRINTF(VOLUMES, ("finished writing out logvol integrity descriptor " 1414 "successfull\n")); 1415 1416 return error; 1417 } 1418 1419 /* --------------------------------------------------------------------- */ 1420 1421 static int 1422 udf_read_physical_partition_spacetables(struct udf_mount *ump) 1423 { 1424 union dscrptr *dscr; 1425 /* struct udf_args *args = &ump->mount_args; */ 1426 struct part_desc *partd; 1427 struct part_hdr_desc *parthdr; 1428 struct udf_bitmap *bitmap; 1429 uint32_t phys_part; 1430 uint32_t lb_num, len; 1431 int error, dscr_type; 1432 1433 /* unallocated space map */ 1434 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) { 1435 partd = ump->partitions[phys_part]; 1436 if (partd == NULL) 1437 continue; 1438 parthdr = &partd->_impl_use.part_hdr; 1439 1440 lb_num = udf_rw32(partd->start_loc); 1441 lb_num += udf_rw32(parthdr->unalloc_space_bitmap.lb_num); 1442 len = udf_rw32(parthdr->unalloc_space_bitmap.len); 1443 if (len == 0) 1444 continue; 1445 1446 DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num)); 1447 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr); 1448 if (!error && dscr) { 1449 /* analyse */ 1450 dscr_type = udf_rw16(dscr->tag.id); 1451 if (dscr_type == TAGID_SPACE_BITMAP) { 1452 DPRINTF(VOLUMES, ("Accepting space bitmap\n")); 1453 ump->part_unalloc_dscr[phys_part] = &dscr->sbd; 1454 1455 /* fill in ump->part_unalloc_bits */ 1456 bitmap = &ump->part_unalloc_bits[phys_part]; 1457 bitmap->blob = (uint8_t *) dscr; 1458 bitmap->bits = dscr->sbd.data; 1459 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits); 1460 bitmap->pages = NULL; /* TODO */ 1461 bitmap->data_pos = 0; 1462 bitmap->metadata_pos = 0; 1463 } else { 1464 free(dscr, M_UDFVOLD); 1465 1466 printf( "UDF mount: error reading unallocated " 1467 "space bitmap\n"); 1468 return EROFS; 1469 } 1470 } else { 1471 /* blank not allowed */ 1472 printf("UDF mount: blank unallocated space bitmap\n"); 1473 return EROFS; 1474 } 1475 } 1476 1477 /* unallocated space table (not supported) */ 1478 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) { 1479 partd = ump->partitions[phys_part]; 1480 if (partd == NULL) 1481 continue; 1482 parthdr = &partd->_impl_use.part_hdr; 1483 1484 len = udf_rw32(parthdr->unalloc_space_table.len); 1485 if (len) { 1486 printf("UDF mount: space tables not supported\n"); 1487 return EROFS; 1488 } 1489 } 1490 1491 /* freed space map */ 1492 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) { 1493 partd = ump->partitions[phys_part]; 1494 if (partd == NULL) 1495 continue; 1496 parthdr = &partd->_impl_use.part_hdr; 1497 1498 /* freed space map */ 1499 lb_num = udf_rw32(partd->start_loc); 1500 lb_num += udf_rw32(parthdr->freed_space_bitmap.lb_num); 1501 len = udf_rw32(parthdr->freed_space_bitmap.len); 1502 if (len == 0) 1503 continue; 1504 1505 DPRINTF(VOLUMES, ("Read unalloc. 
space bitmap %d\n", lb_num)); 1506 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr); 1507 if (!error && dscr) { 1508 /* analyse */ 1509 dscr_type = udf_rw16(dscr->tag.id); 1510 if (dscr_type == TAGID_SPACE_BITMAP) { 1511 DPRINTF(VOLUMES, ("Accepting space bitmap\n")); 1512 ump->part_freed_dscr[phys_part] = &dscr->sbd; 1513 1514 /* fill in ump->part_freed_bits */ 1515 bitmap = &ump->part_unalloc_bits[phys_part]; 1516 bitmap->blob = (uint8_t *) dscr; 1517 bitmap->bits = dscr->sbd.data; 1518 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits); 1519 bitmap->pages = NULL; /* TODO */ 1520 bitmap->data_pos = 0; 1521 bitmap->metadata_pos = 0; 1522 } else { 1523 free(dscr, M_UDFVOLD); 1524 1525 printf( "UDF mount: error reading freed " 1526 "space bitmap\n"); 1527 return EROFS; 1528 } 1529 } else { 1530 /* blank not allowed */ 1531 printf("UDF mount: blank freed space bitmap\n"); 1532 return EROFS; 1533 } 1534 } 1535 1536 /* freed space table (not supported) */ 1537 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) { 1538 partd = ump->partitions[phys_part]; 1539 if (partd == NULL) 1540 continue; 1541 parthdr = &partd->_impl_use.part_hdr; 1542 1543 len = udf_rw32(parthdr->freed_space_table.len); 1544 if (len) { 1545 printf("UDF mount: space tables not supported\n"); 1546 return EROFS; 1547 } 1548 } 1549 1550 return 0; 1551 } 1552 1553 1554 /* TODO implement async writeout */ 1555 int 1556 udf_write_physical_partition_spacetables(struct udf_mount *ump, int waitfor) 1557 { 1558 union dscrptr *dscr; 1559 /* struct udf_args *args = &ump->mount_args; */ 1560 struct part_desc *partd; 1561 struct part_hdr_desc *parthdr; 1562 uint32_t phys_part; 1563 uint32_t lb_num, len, ptov; 1564 int error_all, error; 1565 1566 error_all = 0; 1567 /* unallocated space map */ 1568 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) { 1569 partd = ump->partitions[phys_part]; 1570 if (partd == NULL) 1571 continue; 1572 parthdr = &partd->_impl_use.part_hdr; 1573 1574 ptov = udf_rw32(partd->start_loc); 1575 lb_num = udf_rw32(parthdr->unalloc_space_bitmap.lb_num); 1576 len = udf_rw32(parthdr->unalloc_space_bitmap.len); 1577 if (len == 0) 1578 continue; 1579 1580 DPRINTF(VOLUMES, ("Write unalloc. space bitmap %d\n", 1581 lb_num + ptov)); 1582 dscr = (union dscrptr *) ump->part_unalloc_dscr[phys_part]; 1583 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR, 1584 (union dscrptr *) dscr, 1585 ptov + lb_num, lb_num); 1586 if (error) { 1587 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error)); 1588 error_all = error; 1589 } 1590 } 1591 1592 /* freed space map */ 1593 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) { 1594 partd = ump->partitions[phys_part]; 1595 if (partd == NULL) 1596 continue; 1597 parthdr = &partd->_impl_use.part_hdr; 1598 1599 /* freed space map */ 1600 ptov = udf_rw32(partd->start_loc); 1601 lb_num = udf_rw32(parthdr->freed_space_bitmap.lb_num); 1602 len = udf_rw32(parthdr->freed_space_bitmap.len); 1603 if (len == 0) 1604 continue; 1605 1606 DPRINTF(VOLUMES, ("Write freed space bitmap %d\n", 1607 lb_num + ptov)); 1608 dscr = (union dscrptr *) ump->part_freed_dscr[phys_part]; 1609 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR, 1610 (union dscrptr *) dscr, 1611 ptov + lb_num, lb_num); 1612 if (error) { 1613 DPRINTF(VOLUMES, ("\tfailed!! 
(error %d)\n", error)); 1614 error_all = error; 1615 } 1616 } 1617 1618 return error_all; 1619 } 1620 1621 1622 static int 1623 udf_read_metadata_partition_spacetable(struct udf_mount *ump) 1624 { 1625 struct udf_node *bitmap_node; 1626 union dscrptr *dscr; 1627 struct udf_bitmap *bitmap; 1628 uint64_t inflen; 1629 int error, dscr_type; 1630 1631 bitmap_node = ump->metadatabitmap_node; 1632 1633 /* only read in when metadata bitmap node is read in */ 1634 if (bitmap_node == NULL) 1635 return 0; 1636 1637 if (bitmap_node->fe) { 1638 inflen = udf_rw64(bitmap_node->fe->inf_len); 1639 } else { 1640 KASSERT(bitmap_node->efe); 1641 inflen = udf_rw64(bitmap_node->efe->inf_len); 1642 } 1643 1644 DPRINTF(VOLUMES, ("Reading metadata space bitmap for " 1645 "%"PRIu64" bytes\n", inflen)); 1646 1647 /* allocate space for bitmap */ 1648 dscr = malloc(inflen, M_UDFVOLD, M_CANFAIL | M_WAITOK); 1649 if (!dscr) 1650 return ENOMEM; 1651 1652 /* set vnode type to regular file or we can't read from it! */ 1653 bitmap_node->vnode->v_type = VREG; 1654 1655 /* read in complete metadata bitmap file */ 1656 error = vn_rdwr(UIO_READ, bitmap_node->vnode, 1657 dscr, 1658 inflen, 0, 1659 UIO_SYSSPACE, 1660 IO_SYNC | IO_NODELOCKED | IO_ALTSEMANTICS, FSCRED, 1661 NULL, NULL); 1662 if (error) { 1663 DPRINTF(VOLUMES, ("Error reading metadata space bitmap\n")); 1664 goto errorout; 1665 } 1666 1667 /* analyse */ 1668 dscr_type = udf_rw16(dscr->tag.id); 1669 if (dscr_type == TAGID_SPACE_BITMAP) { 1670 DPRINTF(VOLUMES, ("Accepting metadata space bitmap\n")); 1671 ump->metadata_unalloc_dscr = &dscr->sbd; 1672 1673 /* fill in bitmap bits */ 1674 bitmap = &ump->metadata_unalloc_bits; 1675 bitmap->blob = (uint8_t *) dscr; 1676 bitmap->bits = dscr->sbd.data; 1677 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits); 1678 bitmap->pages = NULL; /* TODO */ 1679 bitmap->data_pos = 0; 1680 bitmap->metadata_pos = 0; 1681 } else { 1682 DPRINTF(VOLUMES, ("No valid bitmap found!\n")); 1683 goto errorout; 1684 } 1685 1686 return 0; 1687 1688 errorout: 1689 free(dscr, M_UDFVOLD); 1690 printf( "UDF mount: error reading unallocated " 1691 "space bitmap for metadata partition\n"); 1692 return EROFS; 1693 } 1694 1695 1696 int 1697 udf_write_metadata_partition_spacetable(struct udf_mount *ump, int waitfor) 1698 { 1699 struct udf_node *bitmap_node; 1700 union dscrptr *dscr; 1701 uint64_t inflen, new_inflen; 1702 int dummy, error; 1703 1704 bitmap_node = ump->metadatabitmap_node; 1705 1706 /* only write out when metadata bitmap node is known */ 1707 if (bitmap_node == NULL) 1708 return 0; 1709 1710 if (bitmap_node->fe) { 1711 inflen = udf_rw64(bitmap_node->fe->inf_len); 1712 } else { 1713 KASSERT(bitmap_node->efe); 1714 inflen = udf_rw64(bitmap_node->efe->inf_len); 1715 } 1716 1717 /* reduce length to zero */ 1718 dscr = (union dscrptr *) ump->metadata_unalloc_dscr; 1719 new_inflen = udf_tagsize(dscr, 1); 1720 1721 DPRINTF(VOLUMES, ("Resize and write out metadata space bitmap from " 1722 "%"PRIu64" to %"PRIu64" bytes\n", inflen, new_inflen)); 1723 1724 error = udf_resize_node(bitmap_node, 0, &dummy); 1725 if (error) 1726 printf("Error resizing metadata space bitmap\n"); 1727 1728 error = vn_rdwr(UIO_WRITE, bitmap_node->vnode, 1729 dscr, 1730 new_inflen, 0, 1731 UIO_SYSSPACE, 1732 IO_NODELOCKED | IO_ALTSEMANTICS, FSCRED, 1733 NULL, NULL); 1734 1735 bitmap_node->i_flags |= IN_MODIFIED; 1736 vflushbuf(bitmap_node->vnode, 1 /* sync */); 1737 error = VOP_FSYNC(bitmap_node->vnode, 1738 FSCRED, FSYNC_WAIT, 0, 0); 1739 1740 if (error) 1741 printf( "Error 
writing out metadata partition unalloced " 1742 "space bitmap!\n"); 1743 1744 return error; 1745 } 1746 1747 1748 /* --------------------------------------------------------------------- */ 1749 1750 /* 1751 * Checks if ump's vds information is correct and complete 1752 */ 1753 1754 int 1755 udf_process_vds(struct udf_mount *ump) { 1756 union udf_pmap *mapping; 1757 /* struct udf_args *args = &ump->mount_args; */ 1758 struct logvol_int_desc *lvint; 1759 struct udf_logvol_info *lvinfo; 1760 struct part_desc *part; 1761 uint32_t n_pm, mt_l; 1762 uint8_t *pmap_pos; 1763 char *domain_name, *map_name; 1764 const char *check_name; 1765 char bits[128]; 1766 int pmap_stype, pmap_size; 1767 int pmap_type, log_part, phys_part, raw_phys_part; 1768 int n_phys, n_virt, n_spar, n_meta; 1769 int len, error; 1770 1771 if (ump == NULL) 1772 return ENOENT; 1773 1774 /* we need at least an anchor (trivial, but for safety) */ 1775 if (ump->anchors[0] == NULL) 1776 return EINVAL; 1777 1778 /* we need at least one primary and one logical volume descriptor */ 1779 if ((ump->primary_vol == NULL) || (ump->logical_vol) == NULL) 1780 return EINVAL; 1781 1782 /* we need at least one partition descriptor */ 1783 if (ump->partitions[0] == NULL) 1784 return EINVAL; 1785 1786 /* check logical volume sector size verses device sector size */ 1787 if (udf_rw32(ump->logical_vol->lb_size) != ump->discinfo.sector_size) { 1788 printf("UDF mount: format violation, lb_size != sector size\n"); 1789 return EINVAL; 1790 } 1791 1792 /* check domain name */ 1793 domain_name = ump->logical_vol->domain_id.id; 1794 if (strncmp(domain_name, "*OSTA UDF Compliant", 20)) { 1795 printf("mount_udf: disc not OSTA UDF Compliant, aborting\n"); 1796 return EINVAL; 1797 } 1798 1799 /* retrieve logical volume integrity sequence */ 1800 error = udf_retrieve_lvint(ump); 1801 1802 /* 1803 * We need at least one logvol integrity descriptor recorded. Note 1804 * that its OK to have an open logical volume integrity here. The VAT 1805 * will close/update the integrity. 1806 */ 1807 if (ump->logvol_integrity == NULL) 1808 return EINVAL; 1809 1810 /* process derived structures */ 1811 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */ 1812 lvint = ump->logvol_integrity; 1813 lvinfo = (struct udf_logvol_info *) (&lvint->tables[2 * n_pm]); 1814 ump->logvol_info = lvinfo; 1815 1816 /* TODO check udf versions? */ 1817 1818 /* 1819 * check logvol mappings: effective virt->log partmap translation 1820 * check and recording of the mapping results. Saves expensive 1821 * strncmp() in tight places. 
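	 * The translation results are stored in ump->vtop[] and
	 * ump->vtop_tp[] further down.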
1822 */ 1823 DPRINTF(VOLUMES, ("checking logvol mappings\n")); 1824 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */ 1825 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */ 1826 pmap_pos = ump->logical_vol->maps; 1827 1828 if (n_pm > UDF_PMAPS) { 1829 printf("UDF mount: too many mappings\n"); 1830 return EINVAL; 1831 } 1832 1833 ump->data_part = ump->metadata_part = 0; 1834 n_phys = n_virt = n_spar = n_meta = 0; 1835 for (log_part = 0; log_part < n_pm; log_part++) { 1836 mapping = (union udf_pmap *) pmap_pos; 1837 pmap_stype = pmap_pos[0]; 1838 pmap_size = pmap_pos[1]; 1839 switch (pmap_stype) { 1840 case 1: /* physical mapping */ 1841 /* volseq = udf_rw16(mapping->pm1.vol_seq_num); */ 1842 raw_phys_part = udf_rw16(mapping->pm1.part_num); 1843 pmap_type = UDF_VTOP_TYPE_PHYS; 1844 n_phys++; 1845 ump->data_part = log_part; 1846 ump->metadata_part = log_part; 1847 break; 1848 case 2: /* virtual/sparable/meta mapping */ 1849 map_name = mapping->pm2.part_id.id; 1850 /* volseq = udf_rw16(mapping->pm2.vol_seq_num); */ 1851 raw_phys_part = udf_rw16(mapping->pm2.part_num); 1852 pmap_type = UDF_VTOP_TYPE_UNKNOWN; 1853 len = UDF_REGID_ID_SIZE; 1854 1855 check_name = "*UDF Virtual Partition"; 1856 if (strncmp(map_name, check_name, len) == 0) { 1857 pmap_type = UDF_VTOP_TYPE_VIRT; 1858 n_virt++; 1859 ump->metadata_part = log_part; 1860 break; 1861 } 1862 check_name = "*UDF Sparable Partition"; 1863 if (strncmp(map_name, check_name, len) == 0) { 1864 pmap_type = UDF_VTOP_TYPE_SPARABLE; 1865 n_spar++; 1866 ump->data_part = log_part; 1867 ump->metadata_part = log_part; 1868 break; 1869 } 1870 check_name = "*UDF Metadata Partition"; 1871 if (strncmp(map_name, check_name, len) == 0) { 1872 pmap_type = UDF_VTOP_TYPE_META; 1873 n_meta++; 1874 ump->metadata_part = log_part; 1875 break; 1876 } 1877 break; 1878 default: 1879 return EINVAL; 1880 } 1881 1882 /* 1883 * BUGALERT: some rogue implementations use random physical 1884 * partion numbers to break other implementations so lookup 1885 * the number. 
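		 * (same lookup as in udf_process_vds_descriptor() above; an
		 * unknown partition number makes the mount fail with EINVAL
		 * below)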
1886 */ 1887 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) { 1888 part = ump->partitions[phys_part]; 1889 if (part == NULL) 1890 continue; 1891 if (udf_rw16(part->part_num) == raw_phys_part) 1892 break; 1893 } 1894 1895 DPRINTF(VOLUMES, ("\t%d -> %d(%d) type %d\n", log_part, 1896 raw_phys_part, phys_part, pmap_type)); 1897 1898 if (phys_part == UDF_PARTITIONS) 1899 return EINVAL; 1900 if (pmap_type == UDF_VTOP_TYPE_UNKNOWN) 1901 return EINVAL; 1902 1903 ump->vtop [log_part] = phys_part; 1904 ump->vtop_tp[log_part] = pmap_type; 1905 1906 pmap_pos += pmap_size; 1907 } 1908 /* not winning the beauty contest */ 1909 ump->vtop_tp[UDF_VTOP_RAWPART] = UDF_VTOP_TYPE_RAW; 1910 1911 /* test some basic UDF assertions/requirements */ 1912 if ((n_virt > 1) || (n_spar > 1) || (n_meta > 1)) 1913 return EINVAL; 1914 1915 if (n_virt) { 1916 if ((n_phys == 0) || n_spar || n_meta) 1917 return EINVAL; 1918 } 1919 if (n_spar + n_phys == 0) 1920 return EINVAL; 1921 1922 /* determine allocation scheme's based on disc format */ 1923 /* VAT's can only be on a sequential media */ 1924 ump->data_alloc = UDF_ALLOC_SPACEMAP; 1925 if (n_virt) 1926 ump->data_alloc = UDF_ALLOC_SEQUENTIAL; 1927 1928 ump->meta_alloc = UDF_ALLOC_SPACEMAP; 1929 if (n_virt) 1930 ump->meta_alloc = UDF_ALLOC_VAT; 1931 if (n_meta) 1932 ump->meta_alloc = UDF_ALLOC_METABITMAP; 1933 1934 /* special cases for pseudo-overwrite */ 1935 if (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE) { 1936 ump->data_alloc = UDF_ALLOC_SEQUENTIAL; 1937 if (n_meta) { 1938 ump->meta_alloc = UDF_ALLOC_METASEQUENTIAL; 1939 } else { 1940 ump->meta_alloc = UDF_ALLOC_RELAXEDSEQUENTIAL; 1941 } 1942 } 1943 1944 /* determine default allocation descriptors to use */ 1945 ump->data_allocdscr = UDF_ICB_SHORT_ALLOC; 1946 ump->meta_allocdscr = UDF_ICB_SHORT_ALLOC; 1947 if (n_pm > 1) { 1948 ump->data_allocdscr = UDF_ICB_LONG_ALLOC; 1949 ump->meta_allocdscr = UDF_ICB_LONG_ALLOC; 1950 /* metadata partitions are forced to have short */ 1951 if (n_meta) 1952 ump->meta_allocdscr = UDF_ICB_SHORT_ALLOC; 1953 } 1954 1955 /* determine logical volume open/closure actions */ 1956 if (n_virt) { 1957 ump->lvopen = 0; 1958 if (ump->discinfo.last_session_state == MMC_STATE_CLOSED) 1959 ump->lvopen |= UDF_OPEN_SESSION ; 1960 ump->lvclose = UDF_WRITE_VAT; 1961 if (ump->mount_args.udfmflags & UDFMNT_CLOSESESSION) 1962 ump->lvclose |= UDF_CLOSE_SESSION; 1963 } else { 1964 /* `normal' rewritable or non sequential media */ 1965 ump->lvopen = UDF_WRITE_LVINT; 1966 ump->lvclose = UDF_WRITE_LVINT; 1967 if ((ump->discinfo.mmc_cur & MMC_CAP_REWRITABLE) == 0) 1968 ump->lvopen |= UDF_APPENDONLY_LVINT; 1969 } 1970 1971 /* 1972 * Determine sheduler error behaviour. For virtual partions, update 1973 * the trackinfo; for sparable partitions replace a whole block on the 1974 * sparable table. Allways requeue. 
1975 */
1976 ump->lvreadwrite = 0;
1977 if (n_virt)
1978 ump->lvreadwrite = UDF_UPDATE_TRACKINFO;
1979 if (n_spar)
1980 ump->lvreadwrite = UDF_REMAP_BLOCK;
1981
1982 /*
1983 * Select our scheduler
1984 */
1985 ump->strategy = &udf_strat_rmw;
1986 if (n_virt || (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE))
1987 ump->strategy = &udf_strat_sequential;
1988 if ((ump->discinfo.mmc_class == MMC_CLASS_DISC) ||
1989 (ump->discinfo.mmc_class == MMC_CLASS_UNKN))
1990 ump->strategy = &udf_strat_direct;
1991 if (n_spar)
1992 ump->strategy = &udf_strat_rmw;
1993
1994 /* print results */
1995 DPRINTF(VOLUMES, ("\tdata alloc scheme %d, meta alloc scheme %d\n",
1996 ump->data_alloc, ump->meta_alloc));
1997 DPRINTF(VOLUMES, ("\tdata partition %d, metadata partition %d\n",
1998 ump->data_part, ump->metadata_part));
1999
2000 bitmask_snprintf(ump->lvopen, UDFLOGVOL_BITS, bits, sizeof(bits));
2001 DPRINTF(VOLUMES, ("\tactions on logvol open %s\n", bits));
2002 bitmask_snprintf(ump->lvclose, UDFLOGVOL_BITS, bits, sizeof(bits));
2003 DPRINTF(VOLUMES, ("\tactions on logvol close %s\n", bits));
2004 bitmask_snprintf(ump->lvreadwrite, UDFONERROR_BITS, bits, sizeof(bits));
2005 DPRINTF(VOLUMES, ("\tactions on logvol errors %s\n", bits));
2006
2007 DPRINTF(VOLUMES, ("\tselected scheduler `%s`\n",
2008 (ump->strategy == &udf_strat_direct) ? "Direct" :
2009 (ump->strategy == &udf_strat_sequential) ? "Sequential" :
2010 (ump->strategy == &udf_strat_rmw) ? "RMW" : "UNKNOWN!"));
2011
2012 /* signal it's OK for now */
2013 return 0;
2014 }
2015
2016 /* ----------------------------------------------------------------------- */
2017
2018 /*
2019 * Update logical volume name in all structures that keep a record of it. We
2020 * use memmove since each of them might be specified as a source.
2021 *
2022 * Note that it doesn't update the VAT structure!
2023 */ 2024 2025 static void 2026 udf_update_logvolname(struct udf_mount *ump, char *logvol_id) 2027 { 2028 struct logvol_desc *lvd = NULL; 2029 struct fileset_desc *fsd = NULL; 2030 struct udf_lv_info *lvi = NULL; 2031 2032 DPRINTF(VOLUMES, ("Updating logical volume name\n")); 2033 lvd = ump->logical_vol; 2034 fsd = ump->fileset_desc; 2035 if (ump->implementation) 2036 lvi = &ump->implementation->_impl_use.lv_info; 2037 2038 /* logvol's id might be specified as origional so use memmove here */ 2039 memmove(lvd->logvol_id, logvol_id, 128); 2040 if (fsd) 2041 memmove(fsd->logvol_id, logvol_id, 128); 2042 if (lvi) 2043 memmove(lvi->logvol_id, logvol_id, 128); 2044 } 2045 2046 /* --------------------------------------------------------------------- */ 2047 2048 void 2049 udf_inittag(struct udf_mount *ump, struct desc_tag *tag, int tagid, 2050 uint32_t sector) 2051 { 2052 assert(ump->logical_vol); 2053 2054 tag->id = udf_rw16(tagid); 2055 tag->descriptor_ver = ump->logical_vol->tag.descriptor_ver; 2056 tag->cksum = 0; 2057 tag->reserved = 0; 2058 tag->serial_num = ump->logical_vol->tag.serial_num; 2059 tag->tag_loc = udf_rw32(sector); 2060 } 2061 2062 2063 uint64_t 2064 udf_advance_uniqueid(struct udf_mount *ump) 2065 { 2066 uint64_t unique_id; 2067 2068 mutex_enter(&ump->logvol_mutex); 2069 unique_id = udf_rw64(ump->logvol_integrity->lvint_next_unique_id); 2070 if (unique_id < 0x10) 2071 unique_id = 0x10; 2072 ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id + 1); 2073 mutex_exit(&ump->logvol_mutex); 2074 2075 return unique_id; 2076 } 2077 2078 2079 static void 2080 udf_adjust_filecount(struct udf_node *udf_node, int sign) 2081 { 2082 struct udf_mount *ump = udf_node->ump; 2083 uint32_t num_dirs, num_files; 2084 int udf_file_type; 2085 2086 /* get file type */ 2087 if (udf_node->fe) { 2088 udf_file_type = udf_node->fe->icbtag.file_type; 2089 } else { 2090 udf_file_type = udf_node->efe->icbtag.file_type; 2091 } 2092 2093 /* adjust file count */ 2094 mutex_enter(&ump->allocate_mutex); 2095 if (udf_file_type == UDF_ICB_FILETYPE_DIRECTORY) { 2096 num_dirs = udf_rw32(ump->logvol_info->num_directories); 2097 ump->logvol_info->num_directories = 2098 udf_rw32((num_dirs + sign)); 2099 } else { 2100 num_files = udf_rw32(ump->logvol_info->num_files); 2101 ump->logvol_info->num_files = 2102 udf_rw32((num_files + sign)); 2103 } 2104 mutex_exit(&ump->allocate_mutex); 2105 } 2106 2107 2108 void 2109 udf_osta_charset(struct charspec *charspec) 2110 { 2111 bzero(charspec, sizeof(struct charspec)); 2112 charspec->type = 0; 2113 strcpy((char *) charspec->inf, "OSTA Compressed Unicode"); 2114 } 2115 2116 2117 /* first call udf_set_regid and then the suffix */ 2118 void 2119 udf_set_regid(struct regid *regid, char const *name) 2120 { 2121 bzero(regid, sizeof(struct regid)); 2122 regid->flags = 0; /* not dirty and not protected */ 2123 strcpy((char *) regid->id, name); 2124 } 2125 2126 2127 void 2128 udf_add_domain_regid(struct udf_mount *ump, struct regid *regid) 2129 { 2130 uint16_t *ver; 2131 2132 ver = (uint16_t *) regid->id_suffix; 2133 *ver = ump->logvol_info->min_udf_readver; 2134 } 2135 2136 2137 void 2138 udf_add_udf_regid(struct udf_mount *ump, struct regid *regid) 2139 { 2140 uint16_t *ver; 2141 2142 ver = (uint16_t *) regid->id_suffix; 2143 *ver = ump->logvol_info->min_udf_readver; 2144 2145 regid->id_suffix[2] = 4; /* unix */ 2146 regid->id_suffix[3] = 8; /* NetBSD */ 2147 } 2148 2149 2150 void 2151 udf_add_impl_regid(struct udf_mount *ump, struct regid *regid) 2152 { 2153 
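/*
 * Implementation-use regid suffix: byte 0 carries the OS class (4 = UNIX)
 * and byte 1 the OS identifier (8 = NetBSD), the same values that
 * udf_add_udf_regid() above stores in suffix bytes 2 and 3 after the
 * minimum UDF read revision.
 */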
regid->id_suffix[0] = 4; /* unix */ 2154 regid->id_suffix[1] = 8; /* NetBSD */ 2155 } 2156 2157 2158 void 2159 udf_add_app_regid(struct udf_mount *ump, struct regid *regid) 2160 { 2161 regid->id_suffix[0] = APP_VERSION_MAIN; 2162 regid->id_suffix[1] = APP_VERSION_SUB; 2163 } 2164 2165 static int 2166 udf_create_parentfid(struct udf_mount *ump, struct fileid_desc *fid, 2167 struct long_ad *parent, uint64_t unique_id) 2168 { 2169 /* the size of an empty FID is 38 but needs to be a multiple of 4 */ 2170 int fidsize = 40; 2171 2172 udf_inittag(ump, &fid->tag, TAGID_FID, udf_rw32(parent->loc.lb_num)); 2173 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */ 2174 fid->file_char = UDF_FILE_CHAR_DIR | UDF_FILE_CHAR_PAR; 2175 fid->icb = *parent; 2176 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id); 2177 fid->tag.desc_crc_len = fidsize - UDF_DESC_TAG_LENGTH; 2178 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid); 2179 2180 return fidsize; 2181 } 2182 2183 /* --------------------------------------------------------------------- */ 2184 2185 /* 2186 * Extended attribute support. UDF knows of 3 places for extended attributes: 2187 * 2188 * (a) inside the file's (e)fe in the length of the extended attribute area 2189 * before the allocation descriptors/filedata 2190 * 2191 * (b) in a file referenced by (e)fe->ext_attr_icb and 2192 * 2193 * (c) in the e(fe)'s associated stream directory that can hold various 2194 * sub-files. In the stream directory a few fixed named subfiles are reserved 2195 * for NT/Unix ACL's and OS/2 attributes. 2196 * 2197 * NOTE: Extended attributes are read randomly but allways written 2198 * *atomicaly*. For ACL's this interface is propably different but not known 2199 * to me yet. 2200 * 2201 * Order of extended attributes in a space : 2202 * ECMA 167 EAs 2203 * Non block aligned Implementation Use EAs 2204 * Block aligned Implementation Use EAs 2205 * Application Use EAs 2206 */ 2207 2208 static int 2209 udf_impl_extattr_check(struct impl_extattr_entry *implext) 2210 { 2211 uint16_t *spos; 2212 2213 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) { 2214 /* checksum valid? */ 2215 DPRINTF(EXTATTR, ("checking UDF impl. attr checksum\n")); 2216 spos = (uint16_t *) implext->data; 2217 if (udf_rw16(*spos) != udf_ea_cksum((uint8_t *) implext)) 2218 return EINVAL; 2219 } 2220 return 0; 2221 } 2222 2223 static void 2224 udf_calc_impl_extattr_checksum(struct impl_extattr_entry *implext) 2225 { 2226 uint16_t *spos; 2227 2228 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) { 2229 /* set checksum */ 2230 spos = (uint16_t *) implext->data; 2231 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext)); 2232 } 2233 } 2234 2235 2236 int 2237 udf_extattr_search_intern(struct udf_node *node, 2238 uint32_t sattr, char const *sattrname, 2239 uint32_t *offsetp, uint32_t *lengthp) 2240 { 2241 struct extattrhdr_desc *eahdr; 2242 struct extattr_entry *attrhdr; 2243 struct impl_extattr_entry *implext; 2244 uint32_t offset, a_l, sector_size; 2245 int32_t l_ea; 2246 uint8_t *pos; 2247 int error; 2248 2249 /* get mountpoint */ 2250 sector_size = node->ump->discinfo.sector_size; 2251 2252 /* get information from fe/efe */ 2253 if (node->fe) { 2254 l_ea = udf_rw32(node->fe->l_ea); 2255 eahdr = (struct extattrhdr_desc *) node->fe->data; 2256 } else { 2257 assert(node->efe); 2258 l_ea = udf_rw32(node->efe->l_ea); 2259 eahdr = (struct extattrhdr_desc *) node->efe->data; 2260 } 2261 2262 /* something recorded here? 
*/
2263 if (l_ea == 0)
2264 return ENOENT;
2265
2266 /* check extended attribute tag; what to do if it fails? */
2267 error = udf_check_tag(eahdr);
2268 if (error)
2269 return EINVAL;
2270 if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
2271 return EINVAL;
2272 error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
2273 if (error)
2274 return EINVAL;
2275
2276 DPRINTF(EXTATTR, ("Found %d bytes of extended attributes\n", l_ea));
2277
2278 /* looking for Ecma-167 attributes? */
2279 offset = sizeof(struct extattrhdr_desc);
2280
2281 /* looking for either implementation use or application use */
2282 if (sattr == 2048) { /* [4/48.10.8] */
2283 offset = udf_rw32(eahdr->impl_attr_loc);
2284 if (offset == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2285 return ENOENT;
2286 }
2287 if (sattr == 65536) { /* [4/48.10.9] */
2288 offset = udf_rw32(eahdr->appl_attr_loc);
2289 if (offset == UDF_APPL_ATTR_LOC_NOT_PRESENT)
2290 return ENOENT;
2291 }
2292
2293 /* paranoia: check offset and l_ea */
2294 if (l_ea + offset >= sector_size - sizeof(struct extattr_entry))
2295 return EINVAL;
2296
2297 DPRINTF(EXTATTR, ("Starting at offset %d\n", offset));
2298
2299 /* find our extended attribute */
2300 l_ea -= offset;
2301 pos = (uint8_t *) eahdr + offset;
2302
2303 while (l_ea >= sizeof(struct extattr_entry)) {
2304 DPRINTF(EXTATTR, ("%d extended attr bytes left\n", l_ea));
2305 attrhdr = (struct extattr_entry *) pos;
2306 implext = (struct impl_extattr_entry *) pos;
2307
2308 /* get complete attribute length and check for rogue values */
2309 a_l = udf_rw32(attrhdr->a_l);
2310 DPRINTF(EXTATTR, ("attribute %d:%d, len %d/%d\n",
2311 udf_rw32(attrhdr->type),
2312 attrhdr->subtype, a_l, l_ea));
2313 if ((a_l == 0) || (a_l > l_ea))
2314 return EINVAL;
2315
2316 if (attrhdr->type != sattr)
2317 goto next_attribute;
2318
2319 /* we might have found it! */
2320 if (attrhdr->type < 2048) { /* Ecma-167 attribute */
2321 *offsetp = offset;
2322 *lengthp = a_l;
2323 return 0; /* success */
2324 }
2325
2326 /*
2327 * Implementation use and application use extended attributes
2328 * have a name to identify.
They share the same structure only 2329 * UDF implementation use extended attributes have a checksum 2330 * we need to check 2331 */ 2332 2333 DPRINTF(EXTATTR, ("named attribute %s\n", implext->imp_id.id)); 2334 if (strcmp(implext->imp_id.id, sattrname) == 0) { 2335 /* we have found our appl/implementation attribute */ 2336 *offsetp = offset; 2337 *lengthp = a_l; 2338 return 0; /* success */ 2339 } 2340 2341 next_attribute: 2342 /* next attribute */ 2343 pos += a_l; 2344 l_ea -= a_l; 2345 offset += a_l; 2346 } 2347 /* not found */ 2348 return ENOENT; 2349 } 2350 2351 2352 static void 2353 udf_extattr_insert_internal(struct udf_mount *ump, union dscrptr *dscr, 2354 struct extattr_entry *extattr) 2355 { 2356 struct file_entry *fe; 2357 struct extfile_entry *efe; 2358 struct extattrhdr_desc *extattrhdr; 2359 struct impl_extattr_entry *implext; 2360 uint32_t impl_attr_loc, appl_attr_loc, l_ea, a_l, exthdr_len; 2361 uint32_t *l_eap, l_ad; 2362 uint16_t *spos; 2363 uint8_t *bpos, *data; 2364 2365 if (udf_rw16(dscr->tag.id) == TAGID_FENTRY) { 2366 fe = &dscr->fe; 2367 data = fe->data; 2368 l_eap = &fe->l_ea; 2369 l_ad = udf_rw32(fe->l_ad); 2370 } else if (udf_rw16(dscr->tag.id) == TAGID_EXTFENTRY) { 2371 efe = &dscr->efe; 2372 data = efe->data; 2373 l_eap = &efe->l_ea; 2374 l_ad = udf_rw32(efe->l_ad); 2375 } else { 2376 panic("Bad tag passed to udf_extattr_insert_internal"); 2377 } 2378 2379 /* can't append already written to file descriptors yet */ 2380 assert(l_ad == 0); 2381 2382 /* should have a header! */ 2383 extattrhdr = (struct extattrhdr_desc *) data; 2384 l_ea = udf_rw32(*l_eap); 2385 if (l_ea == 0) { 2386 /* create empty extended attribute header */ 2387 exthdr_len = sizeof(struct extattrhdr_desc); 2388 2389 udf_inittag(ump, &extattrhdr->tag, TAGID_EXTATTR_HDR, 2390 /* loc */ 0); 2391 extattrhdr->impl_attr_loc = udf_rw32(exthdr_len); 2392 extattrhdr->appl_attr_loc = udf_rw32(exthdr_len); 2393 extattrhdr->tag.desc_crc_len = udf_rw16(8); 2394 2395 /* record extended attribute header length */ 2396 l_ea = exthdr_len; 2397 *l_eap = udf_rw32(l_ea); 2398 } 2399 2400 /* extract locations */ 2401 impl_attr_loc = udf_rw32(extattrhdr->impl_attr_loc); 2402 appl_attr_loc = udf_rw32(extattrhdr->appl_attr_loc); 2403 if (impl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT) 2404 impl_attr_loc = l_ea; 2405 if (appl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT) 2406 appl_attr_loc = l_ea; 2407 2408 /* Ecma 167 EAs */ 2409 if (udf_rw32(extattr->type) < 2048) { 2410 assert(impl_attr_loc == l_ea); 2411 assert(appl_attr_loc == l_ea); 2412 } 2413 2414 /* implementation use extended attributes */ 2415 if (udf_rw32(extattr->type) == 2048) { 2416 assert(appl_attr_loc == l_ea); 2417 2418 /* calculate and write extended attribute header checksum */ 2419 implext = (struct impl_extattr_entry *) extattr; 2420 assert(udf_rw32(implext->iu_l) == 4); /* [UDF 3.3.4.5] */ 2421 spos = (uint16_t *) implext->data; 2422 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext)); 2423 } 2424 2425 /* application use extended attributes */ 2426 assert(udf_rw32(extattr->type) != 65536); 2427 assert(appl_attr_loc == l_ea); 2428 2429 /* append the attribute at the end of the current space */ 2430 bpos = data + udf_rw32(*l_eap); 2431 a_l = udf_rw32(extattr->a_l); 2432 2433 /* update impl. 
attribute locations */ 2434 if (udf_rw32(extattr->type) < 2048) { 2435 impl_attr_loc = l_ea + a_l; 2436 appl_attr_loc = l_ea + a_l; 2437 } 2438 if (udf_rw32(extattr->type) == 2048) { 2439 appl_attr_loc = l_ea + a_l; 2440 } 2441 2442 /* copy and advance */ 2443 memcpy(bpos, extattr, a_l); 2444 l_ea += a_l; 2445 *l_eap = udf_rw32(l_ea); 2446 2447 /* do the `dance` again backwards */ 2448 if (udf_rw16(ump->logical_vol->tag.descriptor_ver) != 2) { 2449 if (impl_attr_loc == l_ea) 2450 impl_attr_loc = UDF_IMPL_ATTR_LOC_NOT_PRESENT; 2451 if (appl_attr_loc == l_ea) 2452 appl_attr_loc = UDF_APPL_ATTR_LOC_NOT_PRESENT; 2453 } 2454 2455 /* store offsets */ 2456 extattrhdr->impl_attr_loc = udf_rw32(impl_attr_loc); 2457 extattrhdr->appl_attr_loc = udf_rw32(appl_attr_loc); 2458 } 2459 2460 2461 /* --------------------------------------------------------------------- */ 2462 2463 static int 2464 udf_update_lvid_from_vat_extattr(struct udf_node *vat_node) 2465 { 2466 struct udf_mount *ump; 2467 struct udf_logvol_info *lvinfo; 2468 struct impl_extattr_entry *implext; 2469 struct vatlvext_extattr_entry lvext; 2470 const char *extstr = "*UDF VAT LVExtension"; 2471 uint64_t vat_uniqueid; 2472 uint32_t offset, a_l; 2473 uint8_t *ea_start, *lvextpos; 2474 int error; 2475 2476 /* get mountpoint and lvinfo */ 2477 ump = vat_node->ump; 2478 lvinfo = ump->logvol_info; 2479 2480 /* get information from fe/efe */ 2481 if (vat_node->fe) { 2482 vat_uniqueid = udf_rw64(vat_node->fe->unique_id); 2483 ea_start = vat_node->fe->data; 2484 } else { 2485 vat_uniqueid = udf_rw64(vat_node->efe->unique_id); 2486 ea_start = vat_node->efe->data; 2487 } 2488 2489 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l); 2490 if (error) 2491 return error; 2492 2493 implext = (struct impl_extattr_entry *) (ea_start + offset); 2494 error = udf_impl_extattr_check(implext); 2495 if (error) 2496 return error; 2497 2498 /* paranoia */ 2499 if (a_l != sizeof(*implext) -1 + udf_rw32(implext->iu_l) + sizeof(lvext)) { 2500 DPRINTF(VOLUMES, ("VAT LVExtension size doesn't compute\n")); 2501 return EINVAL; 2502 } 2503 2504 /* 2505 * we have found our "VAT LVExtension attribute. BUT due to a 2506 * bug in the specification it might not be word aligned so 2507 * copy first to avoid panics on some machines (!!) 
2508 */ 2509 DPRINTF(VOLUMES, ("Found VAT LVExtension attr\n")); 2510 lvextpos = implext->data + udf_rw32(implext->iu_l); 2511 memcpy(&lvext, lvextpos, sizeof(lvext)); 2512 2513 /* check if it was updated the last time */ 2514 if (udf_rw64(lvext.unique_id_chk) == vat_uniqueid) { 2515 lvinfo->num_files = lvext.num_files; 2516 lvinfo->num_directories = lvext.num_directories; 2517 udf_update_logvolname(ump, lvext.logvol_id); 2518 } else { 2519 DPRINTF(VOLUMES, ("VAT LVExtension out of date\n")); 2520 /* replace VAT LVExt by free space EA */ 2521 memset(implext->imp_id.id, 0, UDF_REGID_ID_SIZE); 2522 strcpy(implext->imp_id.id, "*UDF FreeEASpace"); 2523 udf_calc_impl_extattr_checksum(implext); 2524 } 2525 2526 return 0; 2527 } 2528 2529 2530 static int 2531 udf_update_vat_extattr_from_lvid(struct udf_node *vat_node) 2532 { 2533 struct udf_mount *ump; 2534 struct udf_logvol_info *lvinfo; 2535 struct impl_extattr_entry *implext; 2536 struct vatlvext_extattr_entry lvext; 2537 const char *extstr = "*UDF VAT LVExtension"; 2538 uint64_t vat_uniqueid; 2539 uint32_t offset, a_l; 2540 uint8_t *ea_start, *lvextpos; 2541 int error; 2542 2543 /* get mountpoint and lvinfo */ 2544 ump = vat_node->ump; 2545 lvinfo = ump->logvol_info; 2546 2547 /* get information from fe/efe */ 2548 if (vat_node->fe) { 2549 vat_uniqueid = udf_rw64(vat_node->fe->unique_id); 2550 ea_start = vat_node->fe->data; 2551 } else { 2552 vat_uniqueid = udf_rw64(vat_node->efe->unique_id); 2553 ea_start = vat_node->efe->data; 2554 } 2555 2556 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l); 2557 if (error) 2558 return error; 2559 /* found, it existed */ 2560 2561 /* paranoia */ 2562 implext = (struct impl_extattr_entry *) (ea_start + offset); 2563 error = udf_impl_extattr_check(implext); 2564 if (error) { 2565 DPRINTF(VOLUMES, ("VAT LVExtension bad on update\n")); 2566 return error; 2567 } 2568 /* it is correct */ 2569 2570 /* 2571 * we have found our "VAT LVExtension attribute. BUT due to a 2572 * bug in the specification it might not be word aligned so 2573 * copy first to avoid panics on some machines (!!) 
2574 */ 2575 DPRINTF(VOLUMES, ("Updating VAT LVExtension attr\n")); 2576 lvextpos = implext->data + udf_rw32(implext->iu_l); 2577 2578 lvext.unique_id_chk = vat_uniqueid; 2579 lvext.num_files = lvinfo->num_files; 2580 lvext.num_directories = lvinfo->num_directories; 2581 memmove(lvext.logvol_id, ump->logical_vol->logvol_id, 128); 2582 2583 memcpy(lvextpos, &lvext, sizeof(lvext)); 2584 2585 return 0; 2586 } 2587 2588 /* --------------------------------------------------------------------- */ 2589 2590 int 2591 udf_vat_read(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset) 2592 { 2593 struct udf_mount *ump = vat_node->ump; 2594 2595 if (offset + size > ump->vat_offset + ump->vat_entries * 4) 2596 return EINVAL; 2597 2598 memcpy(blob, ump->vat_table + offset, size); 2599 return 0; 2600 } 2601 2602 int 2603 udf_vat_write(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset) 2604 { 2605 struct udf_mount *ump = vat_node->ump; 2606 uint32_t offset_high; 2607 uint8_t *new_vat_table; 2608 2609 /* extent VAT allocation if needed */ 2610 offset_high = offset + size; 2611 if (offset_high >= ump->vat_table_alloc_len) { 2612 /* realloc */ 2613 new_vat_table = realloc(ump->vat_table, 2614 ump->vat_table_alloc_len + UDF_VAT_CHUNKSIZE, 2615 M_UDFVOLD, M_WAITOK | M_CANFAIL); 2616 if (!new_vat_table) { 2617 printf("udf_vat_write: can't extent VAT, out of mem\n"); 2618 return ENOMEM; 2619 } 2620 ump->vat_table = new_vat_table; 2621 ump->vat_table_alloc_len += UDF_VAT_CHUNKSIZE; 2622 } 2623 ump->vat_table_len = MAX(ump->vat_table_len, offset_high); 2624 2625 memcpy(ump->vat_table + offset, blob, size); 2626 return 0; 2627 } 2628 2629 /* --------------------------------------------------------------------- */ 2630 2631 /* TODO support previous VAT location writeout */ 2632 static int 2633 udf_update_vat_descriptor(struct udf_mount *ump) 2634 { 2635 struct udf_node *vat_node = ump->vat_node; 2636 struct udf_logvol_info *lvinfo = ump->logvol_info; 2637 struct icb_tag *icbtag; 2638 struct udf_oldvat_tail *oldvat_tl; 2639 struct udf_vat *vat; 2640 uint64_t unique_id; 2641 uint32_t lb_size; 2642 uint8_t *raw_vat; 2643 int filetype, error; 2644 2645 KASSERT(vat_node); 2646 KASSERT(lvinfo); 2647 lb_size = udf_rw32(ump->logical_vol->lb_size); 2648 2649 /* get our new unique_id */ 2650 unique_id = udf_advance_uniqueid(ump); 2651 2652 /* get information from fe/efe */ 2653 if (vat_node->fe) { 2654 icbtag = &vat_node->fe->icbtag; 2655 vat_node->fe->unique_id = udf_rw64(unique_id); 2656 } else { 2657 icbtag = &vat_node->efe->icbtag; 2658 vat_node->efe->unique_id = udf_rw64(unique_id); 2659 } 2660 2661 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */ 2662 filetype = icbtag->file_type; 2663 KASSERT((filetype == 0) || (filetype == UDF_ICB_FILETYPE_VAT)); 2664 2665 /* allocate piece to process head or tail of VAT file */ 2666 raw_vat = malloc(lb_size, M_TEMP, M_WAITOK); 2667 2668 if (filetype == 0) { 2669 /* 2670 * Update "*UDF VAT LVExtension" extended attribute from the 2671 * lvint if present. 
2672 */ 2673 udf_update_vat_extattr_from_lvid(vat_node); 2674 2675 /* setup identifying regid */ 2676 oldvat_tl = (struct udf_oldvat_tail *) raw_vat; 2677 memset(oldvat_tl, 0, sizeof(struct udf_oldvat_tail)); 2678 2679 udf_set_regid(&oldvat_tl->id, "*UDF Virtual Alloc Tbl"); 2680 udf_add_udf_regid(ump, &oldvat_tl->id); 2681 oldvat_tl->prev_vat = udf_rw32(0xffffffff); 2682 2683 /* write out new tail of virtual allocation table file */ 2684 error = udf_vat_write(vat_node, raw_vat, 2685 sizeof(struct udf_oldvat_tail), ump->vat_entries * 4); 2686 } else { 2687 /* compose the VAT2 header */ 2688 vat = (struct udf_vat *) raw_vat; 2689 memset(vat, 0, sizeof(struct udf_vat)); 2690 2691 vat->header_len = udf_rw16(152); /* as per spec */ 2692 vat->impl_use_len = udf_rw16(0); 2693 memmove(vat->logvol_id, ump->logical_vol->logvol_id, 128); 2694 vat->prev_vat = udf_rw32(0xffffffff); 2695 vat->num_files = lvinfo->num_files; 2696 vat->num_directories = lvinfo->num_directories; 2697 vat->min_udf_readver = lvinfo->min_udf_readver; 2698 vat->min_udf_writever = lvinfo->min_udf_writever; 2699 vat->max_udf_writever = lvinfo->max_udf_writever; 2700 2701 error = udf_vat_write(vat_node, raw_vat, 2702 sizeof(struct udf_vat), 0); 2703 } 2704 free(raw_vat, M_TEMP); 2705 2706 return error; /* success! */ 2707 } 2708 2709 2710 int 2711 udf_writeout_vat(struct udf_mount *ump) 2712 { 2713 struct udf_node *vat_node = ump->vat_node; 2714 uint32_t vat_length; 2715 int error; 2716 2717 KASSERT(vat_node); 2718 2719 DPRINTF(CALL, ("udf_writeout_vat\n")); 2720 2721 mutex_enter(&ump->allocate_mutex); 2722 udf_update_vat_descriptor(ump); 2723 2724 /* write out the VAT contents ; TODO intelligent writing */ 2725 vat_length = ump->vat_table_len; 2726 error = vn_rdwr(UIO_WRITE, vat_node->vnode, 2727 ump->vat_table, ump->vat_table_len, 0, 2728 UIO_SYSSPACE, IO_NODELOCKED, FSCRED, NULL, NULL); 2729 if (error) { 2730 printf("udf_writeout_vat: failed to write out VAT contents\n"); 2731 goto out; 2732 } 2733 2734 mutex_exit(&ump->allocate_mutex); 2735 2736 vflushbuf(ump->vat_node->vnode, 1 /* sync */); 2737 error = VOP_FSYNC(ump->vat_node->vnode, 2738 FSCRED, FSYNC_WAIT, 0, 0); 2739 if (error) 2740 printf("udf_writeout_vat: error writing VAT node!\n"); 2741 out: 2742 2743 return error; 2744 } 2745 2746 /* --------------------------------------------------------------------- */ 2747 2748 /* 2749 * Read in relevant pieces of VAT file and check if its indeed a VAT file 2750 * descriptor. If OK, read in complete VAT file. 2751 */ 2752 2753 static int 2754 udf_check_for_vat(struct udf_node *vat_node) 2755 { 2756 struct udf_mount *ump; 2757 struct icb_tag *icbtag; 2758 struct timestamp *mtime; 2759 struct udf_vat *vat; 2760 struct udf_oldvat_tail *oldvat_tl; 2761 struct udf_logvol_info *lvinfo; 2762 uint64_t unique_id; 2763 uint32_t vat_length; 2764 uint32_t vat_offset, vat_entries, vat_table_alloc_len; 2765 uint32_t sector_size; 2766 uint32_t *raw_vat; 2767 uint8_t *vat_table; 2768 char *regid_name; 2769 int filetype; 2770 int error; 2771 2772 /* vat_length is really 64 bits though impossible */ 2773 2774 DPRINTF(VOLUMES, ("Checking for VAT\n")); 2775 if (!vat_node) 2776 return ENOENT; 2777 2778 /* get mount info */ 2779 ump = vat_node->ump; 2780 sector_size = udf_rw32(ump->logical_vol->lb_size); 2781 2782 /* check assertions */ 2783 assert(vat_node->fe || vat_node->efe); 2784 assert(ump->logvol_integrity); 2785 2786 /* set vnode type to regular file or we can't read from it! 
*/
2787 vat_node->vnode->v_type = VREG;
2788
2789 /* get information from fe/efe */
2790 if (vat_node->fe) {
2791 vat_length = udf_rw64(vat_node->fe->inf_len);
2792 icbtag = &vat_node->fe->icbtag;
2793 mtime = &vat_node->fe->mtime;
2794 unique_id = udf_rw64(vat_node->fe->unique_id);
2795 } else {
2796 vat_length = udf_rw64(vat_node->efe->inf_len);
2797 icbtag = &vat_node->efe->icbtag;
2798 mtime = &vat_node->efe->mtime;
2799 unique_id = udf_rw64(vat_node->efe->unique_id);
2800 }
2801
2802 /* Check icb filetype! It has to be 0 or UDF_ICB_FILETYPE_VAT */
2803 filetype = icbtag->file_type;
2804 if ((filetype != 0) && (filetype != UDF_ICB_FILETYPE_VAT))
2805 return ENOENT;
2806
2807 DPRINTF(VOLUMES, ("\tPossible VAT length %d\n", vat_length));
2808
2809 vat_table_alloc_len =
2810 ((vat_length + UDF_VAT_CHUNKSIZE-1) / UDF_VAT_CHUNKSIZE)
2811 * UDF_VAT_CHUNKSIZE;
2812
2813 vat_table = malloc(vat_table_alloc_len, M_UDFVOLD,
2814 M_CANFAIL | M_WAITOK);
2815 if (vat_table == NULL) {
2816 printf("allocation of %d bytes failed for VAT\n",
2817 vat_table_alloc_len);
2818 return ENOMEM;
2819 }
2820
2821 /* allocate piece to read in head or tail of VAT file */
2822 raw_vat = malloc(sector_size, M_TEMP, M_WAITOK);
2823
2824 /*
2825 * Check the contents of the file to see if it's the old 1.50 VAT table
2826 * format. It's notoriously broken and although some implementations
2827 * support an extension as defined in the UDF 1.50 errata document, it's
2828 * doubtful to be usable since a lot of implementations don't maintain it.
2829 */
2830 lvinfo = ump->logvol_info;
2831
2832 if (filetype == 0) {
2833 /* definition */
2834 vat_offset = 0;
2835 vat_entries = (vat_length-36)/4;
2836
2837 /* read in tail of virtual allocation table file */
2838 error = vn_rdwr(UIO_READ, vat_node->vnode,
2839 (uint8_t *) raw_vat,
2840 sizeof(struct udf_oldvat_tail),
2841 vat_entries * 4,
2842 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2843 NULL, NULL);
2844 if (error)
2845 goto out;
2846
2847 /* check 1.50 VAT */
2848 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2849 regid_name = (char *) oldvat_tl->id.id;
2850 error = strncmp(regid_name, "*UDF Virtual Alloc Tbl", 22);
2851 if (error) {
2852 DPRINTF(VOLUMES, ("VAT format 1.50 rejected\n"));
2853 error = ENOENT;
2854 goto out;
2855 }
2856
2857 /*
2858 * update LVID from "*UDF VAT LVExtension" extended attribute
2859 * if present.
2860 */ 2861 udf_update_lvid_from_vat_extattr(vat_node); 2862 } else { 2863 /* read in head of virtual allocation table file */ 2864 error = vn_rdwr(UIO_READ, vat_node->vnode, 2865 (uint8_t *) raw_vat, 2866 sizeof(struct udf_vat), 0, 2867 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED, 2868 NULL, NULL); 2869 if (error) 2870 goto out; 2871 2872 /* definition */ 2873 vat = (struct udf_vat *) raw_vat; 2874 vat_offset = vat->header_len; 2875 vat_entries = (vat_length - vat_offset)/4; 2876 2877 assert(lvinfo); 2878 lvinfo->num_files = vat->num_files; 2879 lvinfo->num_directories = vat->num_directories; 2880 lvinfo->min_udf_readver = vat->min_udf_readver; 2881 lvinfo->min_udf_writever = vat->min_udf_writever; 2882 lvinfo->max_udf_writever = vat->max_udf_writever; 2883 2884 udf_update_logvolname(ump, vat->logvol_id); 2885 } 2886 2887 /* read in complete VAT file */ 2888 error = vn_rdwr(UIO_READ, vat_node->vnode, 2889 vat_table, 2890 vat_length, 0, 2891 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED, 2892 NULL, NULL); 2893 if (error) 2894 printf("read in of complete VAT file failed (error %d)\n", 2895 error); 2896 if (error) 2897 goto out; 2898 2899 DPRINTF(VOLUMES, ("VAT format accepted, marking it closed\n")); 2900 ump->logvol_integrity->lvint_next_unique_id = unique_id; 2901 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED); 2902 ump->logvol_integrity->time = *mtime; 2903 2904 ump->vat_table_len = vat_length; 2905 ump->vat_table_alloc_len = vat_table_alloc_len; 2906 ump->vat_table = vat_table; 2907 ump->vat_offset = vat_offset; 2908 ump->vat_entries = vat_entries; 2909 ump->vat_last_free_lb = 0; /* start at beginning */ 2910 2911 out: 2912 if (error) { 2913 if (vat_table) 2914 free(vat_table, M_UDFVOLD); 2915 } 2916 free(raw_vat, M_TEMP); 2917 2918 return error; 2919 } 2920 2921 /* --------------------------------------------------------------------- */ 2922 2923 static int 2924 udf_search_vat(struct udf_mount *ump, union udf_pmap *mapping) 2925 { 2926 struct udf_node *vat_node; 2927 struct long_ad icb_loc; 2928 uint32_t early_vat_loc, late_vat_loc, vat_loc; 2929 int error; 2930 2931 /* mapping info not needed */ 2932 mapping = mapping; 2933 2934 vat_loc = ump->last_possible_vat_location; 2935 early_vat_loc = vat_loc - 256; /* 8 blocks of 32 sectors */ 2936 2937 DPRINTF(VOLUMES, ("1) last possible %d, early_vat_loc %d \n", 2938 vat_loc, early_vat_loc)); 2939 early_vat_loc = MAX(early_vat_loc, ump->first_possible_vat_location); 2940 late_vat_loc = vat_loc + 1024; 2941 2942 DPRINTF(VOLUMES, ("2) last possible %d, early_vat_loc %d \n", 2943 vat_loc, early_vat_loc)); 2944 2945 /* start looking from the end of the range */ 2946 do { 2947 DPRINTF(VOLUMES, ("Checking for VAT at sector %d\n", vat_loc)); 2948 icb_loc.loc.part_num = udf_rw16(UDF_VTOP_RAWPART); 2949 icb_loc.loc.lb_num = udf_rw32(vat_loc); 2950 2951 error = udf_get_node(ump, &icb_loc, &vat_node); 2952 if (!error) { 2953 error = udf_check_for_vat(vat_node); 2954 DPRINTFIF(VOLUMES, !error, 2955 ("VAT accepted at %d\n", vat_loc)); 2956 if (!error) 2957 break; 2958 } 2959 if (vat_node) { 2960 vput(vat_node->vnode); 2961 vat_node = NULL; 2962 } 2963 vat_loc--; /* walk backwards */ 2964 } while (vat_loc >= early_vat_loc); 2965 2966 /* keep our VAT node around */ 2967 if (vat_node) { 2968 UDF_SET_SYSTEMFILE(vat_node->vnode); 2969 ump->vat_node = vat_node; 2970 } 2971 2972 return error; 2973 } 2974 2975 /* --------------------------------------------------------------------- */ 2976 2977 static int 2978 udf_read_sparables(struct 
udf_mount *ump, union udf_pmap *mapping) 2979 { 2980 union dscrptr *dscr; 2981 struct part_map_spare *pms = &mapping->pms; 2982 uint32_t lb_num; 2983 int spar, error; 2984 2985 /* 2986 * The partition mapping passed on to us specifies the information we 2987 * need to locate and initialise the sparable partition mapping 2988 * information we need. 2989 */ 2990 2991 DPRINTF(VOLUMES, ("Read sparable table\n")); 2992 ump->sparable_packet_size = udf_rw16(pms->packet_len); 2993 KASSERT(ump->sparable_packet_size >= ump->packet_size); /* XXX */ 2994 2995 for (spar = 0; spar < pms->n_st; spar++) { 2996 lb_num = pms->st_loc[spar]; 2997 DPRINTF(VOLUMES, ("Checking for sparing table %d\n", lb_num)); 2998 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr); 2999 if (!error && dscr) { 3000 if (udf_rw16(dscr->tag.id) == TAGID_SPARING_TABLE) { 3001 if (ump->sparing_table) 3002 free(ump->sparing_table, M_UDFVOLD); 3003 ump->sparing_table = &dscr->spt; 3004 dscr = NULL; 3005 DPRINTF(VOLUMES, 3006 ("Sparing table accepted (%d entries)\n", 3007 udf_rw16(ump->sparing_table->rt_l))); 3008 break; /* we're done */ 3009 } 3010 } 3011 if (dscr) 3012 free(dscr, M_UDFVOLD); 3013 } 3014 3015 if (ump->sparing_table) 3016 return 0; 3017 3018 return ENOENT; 3019 } 3020 3021 /* --------------------------------------------------------------------- */ 3022 3023 static int 3024 udf_read_metadata_nodes(struct udf_mount *ump, union udf_pmap *mapping) 3025 { 3026 struct part_map_meta *pmm = &mapping->pmm; 3027 struct long_ad icb_loc; 3028 struct vnode *vp; 3029 int error; 3030 3031 DPRINTF(VOLUMES, ("Reading in Metadata files\n")); 3032 icb_loc.loc.part_num = pmm->part_num; 3033 icb_loc.loc.lb_num = pmm->meta_file_lbn; 3034 DPRINTF(VOLUMES, ("Metadata file\n")); 3035 error = udf_get_node(ump, &icb_loc, &ump->metadata_node); 3036 if (ump->metadata_node) { 3037 vp = ump->metadata_node->vnode; 3038 UDF_SET_SYSTEMFILE(vp); 3039 } 3040 3041 icb_loc.loc.lb_num = pmm->meta_mirror_file_lbn; 3042 if (icb_loc.loc.lb_num != -1) { 3043 DPRINTF(VOLUMES, ("Metadata copy file\n")); 3044 error = udf_get_node(ump, &icb_loc, &ump->metadatamirror_node); 3045 if (ump->metadatamirror_node) { 3046 vp = ump->metadatamirror_node->vnode; 3047 UDF_SET_SYSTEMFILE(vp); 3048 } 3049 } 3050 3051 icb_loc.loc.lb_num = pmm->meta_bitmap_file_lbn; 3052 if (icb_loc.loc.lb_num != -1) { 3053 DPRINTF(VOLUMES, ("Metadata bitmap file\n")); 3054 error = udf_get_node(ump, &icb_loc, &ump->metadatabitmap_node); 3055 if (ump->metadatabitmap_node) { 3056 vp = ump->metadatabitmap_node->vnode; 3057 UDF_SET_SYSTEMFILE(vp); 3058 } 3059 } 3060 3061 /* if we're mounting read-only we relax the requirements */ 3062 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) { 3063 error = EFAULT; 3064 if (ump->metadata_node) 3065 error = 0; 3066 if ((ump->metadata_node == NULL) && (ump->metadatamirror_node)) { 3067 printf( "udf mount: Metadata file not readable, " 3068 "substituting Metadata copy file\n"); 3069 ump->metadata_node = ump->metadatamirror_node; 3070 ump->metadatamirror_node = NULL; 3071 error = 0; 3072 } 3073 } else { 3074 /* mounting read/write */ 3075 /* XXX DISABLED! 
metadata writing is not working yet XXX */ 3076 /* if (error) */ 3077 error = EROFS; 3078 } 3079 DPRINTFIF(VOLUMES, error, ("udf mount: failed to read " 3080 "metadata files\n")); 3081 return error; 3082 } 3083 3084 /* --------------------------------------------------------------------- */ 3085 3086 int 3087 udf_read_vds_tables(struct udf_mount *ump) 3088 { 3089 union udf_pmap *mapping; 3090 /* struct udf_args *args = &ump->mount_args; */ 3091 uint32_t n_pm, mt_l; 3092 uint32_t log_part; 3093 uint8_t *pmap_pos; 3094 int pmap_size; 3095 int error; 3096 3097 /* Iterate (again) over the part mappings for locations */ 3098 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */ 3099 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */ 3100 pmap_pos = ump->logical_vol->maps; 3101 3102 for (log_part = 0; log_part < n_pm; log_part++) { 3103 mapping = (union udf_pmap *) pmap_pos; 3104 switch (ump->vtop_tp[log_part]) { 3105 case UDF_VTOP_TYPE_PHYS : 3106 /* nothing */ 3107 break; 3108 case UDF_VTOP_TYPE_VIRT : 3109 /* search and load VAT */ 3110 error = udf_search_vat(ump, mapping); 3111 if (error) 3112 return ENOENT; 3113 break; 3114 case UDF_VTOP_TYPE_SPARABLE : 3115 /* load one of the sparable tables */ 3116 error = udf_read_sparables(ump, mapping); 3117 if (error) 3118 return ENOENT; 3119 break; 3120 case UDF_VTOP_TYPE_META : 3121 /* load the associated file descriptors */ 3122 error = udf_read_metadata_nodes(ump, mapping); 3123 if (error) 3124 return ENOENT; 3125 break; 3126 default: 3127 break; 3128 } 3129 pmap_size = pmap_pos[1]; 3130 pmap_pos += pmap_size; 3131 } 3132 3133 /* read in and check unallocated and free space info if writing */ 3134 if ((ump->vfs_mountp->mnt_flag & MNT_RDONLY) == 0) { 3135 error = udf_read_physical_partition_spacetables(ump); 3136 if (error) 3137 return error; 3138 3139 /* also read in metadata partion spacebitmap if defined */ 3140 error = udf_read_metadata_partition_spacetable(ump); 3141 return error; 3142 } 3143 3144 return 0; 3145 } 3146 3147 /* --------------------------------------------------------------------- */ 3148 3149 int 3150 udf_read_rootdirs(struct udf_mount *ump) 3151 { 3152 union dscrptr *dscr; 3153 /* struct udf_args *args = &ump->mount_args; */ 3154 struct udf_node *rootdir_node, *streamdir_node; 3155 struct long_ad fsd_loc, *dir_loc; 3156 uint32_t lb_num, dummy; 3157 uint32_t fsd_len; 3158 int dscr_type; 3159 int error; 3160 3161 /* TODO implement FSD reading in separate function like integrity? */ 3162 /* get fileset descriptor sequence */ 3163 fsd_loc = ump->logical_vol->lv_fsd_loc; 3164 fsd_len = udf_rw32(fsd_loc.len); 3165 3166 dscr = NULL; 3167 error = 0; 3168 while (fsd_len || error) { 3169 DPRINTF(VOLUMES, ("fsd_len = %d\n", fsd_len)); 3170 /* translate fsd_loc to lb_num */ 3171 error = udf_translate_vtop(ump, &fsd_loc, &lb_num, &dummy); 3172 if (error) 3173 break; 3174 DPRINTF(VOLUMES, ("Reading FSD at lb %d\n", lb_num)); 3175 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr); 3176 /* end markers */ 3177 if (error || (dscr == NULL)) 3178 break; 3179 3180 /* analyse */ 3181 dscr_type = udf_rw16(dscr->tag.id); 3182 if (dscr_type == TAGID_TERM) 3183 break; 3184 if (dscr_type != TAGID_FSD) { 3185 free(dscr, M_UDFVOLD); 3186 return ENOENT; 3187 } 3188 3189 /* 3190 * TODO check for multiple fileset descriptors; its only 3191 * picking the last now. 
Also check for FSD 3192 * correctness/interpretability 3193 */ 3194 3195 /* update */ 3196 if (ump->fileset_desc) { 3197 free(ump->fileset_desc, M_UDFVOLD); 3198 } 3199 ump->fileset_desc = &dscr->fsd; 3200 dscr = NULL; 3201 3202 /* continue to the next fsd */ 3203 fsd_len -= ump->discinfo.sector_size; 3204 fsd_loc.loc.lb_num = udf_rw32(udf_rw32(fsd_loc.loc.lb_num)+1); 3205 3206 /* follow up to fsd->next_ex (long_ad) if its not null */ 3207 if (udf_rw32(ump->fileset_desc->next_ex.len)) { 3208 DPRINTF(VOLUMES, ("follow up FSD extent\n")); 3209 fsd_loc = ump->fileset_desc->next_ex; 3210 fsd_len = udf_rw32(ump->fileset_desc->next_ex.len); 3211 } 3212 } 3213 if (dscr) 3214 free(dscr, M_UDFVOLD); 3215 3216 /* there has to be one */ 3217 if (ump->fileset_desc == NULL) 3218 return ENOENT; 3219 3220 DPRINTF(VOLUMES, ("FSD read in fine\n")); 3221 DPRINTF(VOLUMES, ("Updating fsd logical volume id\n")); 3222 udf_update_logvolname(ump, ump->logical_vol->logvol_id); 3223 3224 /* 3225 * Now the FSD is known, read in the rootdirectory and if one exists, 3226 * the system stream dir. Some files in the system streamdir are not 3227 * wanted in this implementation since they are not maintained. If 3228 * writing is enabled we'll delete these files if they exist. 3229 */ 3230 3231 rootdir_node = streamdir_node = NULL; 3232 dir_loc = NULL; 3233 3234 /* try to read in the rootdir */ 3235 dir_loc = &ump->fileset_desc->rootdir_icb; 3236 error = udf_get_node(ump, dir_loc, &rootdir_node); 3237 if (error) 3238 return ENOENT; 3239 3240 /* aparently it read in fine */ 3241 3242 /* 3243 * Try the system stream directory; not very likely in the ones we 3244 * test, but for completeness. 3245 */ 3246 dir_loc = &ump->fileset_desc->streamdir_icb; 3247 if (udf_rw32(dir_loc->len)) { 3248 printf("udf_read_rootdirs: streamdir defined "); 3249 error = udf_get_node(ump, dir_loc, &streamdir_node); 3250 if (error) { 3251 printf("but error in streamdir reading\n"); 3252 } else { 3253 printf("but ignored\n"); 3254 /* 3255 * TODO process streamdir `baddies' i.e. 
files we don't
3256 * want if R/W
3257 */
3258 }
3259 }
3260
3261 DPRINTF(VOLUMES, ("Rootdir(s) read in fine\n"));
3262
3263 /* release the vnodes again; they'll be auto-recycled later */
3264 if (streamdir_node) {
3265 vput(streamdir_node->vnode);
3266 }
3267 if (rootdir_node) {
3268 vput(rootdir_node->vnode);
3269 }
3270
3271 return 0;
3272 }
3273
3274 /* ----------------------------------------------------------------------- */
3275
3276 /* To make absolutely sure we are NOT returning zero, add one :) */
3277
3278 long
3279 udf_calchash(struct long_ad *icbptr)
3280 {
3281 /* ought to be enough since each mountpoint has its own chain */
3282 return udf_rw32(icbptr->loc.lb_num) + 1;
3283 }
3284
3285
3286 static struct udf_node *
3287 udf_hash_lookup(struct udf_mount *ump, struct long_ad *icbptr)
3288 {
3289 struct udf_node *node;
3290 struct vnode *vp;
3291 uint32_t hashline;
3292
3293 loop:
3294 mutex_enter(&ump->ihash_lock);
3295
3296 hashline = udf_calchash(icbptr) & UDF_INODE_HASHMASK;
3297 LIST_FOREACH(node, &ump->udf_nodes[hashline], hashchain) {
3298 assert(node);
3299 if (node->loc.loc.lb_num == icbptr->loc.lb_num &&
3300 node->loc.loc.part_num == icbptr->loc.part_num) {
3301 vp = node->vnode;
3302 assert(vp);
3303 mutex_enter(&vp->v_interlock);
3304 mutex_exit(&ump->ihash_lock);
3305 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
3306 goto loop;
3307 return node;
3308 }
3309 }
3310 mutex_exit(&ump->ihash_lock);
3311
3312 return NULL;
3313 }
3314
3315
3316 static void
3317 udf_sorted_list_insert(struct udf_node *node)
3318 {
3319 struct udf_mount *ump;
3320 struct udf_node *s_node, *last_node;
3321 uint32_t loc, s_loc;
3322
3323 ump = node->ump;
3324 last_node = NULL; /* XXX gcc */
3325
3326 if (LIST_EMPTY(&ump->sorted_udf_nodes)) {
3327 LIST_INSERT_HEAD(&ump->sorted_udf_nodes, node, sortchain);
3328 return;
3329 }
3330
3331 /*
3332 * We sort on logical block number here and not on physical block
3333 * number. Ideally we should go for the physical block nr to get
3334 * better sync performance, though this sort will ensure that packets
3335 * won't get split up unnecessarily.
3336 */ 3337 3338 loc = udf_rw32(node->loc.loc.lb_num); 3339 LIST_FOREACH(s_node, &ump->sorted_udf_nodes, sortchain) { 3340 s_loc = udf_rw32(s_node->loc.loc.lb_num); 3341 if (s_loc > loc) { 3342 LIST_INSERT_BEFORE(s_node, node, sortchain); 3343 return; 3344 } 3345 last_node = s_node; 3346 } 3347 LIST_INSERT_AFTER(last_node, node, sortchain); 3348 } 3349 3350 3351 static void 3352 udf_register_node(struct udf_node *node) 3353 { 3354 struct udf_mount *ump; 3355 struct udf_node *chk; 3356 uint32_t hashline; 3357 3358 ump = node->ump; 3359 mutex_enter(&ump->ihash_lock); 3360 3361 /* add to our hash table */ 3362 hashline = udf_calchash(&node->loc) & UDF_INODE_HASHMASK; 3363 #ifdef DEBUG 3364 LIST_FOREACH(chk, &ump->udf_nodes[hashline], hashchain) { 3365 assert(chk); 3366 if (chk->loc.loc.lb_num == node->loc.loc.lb_num && 3367 chk->loc.loc.part_num == node->loc.loc.part_num) 3368 panic("Double node entered\n"); 3369 } 3370 #else 3371 chk = NULL; 3372 #endif 3373 LIST_INSERT_HEAD(&ump->udf_nodes[hashline], node, hashchain); 3374 3375 /* add to our sorted list */ 3376 udf_sorted_list_insert(node); 3377 3378 mutex_exit(&ump->ihash_lock); 3379 } 3380 3381 3382 static void 3383 udf_deregister_node(struct udf_node *node) 3384 { 3385 struct udf_mount *ump; 3386 3387 ump = node->ump; 3388 mutex_enter(&ump->ihash_lock); 3389 3390 /* from hash and sorted list */ 3391 LIST_REMOVE(node, hashchain); 3392 LIST_REMOVE(node, sortchain); 3393 3394 mutex_exit(&ump->ihash_lock); 3395 } 3396 3397 /* --------------------------------------------------------------------- */ 3398 3399 int 3400 udf_open_logvol(struct udf_mount *ump) 3401 { 3402 int logvol_integrity; 3403 int error; 3404 3405 /* already/still open? */ 3406 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type); 3407 if (logvol_integrity == UDF_INTEGRITY_OPEN) 3408 return 0; 3409 3410 /* can we open it ? */ 3411 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) 3412 return EROFS; 3413 3414 /* setup write parameters */ 3415 DPRINTF(VOLUMES, ("Setting up write parameters\n")); 3416 if ((error = udf_setup_writeparams(ump)) != 0) 3417 return error; 3418 3419 /* determine data and metadata tracks (most likely same) */ 3420 error = udf_search_writing_tracks(ump); 3421 if (error) { 3422 /* most likely lack of space */ 3423 printf("udf_open_logvol: error searching writing tracks\n"); 3424 return EROFS; 3425 } 3426 3427 /* writeout/update lvint on disc or only in memory */ 3428 DPRINTF(VOLUMES, ("Opening logical volume\n")); 3429 if (ump->lvopen & UDF_OPEN_SESSION) { 3430 /* TODO implement writeout of VRS + VDS */ 3431 printf( "udf_open_logvol:Opening a closed session not yet " 3432 "implemented\n"); 3433 return EROFS; 3434 3435 /* determine data and metadata tracks again */ 3436 error = udf_search_writing_tracks(ump); 3437 } 3438 3439 /* mark it open */ 3440 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_OPEN); 3441 3442 /* do we need to write it out? */ 3443 if (ump->lvopen & UDF_WRITE_LVINT) { 3444 error = udf_writeout_lvint(ump, ump->lvopen); 3445 /* if we couldn't write it mark it closed again */ 3446 if (error) { 3447 ump->logvol_integrity->integrity_type = 3448 udf_rw32(UDF_INTEGRITY_CLOSED); 3449 return error; 3450 } 3451 } 3452 3453 return 0; 3454 } 3455 3456 3457 int 3458 udf_close_logvol(struct udf_mount *ump, int mntflags) 3459 { 3460 int logvol_integrity; 3461 int error = 0, error1 = 0, error2 = 0; 3462 int n; 3463 3464 /* already/still closed? 
*/ 3465 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type); 3466 if (logvol_integrity == UDF_INTEGRITY_CLOSED) 3467 return 0; 3468 3469 /* writeout/update lvint or write out VAT */ 3470 DPRINTF(VOLUMES, ("Closing logical volume\n")); 3471 if (ump->lvclose & UDF_WRITE_VAT) { 3472 DPRINTF(VOLUMES, ("lvclose & UDF_WRITE_VAT\n")); 3473 3474 /* preprocess the VAT node; its modified on every writeout */ 3475 DPRINTF(VOLUMES, ("writeout vat_node\n")); 3476 udf_update_vat_descriptor(ump->vat_node->ump); 3477 3478 /* write out the VAT node */ 3479 vflushbuf(ump->vat_node->vnode, 1 /* sync */); 3480 for (n = 0; n < 16; n++) { 3481 ump->vat_node->i_flags |= IN_MODIFIED; 3482 error = VOP_FSYNC(ump->vat_node->vnode, 3483 FSCRED, FSYNC_WAIT, 0, 0); 3484 } 3485 if (error) { 3486 printf("udf_close_logvol: writeout of VAT failed\n"); 3487 return error; 3488 } 3489 } 3490 3491 if (ump->lvclose & UDF_WRITE_PART_BITMAPS) { 3492 /* sync writeout metadata spacetable if existing */ 3493 error1 = udf_write_metadata_partition_spacetable(ump, true); 3494 if (error1) 3495 printf( "udf_close_logvol: writeout of metadata space " 3496 "bitmap failed\n"); 3497 3498 /* sync writeout partition spacetables */ 3499 error2 = udf_write_physical_partition_spacetables(ump, true); 3500 if (error2) 3501 printf( "udf_close_logvol: writeout of space tables " 3502 "failed\n"); 3503 3504 if (error1 || error2) 3505 return (error1 | error2); 3506 3507 ump->lvclose &= ~UDF_WRITE_PART_BITMAPS; 3508 } 3509 3510 if (ump->lvclose & UDF_CLOSE_SESSION) { 3511 printf("TODO: Closing a session is not yet implemented\n"); 3512 return EROFS; 3513 ump->lvopen |= UDF_OPEN_SESSION; 3514 } 3515 3516 /* mark it closed */ 3517 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED); 3518 3519 /* do we need to write out the logical volume integrity */ 3520 if (ump->lvclose & UDF_WRITE_LVINT) 3521 error = udf_writeout_lvint(ump, ump->lvopen); 3522 if (error) { 3523 /* HELP now what? mark it open again for now */ 3524 ump->logvol_integrity->integrity_type = 3525 udf_rw32(UDF_INTEGRITY_OPEN); 3526 return error; 3527 } 3528 3529 (void) udf_synchronise_caches(ump); 3530 3531 return 0; 3532 } 3533 3534 /* --------------------------------------------------------------------- */ 3535 3536 /* 3537 * Genfs interfacing 3538 * 3539 * static const struct genfs_ops udf_genfsops = { 3540 * .gop_size = genfs_size, 3541 * size of transfers 3542 * .gop_alloc = udf_gop_alloc, 3543 * allocate len bytes at offset 3544 * .gop_write = genfs_gop_write, 3545 * putpages interface code 3546 * .gop_markupdate = udf_gop_markupdate, 3547 * set update/modify flags etc. 3548 * } 3549 */ 3550 3551 /* 3552 * Genfs interface. These four functions are the only ones defined though not 3553 * documented... great.... 3554 */ 3555 3556 /* 3557 * Callback from genfs to allocate len bytes at offset off; only called when 3558 * filling up gaps in the allocation. 3559 */ 3560 /* XXX should we check if there is space enough in udf_gop_alloc? 
*/ 3561 static int 3562 udf_gop_alloc(struct vnode *vp, off_t off, 3563 off_t len, int flags, kauth_cred_t cred) 3564 { 3565 #if 0 3566 struct udf_node *udf_node = VTOI(vp); 3567 struct udf_mount *ump = udf_node->ump; 3568 uint32_t lb_size, num_lb; 3569 #endif 3570 3571 DPRINTF(NOTIMPL, ("udf_gop_alloc not implemented\n")); 3572 DPRINTF(ALLOC, ("udf_gop_alloc called for %"PRIu64" bytes\n", len)); 3573 3574 return 0; 3575 } 3576 3577 3578 /* 3579 * callback from genfs to update our flags 3580 */ 3581 static void 3582 udf_gop_markupdate(struct vnode *vp, int flags) 3583 { 3584 struct udf_node *udf_node = VTOI(vp); 3585 u_long mask = 0; 3586 3587 if ((flags & GOP_UPDATE_ACCESSED) != 0) { 3588 mask = IN_ACCESS; 3589 } 3590 if ((flags & GOP_UPDATE_MODIFIED) != 0) { 3591 if (vp->v_type == VREG) { 3592 mask |= IN_CHANGE | IN_UPDATE; 3593 } else { 3594 mask |= IN_MODIFY; 3595 } 3596 } 3597 if (mask) { 3598 udf_node->i_flags |= mask; 3599 } 3600 } 3601 3602 3603 static const struct genfs_ops udf_genfsops = { 3604 .gop_size = genfs_size, 3605 .gop_alloc = udf_gop_alloc, 3606 .gop_write = genfs_gop_write_rwmap, 3607 .gop_markupdate = udf_gop_markupdate, 3608 }; 3609 3610 3611 /* --------------------------------------------------------------------- */ 3612 3613 int 3614 udf_write_terminator(struct udf_mount *ump, uint32_t sector) 3615 { 3616 union dscrptr *dscr; 3617 int error; 3618 3619 dscr = malloc(ump->discinfo.sector_size, M_TEMP, M_WAITOK); 3620 bzero(dscr, ump->discinfo.sector_size); 3621 udf_inittag(ump, &dscr->tag, TAGID_TERM, sector); 3622 3623 /* CRC length for an anchor is 512 - tag length; defined in Ecma 167 */ 3624 dscr->tag.desc_crc_len = udf_rw16(512-UDF_DESC_TAG_LENGTH); 3625 (void) udf_validate_tag_and_crc_sums(dscr); 3626 3627 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR, 3628 dscr, sector, sector); 3629 3630 free(dscr, M_TEMP); 3631 3632 return error; 3633 } 3634 3635 3636 /* --------------------------------------------------------------------- */ 3637 3638 /* UDF<->unix converters */ 3639 3640 /* --------------------------------------------------------------------- */ 3641 3642 static mode_t 3643 udf_perm_to_unix_mode(uint32_t perm) 3644 { 3645 mode_t mode; 3646 3647 mode = ((perm & UDF_FENTRY_PERM_USER_MASK) ); 3648 mode |= ((perm & UDF_FENTRY_PERM_GRP_MASK ) >> 2); 3649 mode |= ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4); 3650 3651 return mode; 3652 } 3653 3654 /* --------------------------------------------------------------------- */ 3655 3656 static uint32_t 3657 unix_mode_to_udf_perm(mode_t mode) 3658 { 3659 uint32_t perm; 3660 3661 perm = ((mode & S_IRWXO) ); 3662 perm |= ((mode & S_IRWXG) << 2); 3663 perm |= ((mode & S_IRWXU) << 4); 3664 perm |= ((mode & S_IWOTH) << 3); 3665 perm |= ((mode & S_IWGRP) << 5); 3666 perm |= ((mode & S_IWUSR) << 7); 3667 3668 return perm; 3669 } 3670 3671 /* --------------------------------------------------------------------- */ 3672 3673 static uint32_t 3674 udf_icb_to_unix_filetype(uint32_t icbftype) 3675 { 3676 switch (icbftype) { 3677 case UDF_ICB_FILETYPE_DIRECTORY : 3678 case UDF_ICB_FILETYPE_STREAMDIR : 3679 return S_IFDIR; 3680 case UDF_ICB_FILETYPE_FIFO : 3681 return S_IFIFO; 3682 case UDF_ICB_FILETYPE_CHARDEVICE : 3683 return S_IFCHR; 3684 case UDF_ICB_FILETYPE_BLOCKDEVICE : 3685 return S_IFBLK; 3686 case UDF_ICB_FILETYPE_RANDOMACCESS : 3687 case UDF_ICB_FILETYPE_REALTIME : 3688 return S_IFREG; 3689 case UDF_ICB_FILETYPE_SYMLINK : 3690 return S_IFLNK; 3691 case UDF_ICB_FILETYPE_SOCKET : 3692 return S_IFSOCK; 3693 } 3694 /* 
no idea what this is */ 3695 return 0; 3696 } 3697 3698 /* --------------------------------------------------------------------- */ 3699 3700 void 3701 udf_to_unix_name(char *result, int result_len, char *id, int len, 3702 struct charspec *chsp) 3703 { 3704 uint16_t *raw_name, *unix_name; 3705 uint16_t *inchp, ch; 3706 uint8_t *outchp; 3707 const char *osta_id = "OSTA Compressed Unicode"; 3708 int ucode_chars, nice_uchars, is_osta_typ0, nout; 3709 3710 raw_name = malloc(2048 * sizeof(uint16_t), M_UDFTEMP, M_WAITOK); 3711 unix_name = raw_name + 1024; /* split space in half */ 3712 assert(sizeof(char) == sizeof(uint8_t)); 3713 outchp = (uint8_t *) result; 3714 3715 is_osta_typ0 = (chsp->type == 0); 3716 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0); 3717 if (is_osta_typ0) { 3718 /* TODO clean up */ 3719 *raw_name = *unix_name = 0; 3720 ucode_chars = udf_UncompressUnicode(len, (uint8_t *) id, raw_name); 3721 ucode_chars = MIN(ucode_chars, UnicodeLength((unicode_t *) raw_name)); 3722 nice_uchars = UDFTransName(unix_name, raw_name, ucode_chars); 3723 /* output UTF8 */ 3724 for (inchp = unix_name; nice_uchars>0; inchp++, nice_uchars--) { 3725 ch = *inchp; 3726 nout = wput_utf8(outchp, result_len, ch); 3727 outchp += nout; result_len -= nout; 3728 if (!ch) break; 3729 } 3730 *outchp++ = 0; 3731 } else { 3732 /* assume 8bit char length byte latin-1 */ 3733 assert(*id == 8); 3734 assert(strlen((char *) (id+1)) <= MAXNAMLEN); 3735 strncpy((char *) result, (char *) (id+1), strlen((char *) (id+1))); 3736 } 3737 free(raw_name, M_UDFTEMP); 3738 } 3739 3740 /* --------------------------------------------------------------------- */ 3741 3742 void 3743 unix_to_udf_name(char *result, uint8_t *result_len, char const *name, int name_len, 3744 struct charspec *chsp) 3745 { 3746 uint16_t *raw_name; 3747 uint16_t *outchp; 3748 const char *inchp; 3749 const char *osta_id = "OSTA Compressed Unicode"; 3750 int udf_chars, is_osta_typ0, bits; 3751 size_t cnt; 3752 3753 /* allocate temporary unicode-16 buffer */ 3754 raw_name = malloc(1024, M_UDFTEMP, M_WAITOK); 3755 3756 /* convert utf8 to unicode-16 */ 3757 *raw_name = 0; 3758 inchp = name; 3759 outchp = raw_name; 3760 bits = 8; 3761 for (cnt = name_len, udf_chars = 0; cnt;) { 3762 /*###3490 [cc] warning: passing argument 2 of 'wget_utf8' from incompatible pointer type%%%*/ 3763 *outchp = wget_utf8(&inchp, &cnt); 3764 if (*outchp > 0xff) 3765 bits=16; 3766 outchp++; 3767 udf_chars++; 3768 } 3769 /* null terminate just in case */ 3770 *outchp++ = 0; 3771 3772 is_osta_typ0 = (chsp->type == 0); 3773 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0); 3774 if (is_osta_typ0) { 3775 udf_chars = udf_CompressUnicode(udf_chars, bits, 3776 (unicode_t *) raw_name, 3777 (byte *) result); 3778 } else { 3779 printf("unix to udf name: no CHSP0 ?\n"); 3780 /* XXX assume 8bit char length byte latin-1 */ 3781 *result++ = 8; udf_chars = 1; 3782 strncpy(result, name + 1, name_len); 3783 udf_chars += name_len; 3784 } 3785 *result_len = udf_chars; 3786 free(raw_name, M_UDFTEMP); 3787 } 3788 3789 /* --------------------------------------------------------------------- */ 3790 3791 void 3792 udf_timestamp_to_timespec(struct udf_mount *ump, 3793 struct timestamp *timestamp, 3794 struct timespec *timespec) 3795 { 3796 struct clock_ymdhms ymdhms; 3797 uint32_t usecs, secs, nsecs; 3798 uint16_t tz; 3799 3800 /* fill in ymdhms structure from timestamp */ 3801 memset(&ymdhms, 0, sizeof(ymdhms)); 3802 ymdhms.dt_year = udf_rw16(timestamp->year); 3803 ymdhms.dt_mon = 
timestamp->month; 3804 ymdhms.dt_day = timestamp->day; 3805 ymdhms.dt_wday = 0; /* ? */ 3806 ymdhms.dt_hour = timestamp->hour; 3807 ymdhms.dt_min = timestamp->minute; 3808 ymdhms.dt_sec = timestamp->second; 3809 3810 secs = clock_ymdhms_to_secs(&ymdhms); 3811 usecs = timestamp->usec + 3812 100*timestamp->hund_usec + 10000*timestamp->centisec; 3813 nsecs = usecs * 1000; 3814 3815 /* 3816 * Calculate the time zone. The timezone is 12 bit signed 2's 3817 * compliment, so we gotta do some extra magic to handle it right. 3818 */ 3819 tz = udf_rw16(timestamp->type_tz); 3820 tz &= 0x0fff; /* only lower 12 bits are significant */ 3821 if (tz & 0x0800) /* sign extention */ 3822 tz |= 0xf000; 3823 3824 /* TODO check timezone conversion */ 3825 /* check if we are specified a timezone to convert */ 3826 if (udf_rw16(timestamp->type_tz) & 0x1000) { 3827 if ((int16_t) tz != -2047) 3828 secs -= (int16_t) tz * 60; 3829 } else { 3830 secs -= ump->mount_args.gmtoff; 3831 } 3832 3833 timespec->tv_sec = secs; 3834 timespec->tv_nsec = nsecs; 3835 } 3836 3837 3838 void 3839 udf_timespec_to_timestamp(struct timespec *timespec, struct timestamp *timestamp) 3840 { 3841 struct clock_ymdhms ymdhms; 3842 uint32_t husec, usec, csec; 3843 3844 (void) clock_secs_to_ymdhms(timespec->tv_sec, &ymdhms); 3845 3846 usec = timespec->tv_nsec / 1000; 3847 husec = usec / 100; 3848 usec -= husec * 100; /* only 0-99 in usec */ 3849 csec = husec / 100; /* only 0-99 in csec */ 3850 husec -= csec * 100; /* only 0-99 in husec */ 3851 3852 /* set method 1 for CUT/GMT */ 3853 timestamp->type_tz = udf_rw16((1<<12) + 0); 3854 timestamp->year = udf_rw16(ymdhms.dt_year); 3855 timestamp->month = ymdhms.dt_mon; 3856 timestamp->day = ymdhms.dt_day; 3857 timestamp->hour = ymdhms.dt_hour; 3858 timestamp->minute = ymdhms.dt_min; 3859 timestamp->second = ymdhms.dt_sec; 3860 timestamp->centisec = csec; 3861 timestamp->hund_usec = husec; 3862 timestamp->usec = usec; 3863 } 3864 3865 /* --------------------------------------------------------------------- */ 3866 3867 /* 3868 * Attribute and filetypes converters with get/set pairs 3869 */ 3870 3871 uint32_t 3872 udf_getaccessmode(struct udf_node *udf_node) 3873 { 3874 struct file_entry *fe = udf_node->fe;; 3875 struct extfile_entry *efe = udf_node->efe; 3876 uint32_t udf_perm, icbftype; 3877 uint32_t mode, ftype; 3878 uint16_t icbflags; 3879 3880 UDF_LOCK_NODE(udf_node, 0); 3881 if (fe) { 3882 udf_perm = udf_rw32(fe->perm); 3883 icbftype = fe->icbtag.file_type; 3884 icbflags = udf_rw16(fe->icbtag.flags); 3885 } else { 3886 assert(udf_node->efe); 3887 udf_perm = udf_rw32(efe->perm); 3888 icbftype = efe->icbtag.file_type; 3889 icbflags = udf_rw16(efe->icbtag.flags); 3890 } 3891 3892 mode = udf_perm_to_unix_mode(udf_perm); 3893 ftype = udf_icb_to_unix_filetype(icbftype); 3894 3895 /* set suid, sgid, sticky from flags in fe/efe */ 3896 if (icbflags & UDF_ICB_TAG_FLAGS_SETUID) 3897 mode |= S_ISUID; 3898 if (icbflags & UDF_ICB_TAG_FLAGS_SETGID) 3899 mode |= S_ISGID; 3900 if (icbflags & UDF_ICB_TAG_FLAGS_STICKY) 3901 mode |= S_ISVTX; 3902 3903 UDF_UNLOCK_NODE(udf_node, 0); 3904 3905 return mode | ftype; 3906 } 3907 3908 3909 void 3910 udf_setaccessmode(struct udf_node *udf_node, mode_t mode) 3911 { 3912 struct file_entry *fe = udf_node->fe; 3913 struct extfile_entry *efe = udf_node->efe; 3914 uint32_t udf_perm; 3915 uint16_t icbflags; 3916 3917 UDF_LOCK_NODE(udf_node, 0); 3918 udf_perm = unix_mode_to_udf_perm(mode & ALLPERMS); 3919 if (fe) { 3920 icbflags = udf_rw16(fe->icbtag.flags); 3921 } else { 3922 
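/* no file entry (fe); operate on the extended file entry (efe) instead */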
icbflags = udf_rw16(efe->icbtag.flags); 3923 } 3924 3925 icbflags &= ~UDF_ICB_TAG_FLAGS_SETUID; 3926 icbflags &= ~UDF_ICB_TAG_FLAGS_SETGID; 3927 icbflags &= ~UDF_ICB_TAG_FLAGS_STICKY; 3928 if (mode & S_ISUID) 3929 icbflags |= UDF_ICB_TAG_FLAGS_SETUID; 3930 if (mode & S_ISGID) 3931 icbflags |= UDF_ICB_TAG_FLAGS_SETGID; 3932 if (mode & S_ISVTX) 3933 icbflags |= UDF_ICB_TAG_FLAGS_STICKY; 3934 3935 if (fe) { 3936 fe->perm = udf_rw32(udf_perm); 3937 fe->icbtag.flags = udf_rw16(icbflags); 3938 } else { 3939 efe->perm = udf_rw32(udf_perm); 3940 efe->icbtag.flags = udf_rw16(icbflags); 3941 } 3942 3943 UDF_UNLOCK_NODE(udf_node, 0); 3944 } 3945 3946 3947 void
3948 udf_getownership(struct udf_node *udf_node, uid_t *uidp, gid_t *gidp) 3949 { 3950 struct udf_mount *ump = udf_node->ump; 3951 struct file_entry *fe = udf_node->fe; 3952 struct extfile_entry *efe = udf_node->efe; 3953 uid_t uid; 3954 gid_t gid; 3955 3956 UDF_LOCK_NODE(udf_node, 0); 3957 if (fe) { 3958 uid = (uid_t)udf_rw32(fe->uid); 3959 gid = (gid_t)udf_rw32(fe->gid); 3960 } else { 3961 assert(udf_node->efe); 3962 uid = (uid_t)udf_rw32(efe->uid); 3963 gid = (gid_t)udf_rw32(efe->gid); 3964 } 3965 3966 /* do the uid/gid translation game */ 3967 if ((uid == (uid_t) -1) && (gid == (gid_t) -1)) { 3968 uid = ump->mount_args.anon_uid; 3969 gid = ump->mount_args.anon_gid; 3970 } 3971 *uidp = uid; 3972 *gidp = gid; 3973 3974 UDF_UNLOCK_NODE(udf_node, 0); 3975 } 3976 3977 3978 void
3979 udf_setownership(struct udf_node *udf_node, uid_t uid, gid_t gid) 3980 { 3981 struct udf_mount *ump = udf_node->ump; 3982 struct file_entry *fe = udf_node->fe; 3983 struct extfile_entry *efe = udf_node->efe; 3984 uid_t nobody_uid; 3985 gid_t nobody_gid; 3986 3987 UDF_LOCK_NODE(udf_node, 0); 3988 3989 /* do the uid/gid translation game */ 3990 nobody_uid = ump->mount_args.nobody_uid; 3991 nobody_gid = ump->mount_args.nobody_gid; 3992 if ((uid == nobody_uid) && (gid == nobody_gid)) { 3993 uid = (uid_t) -1; 3994 gid = (gid_t) -1; 3995 } 3996 3997 if (fe) { 3998 fe->uid = udf_rw32((uint32_t) uid); 3999 fe->gid = udf_rw32((uint32_t) gid); 4000 } else { 4001 efe->uid = udf_rw32((uint32_t) uid); 4002 efe->gid = udf_rw32((uint32_t) gid); 4003 } 4004 4005 UDF_UNLOCK_NODE(udf_node, 0); 4006 } 4007 4008 4009 /* --------------------------------------------------------------------- */ 4010 4011 /* 4012 * UDF dirhash implementation 4013 */ 4014
4015 static uint32_t 4016 udf_dirhash_hash(const char *str, int namelen) 4017 { 4018 uint32_t hash = 5381; 4019 int i, c; 4020 4021 for (i = 0; i < namelen; i++) { 4022 c = *str++; 4023 hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ 4024 } 4025 return hash; 4026 } 4027 4028 4029 static void
4030 udf_dirhash_purge(struct udf_dirhash *dirh) 4031 { 4032 struct udf_dirhash_entry *dirh_e; 4033 uint32_t hashline; 4034 4035 if (dirh == NULL) 4036 return; 4037 4038 if (dirh->size == 0) 4039 return; 4040 4041 for (hashline = 0; hashline < UDF_DIRHASH_HASHSIZE; hashline++) { 4042 dirh_e = LIST_FIRST(&dirh->entries[hashline]); 4043 while (dirh_e) { 4044 LIST_REMOVE(dirh_e, next); 4045 pool_put(&udf_dirhash_entry_pool, dirh_e); 4046 dirh_e = LIST_FIRST(&dirh->entries[hashline]); 4047 } 4048 } 4049 dirh_e = LIST_FIRST(&dirh->free_entries); 4050 4051 while (dirh_e) { 4052 LIST_REMOVE(dirh_e, next); 4053 pool_put(&udf_dirhash_entry_pool, dirh_e); 4054 dirh_e = LIST_FIRST(&dirh->free_entries); 4055 } 4056 4057 dirh->flags &= ~UDF_DIRH_COMPLETE; 4058 dirh->flags |= UDF_DIRH_PURGED; 4059 4060 udf_dirhashsize -= dirh->size; 4061 dirh->size = 0;
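/* the dirhash itself stays allocated; only its entries have been released */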
4062 } 4063 4064 4065 static void 4066 udf_dirhash_destroy(struct udf_dirhash **dirhp) 4067 { 4068 struct udf_dirhash *dirh = *dirhp; 4069 4070 if (dirh == NULL) 4071 return; 4072 4073 mutex_enter(&udf_dirhashmutex); 4074 4075 udf_dirhash_purge(dirh); 4076 TAILQ_REMOVE(&udf_dirhash_queue, dirh, next); 4077 pool_put(&udf_dirhash_pool, dirh); 4078 4079 *dirhp = NULL; 4080 4081 mutex_exit(&udf_dirhashmutex); 4082 } 4083 4084 4085 static void 4086 udf_dirhash_get(struct udf_dirhash **dirhp) 4087 { 4088 struct udf_dirhash *dirh; 4089 uint32_t hashline; 4090 4091 mutex_enter(&udf_dirhashmutex); 4092 4093 dirh = *dirhp; 4094 if (*dirhp == NULL) { 4095 dirh = pool_get(&udf_dirhash_pool, PR_WAITOK); 4096 *dirhp = dirh; 4097 memset(dirh, 0, sizeof(struct udf_dirhash)); 4098 for (hashline = 0; hashline < UDF_DIRHASH_HASHSIZE; hashline++) 4099 LIST_INIT(&dirh->entries[hashline]); 4100 dirh->size = 0; 4101 dirh->refcnt = 0; 4102 dirh->flags = 0; 4103 } else { 4104 TAILQ_REMOVE(&udf_dirhash_queue, dirh, next); 4105 } 4106 4107 dirh->refcnt++; 4108 TAILQ_INSERT_HEAD(&udf_dirhash_queue, dirh, next); 4109 4110 mutex_exit(&udf_dirhashmutex); 4111 } 4112 4113 4114 static void 4115 udf_dirhash_put(struct udf_dirhash *dirh) 4116 { 4117 mutex_enter(&udf_dirhashmutex); 4118 dirh->refcnt--; 4119 mutex_exit(&udf_dirhashmutex); 4120 } 4121 4122 4123 static void 4124 udf_dirhash_enter(struct udf_node *dir_node, struct fileid_desc *fid, 4125 struct dirent *dirent, uint64_t offset, uint32_t fid_size, int new) 4126 { 4127 struct udf_dirhash *dirh, *del_dirh, *prev_dirh; 4128 struct udf_dirhash_entry *dirh_e; 4129 uint32_t hashvalue, hashline; 4130 int entrysize; 4131 4132 /* make sure we have a dirhash to work on */ 4133 dirh = dir_node->dir_hash; 4134 KASSERT(dirh); 4135 KASSERT(dirh->refcnt > 0); 4136 4137 /* are we trying to re-enter an entry? 
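If so and the hash is already marked complete, the entry is known and we can return early.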
*/ 4138 if (!new && (dirh->flags & UDF_DIRH_COMPLETE)) 4139 return; 4140 4141 /* calculate our hash */ 4142 hashvalue = udf_dirhash_hash(dirent->d_name, dirent->d_namlen); 4143 hashline = hashvalue & UDF_DIRHASH_HASHMASK; 4144 4145 /* lookup and insert entry if not there yet */ 4146 LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) { 4147 /* check for hash collision */ 4148 if (dirh_e->hashvalue != hashvalue) 4149 continue; 4150 if (dirh_e->offset != offset) 4151 continue; 4152 /* got it already */ 4153 KASSERT(dirh_e->d_namlen == dirent->d_namlen); 4154 KASSERT(dirh_e->fid_size == fid_size); 4155 return; 4156 } 4157 4158 DPRINTF(DIRHASH, ("dirhash enter %"PRIu64", %d, %d for `%*.*s`\n", 4159 offset, fid_size, dirent->d_namlen, 4160 dirent->d_namlen, dirent->d_namlen, dirent->d_name)); 4161 4162 /* check if entry is in free space list */ 4163 LIST_FOREACH(dirh_e, &dirh->free_entries, next) { 4164 if (dirh_e->offset == offset) { 4165 DPRINTF(DIRHASH, ("\tremoving free entry\n")); 4166 LIST_REMOVE(dirh_e, next); 4167 break; 4168 } 4169 } 4170 4171 /* ensure we are not passing the dirhash limit */ 4172 entrysize = sizeof(struct udf_dirhash_entry); 4173 if (udf_dirhashsize + entrysize > udf_maxdirhashsize) { 4174 del_dirh = TAILQ_LAST(&udf_dirhash_queue, _udf_dirhash); 4175 KASSERT(del_dirh); 4176 while (udf_dirhashsize + entrysize > udf_maxdirhashsize) { 4177 /* no use trying to delete myself */ 4178 if (del_dirh == dirh) 4179 break; 4180 prev_dirh = TAILQ_PREV(del_dirh, _udf_dirhash, next); 4181 if (del_dirh->refcnt == 0) 4182 udf_dirhash_purge(del_dirh); 4183 del_dirh = prev_dirh; 4184 } 4185 } 4186 4187 /* add to the hashline */ 4188 dirh_e = pool_get(&udf_dirhash_entry_pool, PR_WAITOK); 4189 memset(dirh_e, 0, sizeof(struct udf_dirhash_entry)); 4190 4191 dirh_e->hashvalue = hashvalue; 4192 dirh_e->offset = offset; 4193 dirh_e->d_namlen = dirent->d_namlen; 4194 dirh_e->fid_size = fid_size; 4195 4196 dirh->size += sizeof(struct udf_dirhash_entry); 4197 udf_dirhashsize += sizeof(struct udf_dirhash_entry); 4198 LIST_INSERT_HEAD(&dirh->entries[hashline], dirh_e, next); 4199 } 4200 4201 4202 static void 4203 udf_dirhash_enter_freed(struct udf_node *dir_node, uint64_t offset, 4204 uint32_t fid_size) 4205 { 4206 struct udf_dirhash *dirh; 4207 struct udf_dirhash_entry *dirh_e; 4208 4209 /* make sure we have a dirhash to work on */ 4210 dirh = dir_node->dir_hash; 4211 KASSERT(dirh); 4212 KASSERT(dirh->refcnt > 0); 4213 4214 #ifdef DEBUG 4215 /* check for double entry of free space */ 4216 LIST_FOREACH(dirh_e, &dirh->free_entries, next) 4217 KASSERT(dirh_e->offset != offset); 4218 #endif 4219 4220 DPRINTF(DIRHASH, ("dirhash enter FREED %"PRIu64", %d\n", 4221 offset, fid_size)); 4222 dirh_e = pool_get(&udf_dirhash_entry_pool, PR_WAITOK); 4223 memset(dirh_e, 0, sizeof(struct udf_dirhash_entry)); 4224 4225 dirh_e->hashvalue = 0; /* not relevant */ 4226 dirh_e->offset = offset; 4227 dirh_e->d_namlen = 0; /* not relevant */ 4228 dirh_e->fid_size = fid_size; 4229 4230 /* XXX it might be preferable to append them at the tail */ 4231 LIST_INSERT_HEAD(&dirh->free_entries, dirh_e, next); 4232 dirh->size += sizeof(struct udf_dirhash_entry); 4233 udf_dirhashsize += sizeof(struct udf_dirhash_entry); 4234 } 4235 4236 4237 static void 4238 udf_dirhash_remove(struct udf_node *dir_node, struct dirent *dirent, 4239 uint64_t offset, uint32_t fid_size) 4240 { 4241 struct udf_dirhash *dirh; 4242 struct udf_dirhash_entry *dirh_e; 4243 uint32_t hashvalue, hashline; 4244 4245 DPRINTF(DIRHASH, ("dirhash remove %"PRIu64", 
%d for `%*.*s`\n", 4246 offset, fid_size, 4247 dirent->d_namlen, dirent->d_namlen, dirent->d_name)); 4248 4249 /* make sure we have a dirhash to work on */ 4250 dirh = dir_node->dir_hash; 4251 KASSERT(dirh); 4252 KASSERT(dirh->refcnt > 0); 4253 4254 /* calculate our hash */ 4255 hashvalue = udf_dirhash_hash(dirent->d_name, dirent->d_namlen); 4256 hashline = hashvalue & UDF_DIRHASH_HASHMASK; 4257 4258 /* lookup entry */ 4259 LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) { 4260 /* check for hash collision */ 4261 if (dirh_e->hashvalue != hashvalue) 4262 continue; 4263 if (dirh_e->offset != offset) 4264 continue; 4265 4266 /* got it! */ 4267 KASSERT(dirh_e->d_namlen == dirent->d_namlen); 4268 KASSERT(dirh_e->fid_size == fid_size); 4269 LIST_REMOVE(dirh_e, next); 4270 dirh->size -= sizeof(struct udf_dirhash_entry); 4271 udf_dirhashsize -= sizeof(struct udf_dirhash_entry); 4272 4273 udf_dirhash_enter_freed(dir_node, offset, fid_size); 4274 return; 4275 } 4276 4277 /* not found! */ 4278 panic("dirhash_remove couldn't find entry in hash table\n"); 4279 } 4280 4281 4282 /* BUGALERT: don't use result longer than needed, never past the node lock */ 4283 /* call with NULL *result initially and it will return nonzero if again */ 4284 static int 4285 udf_dirhash_lookup(struct udf_node *dir_node, const char *d_name, int d_namlen, 4286 struct udf_dirhash_entry **result) 4287 { 4288 struct udf_dirhash *dirh; 4289 struct udf_dirhash_entry *dirh_e; 4290 uint32_t hashvalue, hashline; 4291 4292 KASSERT(VOP_ISLOCKED(dir_node->vnode)); 4293 4294 /* make sure we have a dirhash to work on */ 4295 dirh = dir_node->dir_hash; 4296 KASSERT(dirh); 4297 KASSERT(dirh->refcnt > 0); 4298 4299 /* start where we were */ 4300 if (*result) { 4301 KASSERT(dir_node->dir_hash); 4302 dirh_e = *result; 4303 4304 /* retrieve information to avoid recalculation and advance */ 4305 hashvalue = dirh_e->hashvalue; 4306 dirh_e = LIST_NEXT(*result, next); 4307 } else { 4308 /* calculate our hash and lookup all entries in hashline */ 4309 hashvalue = udf_dirhash_hash(d_name, d_namlen); 4310 hashline = hashvalue & UDF_DIRHASH_HASHMASK; 4311 dirh_e = LIST_FIRST(&dirh->entries[hashline]); 4312 } 4313 4314 for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) { 4315 /* check for hash collision */ 4316 if (dirh_e->hashvalue != hashvalue) 4317 continue; 4318 if (dirh_e->d_namlen != d_namlen) 4319 continue; 4320 /* might have an entry in the cache */ 4321 *result = dirh_e; 4322 return 1; 4323 } 4324 4325 *result = NULL; 4326 return 0; 4327 } 4328 4329 4330 /* BUGALERT: don't use result longer than needed, never past the node lock */ 4331 /* call with NULL *result initially and it will return nonzero if again */ 4332 static int 4333 udf_dirhash_lookup_freed(struct udf_node *dir_node, uint32_t min_fidsize, 4334 struct udf_dirhash_entry **result) 4335 { 4336 struct udf_dirhash *dirh; 4337 struct udf_dirhash_entry *dirh_e; 4338 4339 KASSERT(VOP_ISLOCKED(dir_node->vnode)); 4340 4341 /* make sure we have a dirhash to work on */ 4342 dirh = dir_node->dir_hash; 4343 KASSERT(dirh); 4344 KASSERT(dirh->refcnt > 0); 4345 4346 /* start where we were */ 4347 if (*result) { 4348 KASSERT(dir_node->dir_hash); 4349 dirh_e = LIST_NEXT(*result, next); 4350 } else { 4351 /* lookup all entries that match */ 4352 dirh_e = LIST_FIRST(&dirh->free_entries); 4353 } 4354 4355 for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) { 4356 /* check for minimum size */ 4357 if (dirh_e->fid_size < min_fidsize) 4358 continue; 4359 /* might be a candidate */ 4360 *result = dirh_e; 
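/* remember this candidate so a subsequent call can resume the scan here */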
4361 return 1; 4362 } 4363 4364 *result = NULL; 4365 return 0; 4366 } 4367 4368 4369 static int 4370 udf_dirhash_fill(struct udf_node *dir_node) 4371 { 4372 struct vnode *dvp = dir_node->vnode; 4373 struct udf_dirhash *dirh; 4374 struct file_entry *fe = dir_node->fe; 4375 struct extfile_entry *efe = dir_node->efe; 4376 struct fileid_desc *fid; 4377 struct dirent *dirent; 4378 uint64_t file_size, pre_diroffset, diroffset; 4379 uint32_t lb_size; 4380 int error; 4381 4382 /* make sure we have a dirhash to work on */ 4383 dirh = dir_node->dir_hash; 4384 KASSERT(dirh); 4385 KASSERT(dirh->refcnt > 0); 4386 4387 if (dirh->flags & UDF_DIRH_BROKEN) 4388 return EIO; 4389 if (dirh->flags & UDF_DIRH_COMPLETE) 4390 return 0; 4391 4392 /* make sure we have a clean dirhash to add to */ 4393 udf_dirhash_purge(dirh); 4394 4395 /* get directory filesize */ 4396 if (fe) { 4397 file_size = udf_rw64(fe->inf_len); 4398 } else { 4399 assert(efe); 4400 file_size = udf_rw64(efe->inf_len); 4401 } 4402 4403 /* allocate temporary space for fid */ 4404 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size); 4405 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK); 4406 4407 /* allocate temporary space for dirent */ 4408 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK); 4409 4410 error = 0; 4411 diroffset = 0; 4412 while (diroffset < file_size) { 4413 /* transfer a new fid/dirent */ 4414 pre_diroffset = diroffset; 4415 error = udf_read_fid_stream(dvp, &diroffset, fid, dirent); 4416 if (error) { 4417 /* TODO what to do? continue but not add? */ 4418 dirh->flags |= UDF_DIRH_BROKEN; 4419 udf_dirhash_purge(dirh); 4420 break; 4421 } 4422 4423 if ((fid->file_char & UDF_FILE_CHAR_DEL)) { 4424 /* register deleted extent for reuse */ 4425 udf_dirhash_enter_freed(dir_node, pre_diroffset, 4426 udf_fidsize(fid)); 4427 } else { 4428 /* append to the dirhash */ 4429 udf_dirhash_enter(dir_node, fid, dirent, pre_diroffset, 4430 udf_fidsize(fid), 0); 4431 } 4432 } 4433 dirh->flags |= UDF_DIRH_COMPLETE; 4434 4435 free(fid, M_UDFTEMP); 4436 free(dirent, M_UDFTEMP); 4437 4438 return error; 4439 } 4440 4441 4442 /* --------------------------------------------------------------------- */ 4443 4444 /* 4445 * Directory read and manipulation functions. 4446 * 4447 * Note that if the file is found, the cached diroffset position *before* the 4448 * advance is remembered. Thus if the same filename is lookup again just after 4449 * this lookup its immediately found. 
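 * This matches the usual pattern of a VOP_LOOKUP directly followed by a
 * create, remove or rename of the same name.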
4450 */ 4451 4452 int 4453 udf_lookup_name_in_dir(struct vnode *vp, const char *name, int namelen, 4454 struct long_ad *icb_loc, int *found) 4455 { 4456 struct udf_node *dir_node = VTOI(vp); 4457 struct udf_dirhash_entry *dirh_ep; 4458 struct fileid_desc *fid; 4459 struct dirent *dirent; 4460 uint64_t diroffset; 4461 uint32_t lb_size; 4462 int hit, error; 4463 4464 /* set default return */ 4465 *found = 0; 4466 4467 /* get our dirhash and make sure its read in */ 4468 udf_dirhash_get(&dir_node->dir_hash); 4469 error = udf_dirhash_fill(dir_node); 4470 if (error) { 4471 udf_dirhash_put(dir_node->dir_hash); 4472 return error; 4473 } 4474 4475 /* allocate temporary space for fid */ 4476 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size); 4477 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK); 4478 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK); 4479 4480 DPRINTF(DIRHASH, ("dirhash_lookup looking for `%*.*s`\n", 4481 namelen, namelen, name)); 4482 4483 /* search our dirhash hits */ 4484 memset(icb_loc, 0, sizeof(*icb_loc)); 4485 dirh_ep = NULL; 4486 for (;;) { 4487 hit = udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep); 4488 /* if no hit, abort the search */ 4489 if (!hit) 4490 break; 4491 4492 /* check this hit */ 4493 diroffset = dirh_ep->offset; 4494 4495 /* transfer a new fid/dirent */ 4496 error = udf_read_fid_stream(vp, &diroffset, fid, dirent); 4497 if (error) 4498 break; 4499 4500 DPRINTF(DIRHASH, ("dirhash_lookup\tchecking `%*.*s`\n", 4501 dirent->d_namlen, dirent->d_namlen, dirent->d_name)); 4502 4503 /* see if its our entry */ 4504 KASSERT(dirent->d_namlen == namelen); 4505 if (strncmp(dirent->d_name, name, namelen) == 0) { 4506 *found = 1; 4507 *icb_loc = fid->icb; 4508 break; 4509 } 4510 } 4511 free(fid, M_UDFTEMP); 4512 free(dirent, M_UDFTEMP); 4513 4514 udf_dirhash_put(dir_node->dir_hash); 4515 4516 return error; 4517 } 4518 4519 /* --------------------------------------------------------------------- */ 4520 4521 static int 4522 udf_create_new_fe(struct udf_mount *ump, struct file_entry *fe, int file_type, 4523 struct long_ad *node_icb, struct long_ad *parent_icb, 4524 uint64_t parent_unique_id) 4525 { 4526 struct timespec now; 4527 struct icb_tag *icb; 4528 struct filetimes_extattr_entry *ft_extattr; 4529 uint64_t unique_id; 4530 uint32_t fidsize, lb_num; 4531 uint8_t *bpos; 4532 int crclen, attrlen; 4533 4534 lb_num = udf_rw32(node_icb->loc.lb_num); 4535 udf_inittag(ump, &fe->tag, TAGID_FENTRY, lb_num); 4536 icb = &fe->icbtag; 4537 4538 /* 4539 * Always use strategy type 4 unless on WORM wich we don't support 4540 * (yet). Fill in defaults and set for internal allocation of data. 
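 * Strategy type 4 records the node in a single ICB; strategy 4096 would
 * chain ICBs through indirect entries instead (see udf_get_node()).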
4541 */ 4542 icb->strat_type = udf_rw16(4); 4543 icb->max_num_entries = udf_rw16(1); 4544 icb->file_type = file_type; /* 8 bit */ 4545 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC); 4546 4547 fe->perm = udf_rw32(0x7fff); /* all is allowed */ 4548 fe->link_cnt = udf_rw16(0); /* explicit setting */ 4549 4550 fe->ckpoint = udf_rw32(1); /* user supplied file version */ 4551 4552 vfs_timestamp(&now); 4553 udf_timespec_to_timestamp(&now, &fe->atime); 4554 udf_timespec_to_timestamp(&now, &fe->attrtime); 4555 udf_timespec_to_timestamp(&now, &fe->mtime); 4556 4557 udf_set_regid(&fe->imp_id, IMPL_NAME); 4558 udf_add_impl_regid(ump, &fe->imp_id); 4559 4560 unique_id = udf_advance_uniqueid(ump); 4561 fe->unique_id = udf_rw64(unique_id); 4562 fe->l_ea = udf_rw32(0); 4563 4564 /* create extended attribute to record our creation time */ 4565 attrlen = UDF_FILETIMES_ATTR_SIZE(1); 4566 ft_extattr = malloc(attrlen, M_UDFTEMP, M_WAITOK); 4567 memset(ft_extattr, 0, attrlen); 4568 ft_extattr->hdr.type = udf_rw32(UDF_FILETIMES_ATTR_NO); 4569 ft_extattr->hdr.subtype = 1; /* [4/48.10.5] */ 4570 ft_extattr->hdr.a_l = udf_rw32(UDF_FILETIMES_ATTR_SIZE(1)); 4571 ft_extattr->d_l = udf_rw32(UDF_TIMESTAMP_SIZE); /* one item */ 4572 ft_extattr->existence = UDF_FILETIMES_FILE_CREATION; 4573 udf_timespec_to_timestamp(&now, &ft_extattr->times[0]); 4574 4575 udf_extattr_insert_internal(ump, (union dscrptr *) fe, 4576 (struct extattr_entry *) ft_extattr); 4577 free(ft_extattr, M_UDFTEMP); 4578 4579 /* if its a directory, create '..' */ 4580 bpos = (uint8_t *) fe->data + udf_rw32(fe->l_ea); 4581 fidsize = 0; 4582 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) { 4583 fidsize = udf_create_parentfid(ump, 4584 (struct fileid_desc *) bpos, parent_icb, 4585 parent_unique_id); 4586 } 4587 4588 /* record fidlength information */ 4589 fe->inf_len = udf_rw64(fidsize); 4590 fe->l_ad = udf_rw32(fidsize); 4591 fe->logblks_rec = udf_rw64(0); /* intern */ 4592 4593 crclen = sizeof(struct file_entry) - 1 - UDF_DESC_TAG_LENGTH; 4594 crclen += udf_rw32(fe->l_ea) + fidsize; 4595 fe->tag.desc_crc_len = udf_rw16(crclen); 4596 4597 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fe); 4598 4599 return fidsize; 4600 } 4601 4602 /* --------------------------------------------------------------------- */ 4603 4604 static int 4605 udf_create_new_efe(struct udf_mount *ump, struct extfile_entry *efe, 4606 int file_type, struct long_ad *node_icb, struct long_ad *parent_icb, 4607 uint64_t parent_unique_id) 4608 { 4609 struct timespec now; 4610 struct icb_tag *icb; 4611 uint64_t unique_id; 4612 uint32_t fidsize, lb_num; 4613 uint8_t *bpos; 4614 int crclen; 4615 4616 lb_num = udf_rw32(node_icb->loc.lb_num); 4617 udf_inittag(ump, &efe->tag, TAGID_EXTFENTRY, lb_num); 4618 icb = &efe->icbtag; 4619 4620 /* 4621 * Always use strategy type 4 unless on WORM wich we don't support 4622 * (yet). Fill in defaults and set for internal allocation of data. 
4623 */ 4624 icb->strat_type = udf_rw16(4); 4625 icb->max_num_entries = udf_rw16(1); 4626 icb->file_type = file_type; /* 8 bit */ 4627 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC); 4628 4629 efe->perm = udf_rw32(0x7fff); /* all is allowed */ 4630 efe->link_cnt = udf_rw16(0); /* explicit setting */ 4631 4632 efe->ckpoint = udf_rw32(1); /* user supplied file version */ 4633 4634 vfs_timestamp(&now); 4635 udf_timespec_to_timestamp(&now, &efe->ctime); 4636 udf_timespec_to_timestamp(&now, &efe->atime); 4637 udf_timespec_to_timestamp(&now, &efe->attrtime); 4638 udf_timespec_to_timestamp(&now, &efe->mtime); 4639 4640 udf_set_regid(&efe->imp_id, IMPL_NAME); 4641 udf_add_impl_regid(ump, &efe->imp_id); 4642 4643 unique_id = udf_advance_uniqueid(ump); 4644 efe->unique_id = udf_rw64(unique_id); 4645 efe->l_ea = udf_rw32(0); 4646 4647 /* if its a directory, create '..' */ 4648 bpos = (uint8_t *) efe->data + udf_rw32(efe->l_ea); 4649 fidsize = 0; 4650 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) { 4651 fidsize = udf_create_parentfid(ump, 4652 (struct fileid_desc *) bpos, parent_icb, 4653 parent_unique_id); 4654 } 4655 4656 /* record fidlength information */ 4657 efe->obj_size = udf_rw64(fidsize); 4658 efe->inf_len = udf_rw64(fidsize); 4659 efe->l_ad = udf_rw32(fidsize); 4660 efe->logblks_rec = udf_rw64(0); /* intern */ 4661 4662 crclen = sizeof(struct extfile_entry) - 1 - UDF_DESC_TAG_LENGTH; 4663 crclen += udf_rw32(efe->l_ea) + fidsize; 4664 efe->tag.desc_crc_len = udf_rw16(crclen); 4665 4666 (void) udf_validate_tag_and_crc_sums((union dscrptr *) efe); 4667 4668 return fidsize; 4669 } 4670 4671 /* --------------------------------------------------------------------- */ 4672 4673 int 4674 udf_dir_detach(struct udf_mount *ump, struct udf_node *dir_node, 4675 struct udf_node *udf_node, struct componentname *cnp) 4676 { 4677 struct vnode *dvp = dir_node->vnode; 4678 struct udf_dirhash_entry *dirh_ep; 4679 struct file_entry *fe = dir_node->fe; 4680 struct extfile_entry *efe = dir_node->efe; 4681 struct fileid_desc *fid; 4682 struct dirent *dirent; 4683 uint64_t file_size, diroffset; 4684 uint32_t lb_size, fidsize; 4685 int found, error; 4686 char const *name = cnp->cn_nameptr; 4687 int namelen = cnp->cn_namelen; 4688 int hit, refcnt; 4689 4690 /* get our dirhash and make sure its read in */ 4691 udf_dirhash_get(&dir_node->dir_hash); 4692 error = udf_dirhash_fill(dir_node); 4693 if (error) { 4694 udf_dirhash_put(dir_node->dir_hash); 4695 return error; 4696 } 4697 4698 /* get directory filesize */ 4699 if (fe) { 4700 file_size = udf_rw64(fe->inf_len); 4701 } else { 4702 assert(efe); 4703 file_size = udf_rw64(efe->inf_len); 4704 } 4705 4706 /* allocate temporary space for fid */ 4707 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size); 4708 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK); 4709 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK); 4710 4711 /* search our dirhash hits */ 4712 found = 0; 4713 dirh_ep = NULL; 4714 for (;;) { 4715 hit = udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep); 4716 /* if no hit, abort the search */ 4717 if (!hit) 4718 break; 4719 4720 /* check this hit */ 4721 diroffset = dirh_ep->offset; 4722 4723 /* transfer a new fid/dirent */ 4724 error = udf_read_fid_stream(dvp, &diroffset, fid, dirent); 4725 if (error) 4726 break; 4727 4728 /* see if its our entry */ 4729 KASSERT(dirent->d_namlen == namelen); 4730 if (strncmp(dirent->d_name, name, namelen) == 0) { 4731 found = 1; 4732 break; 4733 } 4734 } 4735 4736 if (!found) 4737 error = ENOENT; 4738 if (error) 4739 goto 
error_out; 4740 4741 /* mark deleted */ 4742 fid->file_char |= UDF_FILE_CHAR_DEL; 4743 #ifdef UDF_COMPLETE_DELETE 4744 memset(&fid->icb, 0, sizeof(fid->icb)); 4745 #endif 4746 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid); 4747 4748 /* get size of fid and compensate for the read_fid_stream advance */ 4749 fidsize = udf_fidsize(fid); 4750 diroffset -= fidsize; 4751 4752 /* write out */ 4753 error = vn_rdwr(UIO_WRITE, dir_node->vnode, 4754 fid, fidsize, diroffset, 4755 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, 4756 FSCRED, NULL, NULL); 4757 if (error) 4758 goto error_out; 4759 4760 /* get reference count of attached node */ 4761 if (udf_node->fe) { 4762 refcnt = udf_rw16(udf_node->fe->link_cnt); 4763 } else { 4764 KASSERT(udf_node->efe); 4765 refcnt = udf_rw16(udf_node->efe->link_cnt); 4766 } 4767 #ifdef UDF_COMPLETE_DELETE 4768 /* substract reference counter in attached node */ 4769 refcnt -= 1; 4770 if (udf_node->fe) { 4771 udf_node->fe->link_cnt = udf_rw16(refcnt); 4772 } else { 4773 udf_node->efe->link_cnt = udf_rw16(refcnt); 4774 } 4775 4776 /* prevent writeout when refcnt == 0 */ 4777 if (refcnt == 0) 4778 udf_node->i_flags |= IN_DELETED; 4779 4780 if (fid->file_char & UDF_FILE_CHAR_DIR) { 4781 int drefcnt; 4782 4783 /* substract reference counter in directory node */ 4784 /* note subtract 2 (?) for its was also backreferenced */ 4785 if (dir_node->fe) { 4786 drefcnt = udf_rw16(dir_node->fe->link_cnt); 4787 drefcnt -= 1; 4788 dir_node->fe->link_cnt = udf_rw16(drefcnt); 4789 } else { 4790 KASSERT(dir_node->efe); 4791 drefcnt = udf_rw16(dir_node->efe->link_cnt); 4792 drefcnt -= 1; 4793 dir_node->efe->link_cnt = udf_rw16(drefcnt); 4794 } 4795 } 4796 4797 udf_node->i_flags |= IN_MODIFIED; 4798 dir_node->i_flags |= IN_MODIFIED; 4799 #endif 4800 /* if it is/was a hardlink adjust the file count */ 4801 if (refcnt > 0) 4802 udf_adjust_filecount(udf_node, -1); 4803 4804 /* remove from the dirhash */ 4805 udf_dirhash_remove(dir_node, dirent, diroffset, 4806 udf_fidsize(fid)); 4807 4808 error_out: 4809 free(fid, M_UDFTEMP); 4810 free(dirent, M_UDFTEMP); 4811 4812 udf_dirhash_put(dir_node->dir_hash); 4813 4814 return error; 4815 } 4816 4817 /* --------------------------------------------------------------------- */ 4818 4819 /* 4820 * We are not allowed to split the fid tag itself over an logical block so 4821 * check the space remaining in the logical block. 4822 * 4823 * We try to select the smallest candidate for recycling or when none is 4824 * found, append a new one at the end of the directory. 
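 * Note that a recycled slot has to fit exactly or leave at least 32 bytes
 * over, so the remainder can be recorded as an implementation use area
 * (UDF 2.3.4.2+3).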
4825 */ 4826 4827 int 4828 udf_dir_attach(struct udf_mount *ump, struct udf_node *dir_node, 4829 struct udf_node *udf_node, struct vattr *vap, struct componentname *cnp) 4830 { 4831 struct vnode *dvp = dir_node->vnode; 4832 struct udf_dirhash_entry *dirh_ep; 4833 struct fileid_desc *fid; 4834 struct icb_tag *icbtag; 4835 struct charspec osta_charspec; 4836 struct dirent dirent; 4837 uint64_t unique_id, dir_size, diroffset; 4838 uint64_t fid_pos, end_fid_pos, chosen_fid_pos; 4839 uint32_t chosen_size, chosen_size_diff; 4840 int lb_size, lb_rest, fidsize, this_fidsize, size_diff; 4841 int file_char, refcnt, icbflags, addr_type, hit, error; 4842 4843 /* get our dirhash and make sure its read in */ 4844 udf_dirhash_get(&dir_node->dir_hash); 4845 error = udf_dirhash_fill(dir_node); 4846 if (error) { 4847 udf_dirhash_put(dir_node->dir_hash); 4848 return error; 4849 } 4850 4851 /* get info */ 4852 lb_size = udf_rw32(ump->logical_vol->lb_size); 4853 udf_osta_charset(&osta_charspec); 4854 4855 if (dir_node->fe) { 4856 dir_size = udf_rw64(dir_node->fe->inf_len); 4857 icbtag = &dir_node->fe->icbtag; 4858 } else { 4859 dir_size = udf_rw64(dir_node->efe->inf_len); 4860 icbtag = &dir_node->efe->icbtag; 4861 } 4862 4863 icbflags = udf_rw16(icbtag->flags); 4864 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK; 4865 4866 if (udf_node->fe) { 4867 unique_id = udf_rw64(udf_node->fe->unique_id); 4868 refcnt = udf_rw16(udf_node->fe->link_cnt); 4869 } else { 4870 unique_id = udf_rw64(udf_node->efe->unique_id); 4871 refcnt = udf_rw16(udf_node->efe->link_cnt); 4872 } 4873 4874 if (refcnt > 0) { 4875 unique_id = udf_advance_uniqueid(ump); 4876 udf_adjust_filecount(udf_node, 1); 4877 } 4878 4879 /* determine file characteristics */ 4880 file_char = 0; /* visible non deleted file and not stream metadata */ 4881 if (vap->va_type == VDIR) 4882 file_char = UDF_FILE_CHAR_DIR; 4883 4884 /* malloc scrap buffer */ 4885 fid = malloc(lb_size, M_TEMP, M_WAITOK); 4886 bzero(fid, lb_size); 4887 4888 /* calculate _minimum_ fid size */ 4889 unix_to_udf_name((char *) fid->data, &fid->l_fi, 4890 cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec); 4891 fidsize = UDF_FID_SIZE + fid->l_fi; 4892 fidsize = (fidsize + 3) & ~3; /* multiple of 4 */ 4893 4894 /* find position that will fit the FID */ 4895 chosen_fid_pos = dir_size; 4896 chosen_size = 0; 4897 chosen_size_diff = UINT_MAX; 4898 4899 /* shut up gcc */ 4900 dirent.d_namlen = 0; 4901 4902 /* search our dirhash hits */ 4903 error = 0; 4904 dirh_ep = NULL; 4905 for (;;) { 4906 hit = udf_dirhash_lookup_freed(dir_node, fidsize, &dirh_ep); 4907 /* if no hit, abort the search */ 4908 if (!hit) 4909 break; 4910 4911 /* check this hit for size */ 4912 this_fidsize = dirh_ep->fid_size; 4913 4914 /* check this hit */ 4915 fid_pos = dirh_ep->offset; 4916 end_fid_pos = fid_pos + this_fidsize; 4917 size_diff = this_fidsize - fidsize; 4918 lb_rest = lb_size - (end_fid_pos % lb_size); 4919 4920 #ifndef UDF_COMPLETE_DELETE 4921 /* transfer a new fid/dirent */ 4922 error = udf_read_fid_stream(vp, &fid_pos, fid, dirent); 4923 if (error) 4924 goto error_out; 4925 4926 /* only reuse entries that are wiped */ 4927 /* check if the len + loc are marked zero */ 4928 if (udf_rw32(fid->icb.len != 0)) 4929 continue; 4930 if (udf_rw32(fid->icb.loc.lb_num) != 0) 4931 continue; 4932 if (udf_rw16(fid->icb.loc.part_num != 0)) 4933 continue; 4934 #endif /* UDF_COMPLETE_DELETE */ 4935 4936 /* select if not splitting the tag and its smaller */ 4937 if ((size_diff >= 0) && 4938 (size_diff < chosen_size_diff) && 
4939 (lb_rest >= sizeof(struct desc_tag))) 4940 { 4941 /* UDF 2.3.4.2+3 specifies rules for iu size */ 4942 if ((size_diff == 0) || (size_diff >= 32)) { 4943 chosen_fid_pos = fid_pos; 4944 chosen_size = this_fidsize; 4945 chosen_size_diff = size_diff; 4946 } 4947 } 4948 } 4949 4950 4951 /* extend directory if no other candidate found */ 4952 if (chosen_size == 0) { 4953 chosen_fid_pos = dir_size; 4954 chosen_size = fidsize; 4955 chosen_size_diff = 0; 4956 4957 /* special case UDF 2.00+ 2.3.4.4, no splitting up fid tag */ 4958 if (addr_type == UDF_ICB_INTERN_ALLOC) { 4959 /* pre-grow directory to see if we're to switch */ 4960 udf_grow_node(dir_node, dir_size + chosen_size); 4961 4962 icbflags = udf_rw16(icbtag->flags); 4963 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK; 4964 } 4965 4966 /* make sure the next fid desc_tag won't be splitted */ 4967 if (addr_type != UDF_ICB_INTERN_ALLOC) { 4968 end_fid_pos = chosen_fid_pos + chosen_size; 4969 lb_rest = lb_size - (end_fid_pos % lb_size); 4970 4971 /* pad with implementation use regid if needed */ 4972 if (lb_rest < sizeof(struct desc_tag)) 4973 chosen_size += 32; 4974 } 4975 } 4976 chosen_size_diff = chosen_size - fidsize; 4977 diroffset = chosen_fid_pos + chosen_size; 4978 4979 /* populate the FID */ 4980 memset(fid, 0, lb_size); 4981 udf_inittag(ump, &fid->tag, TAGID_FID, 0); 4982 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */ 4983 fid->file_char = file_char; 4984 fid->icb = udf_node->loc; 4985 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id); 4986 fid->l_iu = udf_rw16(0); 4987 4988 if (chosen_size > fidsize) { 4989 /* insert implementation-use regid to space it correctly */ 4990 fid->l_iu = udf_rw16(chosen_size_diff); 4991 4992 /* set implementation use */ 4993 udf_set_regid((struct regid *) fid->data, IMPL_NAME); 4994 udf_add_impl_regid(ump, (struct regid *) fid->data); 4995 } 4996 4997 /* fill in name */ 4998 unix_to_udf_name((char *) fid->data + udf_rw16(fid->l_iu), 4999 &fid->l_fi, cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec); 5000 5001 fid->tag.desc_crc_len = chosen_size - UDF_DESC_TAG_LENGTH; 5002 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid); 5003 5004 /* writeout FID/update parent directory */ 5005 error = vn_rdwr(UIO_WRITE, dvp, 5006 fid, chosen_size, chosen_fid_pos, 5007 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, 5008 FSCRED, NULL, NULL); 5009 5010 if (error) 5011 goto error_out; 5012 5013 /* add reference counter in attached node */ 5014 if (udf_node->fe) { 5015 refcnt = udf_rw16(udf_node->fe->link_cnt); 5016 udf_node->fe->link_cnt = udf_rw16(refcnt+1); 5017 } else { 5018 KASSERT(udf_node->efe); 5019 refcnt = udf_rw16(udf_node->efe->link_cnt); 5020 udf_node->efe->link_cnt = udf_rw16(refcnt+1); 5021 } 5022 5023 /* mark not deleted if it was... just in case, but do warn */ 5024 if (udf_node->i_flags & IN_DELETED) { 5025 printf("udf: warning, marking a file undeleted\n"); 5026 udf_node->i_flags &= ~IN_DELETED; 5027 } 5028 5029 if (file_char & UDF_FILE_CHAR_DIR) { 5030 /* add reference counter in directory node for '..' 
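which the new subdirectory records for its parent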
*/ 5031 if (dir_node->fe) { 5032 refcnt = udf_rw16(dir_node->fe->link_cnt); 5033 refcnt++; 5034 dir_node->fe->link_cnt = udf_rw16(refcnt); 5035 } else { 5036 KASSERT(dir_node->efe); 5037 refcnt = udf_rw16(dir_node->efe->link_cnt); 5038 refcnt++; 5039 dir_node->efe->link_cnt = udf_rw16(refcnt); 5040 } 5041 } 5042 5043 /* append to the dirhash */ 5044 dirent.d_namlen = cnp->cn_namelen; 5045 memcpy(dirent.d_name, cnp->cn_nameptr, cnp->cn_namelen); 5046 udf_dirhash_enter(dir_node, fid, &dirent, chosen_fid_pos, 5047 udf_fidsize(fid), 1); 5048 5049 /* note updates */ 5050 udf_node->i_flags |= IN_CHANGE | IN_MODIFY; /* | IN_CREATE? */ 5051 /* VN_KNOTE(udf_node, ...) */ 5052 udf_update(udf_node->vnode, NULL, NULL, NULL, 0); 5053 5054 error_out: 5055 free(fid, M_TEMP); 5056 5057 udf_dirhash_put(dir_node->dir_hash); 5058 5059 return error; 5060 } 5061 5062 /* --------------------------------------------------------------------- */ 5063
5064 /* 5065 * Each node can have an attached streamdir node, though not recursively. These 5066 * are otherwise known as named substreams/named extended attributes that have 5067 * no size limitations. 5068 * 5069 * `Normal' extended attributes are indicated with a number and are recorded 5070 * in either the fe/efe descriptor itself for small descriptors or recorded in 5071 * the attached extended attribute file. Since these spaces can get 5072 * fragmented, care ought to be taken. 5073 * 5074 * Since the size of the space reserved for allocation descriptors is limited, 5075 * there is a mechanism provided for extending this space; this is done by a 5076 * special extent to allow shrinking of the allocations without breaking the 5077 * linkage to the allocation extent descriptor. 5078 */ 5079
5080 int 5081 udf_get_node(struct udf_mount *ump, struct long_ad *node_icb_loc, 5082 struct udf_node **udf_noderes) 5083 { 5084 union dscrptr *dscr; 5085 struct udf_node *udf_node; 5086 struct vnode *nvp; 5087 struct long_ad icb_loc, last_fe_icb_loc; 5088 uint64_t file_size; 5089 uint32_t lb_size, sector, dummy; 5090 uint8_t *file_data; 5091 int udf_file_type, dscr_type, strat, strat4096, needs_indirect; 5092 int slot, eof, error; 5093 5094 DPRINTF(NODE, ("udf_get_node called\n")); 5095 *udf_noderes = udf_node = NULL; 5096 5097 /* lock to disallow simultaneous creation of same udf_node */ 5098 mutex_enter(&ump->get_node_lock); 5099 5100 DPRINTF(NODE, ("\tlookup in hash table\n")); 5101 /* lookup in hash table */ 5102 assert(ump); 5103 assert(node_icb_loc); 5104 udf_node = udf_hash_lookup(ump, node_icb_loc); 5105 if (udf_node) { 5106 DPRINTF(NODE, ("\tgot it from the hash!\n")); 5107 /* vnode is returned locked */ 5108 *udf_noderes = udf_node; 5109 mutex_exit(&ump->get_node_lock); 5110 return 0; 5111 } 5112 5113 /* garbage check: translate udf_node_icb_loc to sectornr */ 5114 error = udf_translate_vtop(ump, node_icb_loc, &sector, &dummy); 5115 if (error) { 5116 /* no use, this will fail anyway */ 5117 mutex_exit(&ump->get_node_lock); 5118 return EINVAL; 5119 } 5120 5121 /* build udf_node (do initialise!)
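(pool_get() does not zero the memory, hence the memset() below)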
*/ 5122 udf_node = pool_get(&udf_node_pool, PR_WAITOK); 5123 memset(udf_node, 0, sizeof(struct udf_node)); 5124 5125 DPRINTF(NODE, ("\tget new vnode\n")); 5126 /* give it a vnode */ 5127 error = getnewvnode(VT_UDF, ump->vfs_mountp, udf_vnodeop_p, &nvp); 5128 if (error) { 5129 pool_put(&udf_node_pool, udf_node); 5130 mutex_exit(&ump->get_node_lock); 5131 return error; 5132 } 5133 5134 /* always return locked vnode */ 5135 if ((error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY))) { 5136 /* recycle vnode and unlock; simultanious will fail too */ 5137 ungetnewvnode(nvp); 5138 mutex_exit(&ump->get_node_lock); 5139 return error; 5140 } 5141 5142 /* initialise crosslinks, note location of fe/efe for hashing */ 5143 udf_node->ump = ump; 5144 udf_node->vnode = nvp; 5145 nvp->v_data = udf_node; 5146 udf_node->loc = *node_icb_loc; 5147 udf_node->lockf = 0; 5148 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE); 5149 cv_init(&udf_node->node_lock, "udf_nlk"); 5150 genfs_node_init(nvp, &udf_genfsops); /* inititise genfs */ 5151 udf_node->outstanding_bufs = 0; 5152 udf_node->outstanding_nodedscr = 0; 5153 5154 /* insert into the hash lookup */ 5155 udf_register_node(udf_node); 5156 5157 /* safe to unlock, the entry is in the hash table, vnode is locked */ 5158 mutex_exit(&ump->get_node_lock); 5159 5160 icb_loc = *node_icb_loc; 5161 needs_indirect = 0; 5162 strat4096 = 0; 5163 udf_file_type = UDF_ICB_FILETYPE_UNKNOWN; 5164 file_size = 0; 5165 file_data = NULL; 5166 lb_size = udf_rw32(ump->logical_vol->lb_size); 5167 5168 DPRINTF(NODE, ("\tstart reading descriptors\n")); 5169 do { 5170 /* try to read in fe/efe */ 5171 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr); 5172 5173 /* blank sector marks end of sequence, check this */ 5174 if ((dscr == NULL) && (!strat4096)) 5175 error = ENOENT; 5176 5177 /* break if read error or blank sector */ 5178 if (error || (dscr == NULL)) 5179 break; 5180 5181 /* process descriptor based on the descriptor type */ 5182 dscr_type = udf_rw16(dscr->tag.id); 5183 DPRINTF(NODE, ("\tread descriptor %d\n", dscr_type)); 5184 5185 /* if dealing with an indirect entry, follow the link */ 5186 if (dscr_type == TAGID_INDIRECTENTRY) { 5187 needs_indirect = 0; 5188 udf_free_logvol_dscr(ump, &icb_loc, dscr); 5189 icb_loc = dscr->inde.indirect_icb; 5190 continue; 5191 } 5192 5193 /* only file entries and extended file entries allowed here */ 5194 if ((dscr_type != TAGID_FENTRY) && 5195 (dscr_type != TAGID_EXTFENTRY)) { 5196 udf_free_logvol_dscr(ump, &icb_loc, dscr); 5197 error = ENOENT; 5198 break; 5199 } 5200 5201 KASSERT(udf_tagsize(dscr, lb_size) == lb_size); 5202 5203 /* choose this one */ 5204 last_fe_icb_loc = icb_loc; 5205 5206 /* record and process/update (ext)fentry */ 5207 file_data = NULL; 5208 if (dscr_type == TAGID_FENTRY) { 5209 if (udf_node->fe) 5210 udf_free_logvol_dscr(ump, &last_fe_icb_loc, 5211 udf_node->fe); 5212 udf_node->fe = &dscr->fe; 5213 strat = udf_rw16(udf_node->fe->icbtag.strat_type); 5214 udf_file_type = udf_node->fe->icbtag.file_type; 5215 file_size = udf_rw64(udf_node->fe->inf_len); 5216 file_data = udf_node->fe->data; 5217 } else { 5218 if (udf_node->efe) 5219 udf_free_logvol_dscr(ump, &last_fe_icb_loc, 5220 udf_node->efe); 5221 udf_node->efe = &dscr->efe; 5222 strat = udf_rw16(udf_node->efe->icbtag.strat_type); 5223 udf_file_type = udf_node->efe->icbtag.file_type; 5224 file_size = udf_rw64(udf_node->efe->inf_len); 5225 file_data = udf_node->efe->data; 5226 } 5227 5228 /* check recording strategy (structure) */ 5229 5230 /* 5231 * Strategy 4096 is a 
daisy-linked chain terminating with an 5232 * unrecorded sector or a TERM descriptor. The next 5233 * descriptor is to be found in the sector that follows the 5234 * current sector. 5235 */ 5236 if (strat == 4096) { 5237 strat4096 = 1; 5238 needs_indirect = 1; 5239 5240 icb_loc.loc.lb_num = udf_rw32(icb_loc.loc.lb_num) + 1; 5241 } 5242 5243 /* 5244 * Strategy 4 is the normal strategy and terminates, but if 5245 * we're in strategy 4096, we can't have strategy 4 mixed in. 5246 */ 5247 5248 if (strat == 4) { 5249 if (strat4096) { 5250 error = EINVAL; 5251 break; 5252 } 5253 break; /* done */ 5254 } 5255 } while (!error); 5256
5257 /* first round of cleanup code */ 5258 if (error) { 5259 DPRINTF(NODE, ("\tnode fe/efe failed!\n")); 5260 /* recycle udf_node */ 5261 udf_dispose_node(udf_node); 5262 5263 vlockmgr(nvp->v_vnlock, LK_RELEASE); 5264 nvp->v_data = NULL; 5265 ungetnewvnode(nvp); 5266 5267 return EINVAL; /* error code ok? */ 5268 } 5269 DPRINTF(NODE, ("\tnode fe/efe read in fine\n")); 5270 5271 /* assert no references to dscr anymore beyond this point */ 5272 assert((udf_node->fe) || (udf_node->efe)); 5273 dscr = NULL; 5274
5275 /* 5276 * Remember where to record an updated version of the descriptor. If 5277 * there is a sequence of indirect entries, icb_loc will have been 5278 * updated. It's the write discipline to allocate new space and to make 5279 * sure the chain is maintained. 5280 * 5281 * `needs_indirect' flags if the next location is to be filled 5282 * with an indirect entry. 5283 */ 5284 udf_node->write_loc = icb_loc; 5285 udf_node->needs_indirect = needs_indirect; 5286 5287 /* 5288 * Go through all allocation extents of this descriptor and when 5289 * encountering a redirect, read in the allocation extension. These are 5290 * daisy-chained.
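 * Each extent read in here is remembered in ext[]/ext_loc[] so it can be
 * written back or freed later.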
5291 */ 5292 UDF_LOCK_NODE(udf_node, 0); 5293 udf_node->num_extensions = 0; 5294 5295 error = 0; 5296 slot = 0; 5297 for (;;) { 5298 udf_get_adslot(udf_node, slot, &icb_loc, &eof); 5299 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, " 5300 "lb_num = %d, part = %d\n", slot, eof, 5301 UDF_EXT_FLAGS(udf_rw32(icb_loc.len)), 5302 UDF_EXT_LEN(udf_rw32(icb_loc.len)), 5303 udf_rw32(icb_loc.loc.lb_num), 5304 udf_rw16(icb_loc.loc.part_num))); 5305 if (eof) 5306 break; 5307 slot++; 5308 5309 if (UDF_EXT_FLAGS(udf_rw32(icb_loc.len)) != UDF_EXT_REDIRECT) 5310 continue; 5311 5312 DPRINTF(NODE, ("\tgot redirect extent\n")); 5313 if (udf_node->num_extensions >= UDF_MAX_ALLOC_EXTENTS) { 5314 DPRINTF(ALLOC, ("udf_get_node: implementation limit, " 5315 "too many allocation extensions on " 5316 "udf_node\n")); 5317 error = EINVAL; 5318 break; 5319 } 5320 5321 /* length can only be *one* lb : UDF 2.50/2.3.7.1 */ 5322 if (UDF_EXT_LEN(udf_rw32(icb_loc.len)) != lb_size) { 5323 DPRINTF(ALLOC, ("udf_get_node: bad allocation " 5324 "extension size in udf_node\n")); 5325 error = EINVAL; 5326 break; 5327 } 5328 5329 DPRINTF(NODE, ("read allocation extent at lb_num %d\n", 5330 UDF_EXT_LEN(udf_rw32(icb_loc.loc.lb_num)))); 5331 /* load in allocation extent */ 5332 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr); 5333 if (error || (dscr == NULL)) 5334 break; 5335 5336 /* process read-in descriptor */ 5337 dscr_type = udf_rw16(dscr->tag.id); 5338 5339 if (dscr_type != TAGID_ALLOCEXTENT) { 5340 udf_free_logvol_dscr(ump, &icb_loc, dscr); 5341 error = ENOENT; 5342 break; 5343 } 5344 5345 DPRINTF(NODE, ("\trecording redirect extent\n")); 5346 udf_node->ext[udf_node->num_extensions] = &dscr->aee; 5347 udf_node->ext_loc[udf_node->num_extensions] = icb_loc; 5348 5349 udf_node->num_extensions++; 5350 5351 } /* while */ 5352 UDF_UNLOCK_NODE(udf_node, 0); 5353 5354 /* second round of cleanup code */ 5355 if (error) { 5356 /* recycle udf_node */ 5357 udf_dispose_node(udf_node); 5358 5359 vlockmgr(nvp->v_vnlock, LK_RELEASE); 5360 nvp->v_data = NULL; 5361 ungetnewvnode(nvp); 5362 5363 return EINVAL; /* error code ok? */ 5364 } 5365 5366 DPRINTF(NODE, ("\tnode read in fine\n")); 5367 5368 /* 5369 * Translate UDF filetypes into vnode types. 5370 * 5371 * Systemfiles like the meta main and mirror files are not treated as 5372 * normal files, so we type them as having no type. UDF dictates that 5373 * they are not allowed to be visible. 5374 */ 5375 5376 switch (udf_file_type) { 5377 case UDF_ICB_FILETYPE_DIRECTORY : 5378 case UDF_ICB_FILETYPE_STREAMDIR : 5379 nvp->v_type = VDIR; 5380 break; 5381 case UDF_ICB_FILETYPE_BLOCKDEVICE : 5382 nvp->v_type = VBLK; 5383 break; 5384 case UDF_ICB_FILETYPE_CHARDEVICE : 5385 nvp->v_type = VCHR; 5386 break; 5387 case UDF_ICB_FILETYPE_SOCKET : 5388 nvp->v_type = VSOCK; 5389 break; 5390 case UDF_ICB_FILETYPE_FIFO : 5391 nvp->v_type = VFIFO; 5392 break; 5393 case UDF_ICB_FILETYPE_SYMLINK : 5394 nvp->v_type = VLNK; 5395 break; 5396 case UDF_ICB_FILETYPE_VAT : 5397 case UDF_ICB_FILETYPE_META_MAIN : 5398 case UDF_ICB_FILETYPE_META_MIRROR : 5399 nvp->v_type = VNON; 5400 break; 5401 case UDF_ICB_FILETYPE_RANDOMACCESS : 5402 case UDF_ICB_FILETYPE_REALTIME : 5403 nvp->v_type = VREG; 5404 break; 5405 default: 5406 /* YIKES, something else */ 5407 nvp->v_type = VNON; 5408 } 5409 5410 /* TODO specfs, fifofs etc etc. 
vnops setting */ 5411 5412 /* don't forget to set vnode's v_size */ 5413 uvm_vnp_setsize(nvp, file_size); 5414 5415 /* TODO ext attr and streamdir udf_nodes */ 5416 5417 *udf_noderes = udf_node; 5418 5419 return 0; 5420 } 5421 5422 /* --------------------------------------------------------------------- */ 5423 5424 5425 int 5426 udf_writeout_node(struct udf_node *udf_node, int waitfor) 5427 { 5428 union dscrptr *dscr; 5429 struct long_ad *loc; 5430 int extnr, flags, error; 5431 5432 DPRINTF(NODE, ("udf_writeout_node called\n")); 5433 5434 KASSERT(udf_node->outstanding_bufs == 0); 5435 KASSERT(udf_node->outstanding_nodedscr == 0); 5436 5437 KASSERT(LIST_EMPTY(&udf_node->vnode->v_dirtyblkhd)); 5438 5439 if (udf_node->i_flags & IN_DELETED) { 5440 DPRINTF(NODE, ("\tnode deleted; not writing out\n")); 5441 return 0; 5442 } 5443 5444 /* lock node */ 5445 flags = waitfor ? 0 : IN_CALLBACK_ULK; 5446 UDF_LOCK_NODE(udf_node, flags); 5447 5448 /* at least one descriptor writeout */ 5449 udf_node->outstanding_nodedscr = 1; 5450 5451 /* we're going to write out the descriptor so clear the flags */ 5452 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED); 5453 5454 /* if we were rebuild, write out the allocation extents */ 5455 if (udf_node->i_flags & IN_NODE_REBUILD) { 5456 /* mark outstanding node dscriptors and issue them */ 5457 udf_node->outstanding_nodedscr += udf_node->num_extensions; 5458 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) { 5459 loc = &udf_node->ext_loc[extnr]; 5460 dscr = (union dscrptr *) udf_node->ext[extnr]; 5461 error = udf_write_logvol_dscr(udf_node, dscr, loc, 0); 5462 if (error) 5463 return error; 5464 } 5465 /* mark allocation extents written out */ 5466 udf_node->i_flags &= ~(IN_NODE_REBUILD); 5467 } 5468 5469 if (udf_node->fe) { 5470 dscr = (union dscrptr *) udf_node->fe; 5471 } else { 5472 KASSERT(udf_node->efe); 5473 dscr = (union dscrptr *) udf_node->efe; 5474 } 5475 KASSERT(dscr); 5476 5477 loc = &udf_node->write_loc; 5478 error = udf_write_logvol_dscr(udf_node, dscr, loc, waitfor); 5479 return error; 5480 } 5481 5482 /* --------------------------------------------------------------------- */ 5483 5484 int 5485 udf_dispose_node(struct udf_node *udf_node) 5486 { 5487 struct vnode *vp; 5488 int extnr; 5489 5490 DPRINTF(NODE, ("udf_dispose_node called on node %p\n", udf_node)); 5491 if (!udf_node) { 5492 DPRINTF(NODE, ("UDF: Dispose node on node NULL, ignoring\n")); 5493 return 0; 5494 } 5495 5496 vp = udf_node->vnode; 5497 #ifdef DIAGNOSTIC 5498 if (vp->v_numoutput) 5499 panic("disposing UDF node with pending I/O's, udf_node = %p, " 5500 "v_numoutput = %d", udf_node, vp->v_numoutput); 5501 #endif 5502 5503 /* wait until out of sync (just in case we happen to stumble over one */ 5504 KASSERT(!mutex_owned(&mntvnode_lock)); 5505 mutex_enter(&mntvnode_lock); 5506 while (udf_node->i_flags & IN_SYNCED) { 5507 cv_timedwait(&udf_node->ump->dirtynodes_cv, &mntvnode_lock, 5508 hz/16); 5509 } 5510 mutex_exit(&mntvnode_lock); 5511 5512 /* TODO extended attributes and streamdir */ 5513 5514 /* remove dirhash if present */ 5515 udf_dirhash_destroy(&udf_node->dir_hash); 5516 5517 /* remove from our hash lookup table */ 5518 udf_deregister_node(udf_node); 5519 5520 /* destroy our lock */ 5521 mutex_destroy(&udf_node->node_mutex); 5522 cv_destroy(&udf_node->node_lock); 5523 5524 /* dissociate our udf_node from the vnode */ 5525 genfs_node_destroy(udf_node->vnode); 5526 vp->v_data = NULL; 5527 5528 /* free associated memory and the node itself */ 5529 for (extnr = 0; extnr < 
udf_node->num_extensions; extnr++) { 5530 udf_free_logvol_dscr(udf_node->ump, &udf_node->ext_loc[extnr], 5531 udf_node->ext[extnr]); 5532 udf_node->ext[extnr] = (void *) 0xdeadcccc; 5533 } 5534 5535 if (udf_node->fe) 5536 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc, 5537 udf_node->fe); 5538 if (udf_node->efe) 5539 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc, 5540 udf_node->efe); 5541 5542 udf_node->fe = (void *) 0xdeadaaaa; 5543 udf_node->efe = (void *) 0xdeadbbbb; 5544 udf_node->ump = (void *) 0xdeadbeef; 5545 pool_put(&udf_node_pool, udf_node); 5546 5547 return 0; 5548 } 5549 5550 5551 5552 /* 5553 * create a new node using the specified vnodeops, vap and cnp but with the 5554 * udf_file_type. This allows special files to be created. Use with care. 5555 */ 5556 5557 static int 5558 udf_create_node_raw(struct vnode *dvp, struct vnode **vpp, int udf_file_type, 5559 int (**vnodeops)(void *), struct vattr *vap, struct componentname *cnp) 5560 { 5561 union dscrptr *dscr; 5562 struct udf_node *dir_node = VTOI(dvp);; 5563 struct udf_node *udf_node; 5564 struct udf_mount *ump = dir_node->ump; 5565 struct vnode *nvp; 5566 struct long_ad node_icb_loc; 5567 uint64_t parent_unique_id; 5568 uint64_t lmapping; 5569 uint32_t lb_size, lb_num; 5570 uint16_t vpart_num; 5571 uid_t uid; 5572 gid_t gid, parent_gid; 5573 int fid_size, error; 5574 5575 lb_size = udf_rw32(ump->logical_vol->lb_size); 5576 *vpp = NULL; 5577 5578 /* allocate vnode */ 5579 error = getnewvnode(VT_UDF, ump->vfs_mountp, vnodeops, &nvp); 5580 if (error) 5581 return error; 5582 5583 /* lock node */ 5584 error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY); 5585 if (error) { 5586 nvp->v_data = NULL; 5587 ungetnewvnode(nvp); 5588 return error; 5589 } 5590 5591 /* get disc allocation for one logical block */ 5592 error = udf_pre_allocate_space(ump, UDF_C_NODE, 1, 5593 &vpart_num, &lmapping); 5594 lb_num = lmapping; 5595 if (error) { 5596 vlockmgr(nvp->v_vnlock, LK_RELEASE); 5597 ungetnewvnode(nvp); 5598 return error; 5599 } 5600 5601 /* initialise pointer to location */ 5602 memset(&node_icb_loc, 0, sizeof(struct long_ad)); 5603 node_icb_loc.len = lb_size; 5604 node_icb_loc.loc.lb_num = udf_rw32(lb_num); 5605 node_icb_loc.loc.part_num = udf_rw16(vpart_num); 5606 5607 /* build udf_node (do initialise!) */ 5608 udf_node = pool_get(&udf_node_pool, PR_WAITOK); 5609 memset(udf_node, 0, sizeof(struct udf_node)); 5610 5611 /* initialise crosslinks, note location of fe/efe for hashing */ 5612 /* bugalert: synchronise with udf_get_node() */ 5613 udf_node->ump = ump; 5614 udf_node->vnode = nvp; 5615 nvp->v_data = udf_node; 5616 udf_node->loc = node_icb_loc; 5617 udf_node->write_loc = node_icb_loc; 5618 udf_node->lockf = 0; 5619 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE); 5620 cv_init(&udf_node->node_lock, "udf_nlk"); 5621 udf_node->outstanding_bufs = 0; 5622 udf_node->outstanding_nodedscr = 0; 5623 5624 /* initialise genfs */ 5625 genfs_node_init(nvp, &udf_genfsops); 5626 5627 /* insert into the hash lookup */ 5628 udf_register_node(udf_node); 5629 5630 /* get parent's unique ID for refering '..' 
if its a directory */ 5631 if (dir_node->fe) { 5632 parent_unique_id = udf_rw64(dir_node->fe->unique_id); 5633 parent_gid = (gid_t) udf_rw32(dir_node->fe->gid); 5634 } else { 5635 parent_unique_id = udf_rw64(dir_node->efe->unique_id); 5636 parent_gid = (gid_t) udf_rw32(dir_node->efe->gid); 5637 } 5638 5639 /* get descriptor */ 5640 udf_create_logvol_dscr(ump, udf_node, &node_icb_loc, &dscr); 5641 5642 /* choose a fe or an efe for it */ 5643 if (ump->logical_vol->tag.descriptor_ver == 2) { 5644 udf_node->fe = &dscr->fe; 5645 fid_size = udf_create_new_fe(ump, udf_node->fe, 5646 udf_file_type, &udf_node->loc, 5647 &dir_node->loc, parent_unique_id); 5648 /* TODO add extended attribute for creation time */ 5649 } else { 5650 udf_node->efe = &dscr->efe; 5651 fid_size = udf_create_new_efe(ump, udf_node->efe, 5652 udf_file_type, &udf_node->loc, 5653 &dir_node->loc, parent_unique_id); 5654 } 5655 KASSERT(dscr->tag.tag_loc == udf_node->loc.loc.lb_num); 5656 5657 /* update vnode's size and type */ 5658 nvp->v_type = vap->va_type; 5659 uvm_vnp_setsize(nvp, fid_size); 5660 5661 /* set access mode */ 5662 udf_setaccessmode(udf_node, vap->va_mode); 5663 5664 /* set ownership */ 5665 uid = kauth_cred_geteuid(cnp->cn_cred); 5666 gid = parent_gid; 5667 udf_setownership(udf_node, uid, gid); 5668 5669 error = udf_dir_attach(ump, dir_node, udf_node, vap, cnp); 5670 if (error) { 5671 /* free disc allocation for node */ 5672 udf_free_allocated_space(ump, lb_num, vpart_num, 1); 5673 5674 /* recycle udf_node */ 5675 udf_dispose_node(udf_node); 5676 vput(nvp); 5677 5678 *vpp = NULL; 5679 return error; 5680 } 5681 5682 /* adjust file count */ 5683 udf_adjust_filecount(udf_node, 1); 5684 5685 /* return result */ 5686 *vpp = nvp; 5687 5688 return 0; 5689 } 5690 5691 5692 int 5693 udf_create_node(struct vnode *dvp, struct vnode **vpp, struct vattr *vap, 5694 struct componentname *cnp) 5695 { 5696 int (**vnodeops)(void *); 5697 int udf_file_type; 5698 5699 DPRINTF(NODE, ("udf_create_node called\n")); 5700 5701 /* what type are we creating ? */ 5702 vnodeops = udf_vnodeop_p; 5703 /* start with a default */ 5704 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS; 5705 5706 *vpp = NULL; 5707 5708 switch (vap->va_type) { 5709 case VREG : 5710 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS; 5711 break; 5712 case VDIR : 5713 udf_file_type = UDF_ICB_FILETYPE_DIRECTORY; 5714 break; 5715 case VLNK : 5716 udf_file_type = UDF_ICB_FILETYPE_SYMLINK; 5717 break; 5718 case VBLK : 5719 udf_file_type = UDF_ICB_FILETYPE_BLOCKDEVICE; 5720 /* specfs */ 5721 return ENOTSUP; 5722 break; 5723 case VCHR : 5724 udf_file_type = UDF_ICB_FILETYPE_CHARDEVICE; 5725 /* specfs */ 5726 return ENOTSUP; 5727 break; 5728 case VFIFO : 5729 udf_file_type = UDF_ICB_FILETYPE_FIFO; 5730 /* specfs */ 5731 return ENOTSUP; 5732 break; 5733 case VSOCK : 5734 udf_file_type = UDF_ICB_FILETYPE_SOCKET; 5735 /* specfs */ 5736 return ENOTSUP; 5737 break; 5738 case VNON : 5739 case VBAD : 5740 default : 5741 /* nothing; can we even create these? */ 5742 return EINVAL; 5743 } 5744 5745 return udf_create_node_raw(dvp, vpp, udf_file_type, vnodeops, vap, cnp); 5746 } 5747 5748 /* --------------------------------------------------------------------- */ 5749 5750 static void 5751 udf_free_descriptor_space(struct udf_node *udf_node, struct long_ad *loc, void *mem) 5752 { 5753 struct udf_mount *ump = udf_node->ump; 5754 uint32_t lb_size, lb_num, len, num_lb; 5755 uint16_t vpart_num; 5756 5757 /* is there really one? 
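The fe/efe pointers and unused ext[] slots may legitimately be NULL.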
*/ 5758 if (mem == NULL) 5759 return; 5760 5761 /* got a descriptor here */ 5762 len = UDF_EXT_LEN(udf_rw32(loc->len)); 5763 lb_num = udf_rw32(loc->loc.lb_num); 5764 vpart_num = udf_rw16(loc->loc.part_num); 5765 5766 lb_size = udf_rw32(ump->logical_vol->lb_size); 5767 num_lb = (len + lb_size -1) / lb_size; 5768 5769 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb); 5770 } 5771 5772 void 5773 udf_delete_node(struct udf_node *udf_node) 5774 { 5775 void *dscr; 5776 struct udf_mount *ump; 5777 struct long_ad *loc; 5778 int extnr, lvint, dummy; 5779 5780 ump = udf_node->ump; 5781 5782 /* paranoia check on integrity; should be open!; we could panic */ 5783 lvint = udf_rw32(udf_node->ump->logvol_integrity->integrity_type); 5784 if (lvint == UDF_INTEGRITY_CLOSED) 5785 printf("\tIntegrity was CLOSED!\n"); 5786 5787 /* whatever the node type, change its size to zero */ 5788 (void) udf_resize_node(udf_node, 0, &dummy); 5789 5790 /* force it to be `clean'; no use writing it out */ 5791 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED | IN_ACCESS | 5792 IN_CHANGE | IN_UPDATE | IN_MODIFY); 5793 5794 /* adjust file count */ 5795 udf_adjust_filecount(udf_node, -1); 5796 5797 /* 5798 * Free its allocated descriptors; memory will be released when 5799 * vop_reclaim() is called. 5800 */ 5801 loc = &udf_node->loc; 5802 5803 dscr = udf_node->fe; 5804 udf_free_descriptor_space(udf_node, loc, dscr); 5805 dscr = udf_node->efe; 5806 udf_free_descriptor_space(udf_node, loc, dscr); 5807 5808 for (extnr = 0; extnr < UDF_MAX_ALLOC_EXTENTS; extnr++) { 5809 dscr = udf_node->ext[extnr]; 5810 loc = &udf_node->ext_loc[extnr]; 5811 udf_free_descriptor_space(udf_node, loc, dscr); 5812 } 5813 } 5814 5815 /* --------------------------------------------------------------------- */ 5816 5817 /* set new filesize; node but be LOCKED on entry and is locked on exit */ 5818 int 5819 udf_resize_node(struct udf_node *udf_node, uint64_t new_size, int *extended) 5820 { 5821 struct file_entry *fe = udf_node->fe; 5822 struct extfile_entry *efe = udf_node->efe; 5823 uint64_t file_size; 5824 int error; 5825 5826 if (fe) { 5827 file_size = udf_rw64(fe->inf_len); 5828 } else { 5829 assert(udf_node->efe); 5830 file_size = udf_rw64(efe->inf_len); 5831 } 5832 5833 DPRINTF(ATTR, ("\tchanging file length from %"PRIu64" to %"PRIu64"\n", 5834 file_size, new_size)); 5835 5836 /* if not changing, we're done */ 5837 if (file_size == new_size) 5838 return 0; 5839 5840 *extended = (new_size > file_size); 5841 if (*extended) { 5842 error = udf_grow_node(udf_node, new_size); 5843 } else { 5844 error = udf_shrink_node(udf_node, new_size); 5845 } 5846 5847 return error; 5848 } 5849 5850 5851 /* --------------------------------------------------------------------- */ 5852 5853 void 5854 udf_itimes(struct udf_node *udf_node, struct timespec *acc, 5855 struct timespec *mod, struct timespec *birth) 5856 { 5857 struct timespec now; 5858 struct file_entry *fe; 5859 struct extfile_entry *efe; 5860 struct filetimes_extattr_entry *ft_extattr; 5861 struct timestamp *atime, *mtime, *attrtime, *ctime; 5862 struct timestamp fe_ctime; 5863 struct timespec cur_birth; 5864 uint32_t offset, a_l; 5865 uint8_t *filedata; 5866 int error; 5867 5868 /* protect against rogue values */ 5869 if (!udf_node) 5870 return; 5871 5872 fe = udf_node->fe; 5873 efe = udf_node->efe; 5874 5875 if (!(udf_node->i_flags & (IN_ACCESS|IN_CHANGE|IN_UPDATE|IN_MODIFY))) 5876 return; 5877 5878 /* get descriptor information */ 5879 if (fe) { 5880 atime = &fe->atime; 5881 mtime = &fe->mtime; 5882 
5882 		attrtime = &fe->attrtime;
5883 		filedata = fe->data;
5884
5885 		/* initially point at a dummy */
5886 		ctime = &fe_ctime;
5887
5888 		/* check our extended attribute if present */
5889 		error = udf_extattr_search_intern(udf_node,
5890 		    UDF_FILETIMES_ATTR_NO, "", &offset, &a_l);
5891 		if (!error) {
5892 			ft_extattr = (struct filetimes_extattr_entry *)
5893 			    (filedata + offset);
5894 			if (ft_extattr->existence & UDF_FILETIMES_FILE_CREATION)
5895 				ctime = &ft_extattr->times[0];
5896 		}
5897 		/* TODO create the extended attribute if not found ? */
5898 	} else {
5899 		assert(udf_node->efe);
5900 		atime = &efe->atime;
5901 		mtime = &efe->mtime;
5902 		attrtime = &efe->attrtime;
5903 		ctime = &efe->ctime;
5904 	}
5905
5906 	vfs_timestamp(&now);
5907
5908 	/* set access time */
5909 	if (udf_node->i_flags & IN_ACCESS) {
5910 		if (acc == NULL)
5911 			acc = &now;
5912 		udf_timespec_to_timestamp(acc, atime);
5913 	}
5914
5915 	/* set modification time */
5916 	if (udf_node->i_flags & (IN_UPDATE | IN_MODIFY)) {
5917 		if (mod == NULL)
5918 			mod = &now;
5919 		udf_timespec_to_timestamp(mod, mtime);
5920
5921 		/* ensure birthtime is older than set modification! */
5922 		udf_timestamp_to_timespec(udf_node->ump, ctime, &cur_birth);
5923 		if ((cur_birth.tv_sec > mod->tv_sec) ||
5924 		    ((cur_birth.tv_sec == mod->tv_sec) &&
5925 		    (cur_birth.tv_nsec > mod->tv_nsec))) {
5926 			udf_timespec_to_timestamp(mod, ctime);
5927 		}
5928 	}
5929
5930 	/* update birthtime if specified */
5931 	/* XXX we assume here that given birthtime is older than mod */
5932 	if (birth && (birth->tv_sec != VNOVAL)) {
5933 		udf_timespec_to_timestamp(birth, ctime);
5934 	}
5935
5936 	/* set change time */
5937 	if (udf_node->i_flags & (IN_CHANGE | IN_MODIFY))
5938 		udf_timespec_to_timestamp(&now, attrtime);
5939
5940 	/* notify updates to the node itself */
5941 	if (udf_node->i_flags & (IN_ACCESS | IN_MODIFY))
5942 		udf_node->i_flags |= IN_ACCESSED;
5943 	if (udf_node->i_flags & (IN_UPDATE | IN_CHANGE))
5944 		udf_node->i_flags |= IN_MODIFIED;
5945
5946 	/* clear modification flags */
5947 	udf_node->i_flags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY);
5948 }
5949
5950 /* --------------------------------------------------------------------- */
5951
5952 int
5953 udf_update(struct vnode *vp, struct timespec *acc,
5954     struct timespec *mod, struct timespec *birth, int updflags)
5955 {
5956 	struct udf_node *udf_node = VTOI(vp);
5957 	struct udf_mount *ump = udf_node->ump;
5958 	struct regid *impl_id;
5959 	int mnt_async = (vp->v_mount->mnt_flag & MNT_ASYNC);
5960 	int waitfor, flags;
5961
5962 #ifdef DEBUG
5963 	char bits[128];
5964 	DPRINTF(CALL, ("udf_update(node, %p, %p, %p, %d)\n", acc, mod, birth,
5965 	    updflags));
5966 	bitmask_snprintf(udf_node->i_flags, IN_FLAGBITS, bits, sizeof(bits));
5967 	DPRINTF(CALL, ("\tnode flags %s\n", bits));
5968 	DPRINTF(CALL, ("\t\tmnt_async = %d\n", mnt_async));
5969 #endif
5970
5971 	/* set our times */
5972 	udf_itimes(udf_node, acc, mod, birth);
5973
5974 	/* set our implementation id */
5975 	if (udf_node->fe) {
5976 		impl_id = &udf_node->fe->imp_id;
5977 	} else {
5978 		impl_id = &udf_node->efe->imp_id;
5979 	}
5980 	udf_set_regid(impl_id, IMPL_NAME);
5981 	udf_add_impl_regid(ump, impl_id);
5982
5983 	/* if called when mounted readonly, never write back */
5984 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
5985 		return 0;
5986
5987 	/* check if the node is dirty 'enough' */
5988 	if (updflags & UPDATE_CLOSE) {
5989 		flags = udf_node->i_flags & (IN_MODIFIED | IN_ACCESSED);
5990 	} else {
5991 		flags = udf_node->i_flags & IN_MODIFIED;
5992 	}
5993 	if (flags == 0)
5994 		return 0;
5995
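	/*
	 * Only a genuinely modified node on a non-async mount can force a
	 * synchronous flush below; access-time-only updates are left to the
	 * syncer to write back.
	 */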
5996 	/* determine if we need to write sync or async */
5997 	waitfor = 0;
5998 	if ((flags & IN_MODIFIED) && (mnt_async == 0)) {
5999 		/* sync mounted */
6000 		waitfor = updflags & UPDATE_WAIT;
6001 		if (updflags & UPDATE_DIROP)
6002 			waitfor |= UPDATE_WAIT;
6003 	}
6004 	if (waitfor)
6005 		return VOP_FSYNC(vp, FSCRED, FSYNC_WAIT, 0,0);
6006
6007 	return 0;
6008 }
6009
6010
6011 /* --------------------------------------------------------------------- */
6012
6013
6014 /*
6015  * Read one fid and process it into a dirent, then advance to the next;
6016  * (*fid) has to be allocated a logical block in size, (*dirent) a struct
6017  * dirent in length.
6018  */
6019 int
6020 udf_read_fid_stream(struct vnode *vp, uint64_t *offset,
6021     struct fileid_desc *fid, struct dirent *dirent)
6022 {
6023 	struct udf_node *dir_node = VTOI(vp);
6024 	struct udf_mount *ump = dir_node->ump;
6025 	struct file_entry *fe = dir_node->fe;
6026 	struct extfile_entry *efe = dir_node->efe;
6027 	uint32_t fid_size, lb_size;
6028 	uint64_t file_size;
6029 	char *fid_name;
6030 	int enough, error;
6031
6032 	assert(fid);
6033 	assert(dirent);
6034 	assert(dir_node);
6035 	assert(offset);
6036 	assert(*offset != 1);
6037
6038 	DPRINTF(FIDS, ("read_fid_stream called at offset %"PRIu64"\n", *offset));
6039 	/* check if we're past the end of the directory */
6040 	if (fe) {
6041 		file_size = udf_rw64(fe->inf_len);
6042 	} else {
6043 		assert(dir_node->efe);
6044 		file_size = udf_rw64(efe->inf_len);
6045 	}
6046 	if (*offset >= file_size)
6047 		return EINVAL;
6048
6049 	/* get maximum length of FID descriptor */
6050 	lb_size = udf_rw32(ump->logical_vol->lb_size);
6051
6052 	/* initialise return values */
6053 	fid_size = 0;
6054 	memset(dirent, 0, sizeof(struct dirent));
6055 	memset(fid, 0, lb_size);
6056
6057 	enough = (file_size - (*offset) >= UDF_FID_SIZE);
6058 	if (!enough) {
6059 		/* short dir ... */
6060 		return EIO;
6061 	}
6062
6063 	error = vn_rdwr(UIO_READ, vp,
6064 	    fid, MIN(file_size - (*offset), lb_size), *offset,
6065 	    UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, FSCRED,
6066 	    NULL, NULL);
6067 	if (error)
6068 		return error;
6069
6070 	DPRINTF(FIDS, ("\tfid piece read in fine\n"));
6071 	/*
6072 	 * Check if we got a whole descriptor.
6073 	 * TODO Try to `resync' directory stream when something is very wrong.
6074 	 */
6075
6076 	/* check if our FID header is OK */
6077 	error = udf_check_tag(fid);
6078 	if (error) {
6079 		goto brokendir;
6080 	}
6081 	DPRINTF(FIDS, ("\ttag check ok\n"));
6082
6083 	if (udf_rw16(fid->tag.id) != TAGID_FID) {
6084 		error = EIO;
6085 		goto brokendir;
6086 	}
6087 	DPRINTF(FIDS, ("\ttag checked ok: got TAGID_FID\n"));
6088
6089 	/* check for length */
6090 	fid_size = udf_fidsize(fid);
6091 	enough = (file_size - (*offset) >= fid_size);
6092 	if (!enough) {
6093 		error = EIO;
6094 		goto brokendir;
6095 	}
6096 	DPRINTF(FIDS, ("\tthe complete fid is read in\n"));
6097
6098 	/* check FID contents */
6099 	error = udf_check_tag_payload((union dscrptr *) fid, lb_size);
6100 brokendir:
6101 	if (error) {
6102 		/* note that this is sometimes a bit quick to report */
6103 		printf("BROKEN DIRECTORY ENTRY\n");
6104 		/* RESYNC? */
6105 		/* TODO: use udf_resync_fid_stream */
6106 		return EIO;
6107 	}
6108 	DPRINTF(FIDS, ("\tpayload checked ok\n"));
6109
6110 	/* we got a whole and valid descriptor! */
6111 	DPRINTF(FIDS, ("\tinterpret FID\n"));
6112
6113 	/* create resulting dirent structure */
6114 	fid_name = (char *) fid->data + udf_rw16(fid->l_iu);
6115 	udf_to_unix_name(dirent->d_name, MAXNAMLEN,
6116 	    fid_name, fid->l_fi, &ump->logical_vol->desc_charset);
6117
6118 	/* '..' has no name, so provide one */
6119 	if (fid->file_char & UDF_FILE_CHAR_PAR)
6120 		strcpy(dirent->d_name, "..");
6121
6122 	dirent->d_fileno = udf_calchash(&fid->icb);	/* inode hash XXX */
6123 	dirent->d_namlen = strlen(dirent->d_name);
6124 	dirent->d_reclen = _DIRENT_SIZE(dirent);
6125
6126 	/*
6127 	 * Note that it's not worth trying to go for the file types now...
6128 	 * it's too expensive.
6129 	 */
6130 	dirent->d_type = DT_UNKNOWN;
6131
6132 	/* initial guess for filetype we can make */
6133 	if (fid->file_char & UDF_FILE_CHAR_DIR)
6134 		dirent->d_type = DT_DIR;
6135
6136 	/* advance */
6137 	*offset += fid_size;
6138
6139 	return error;
6140 }
6141
6142
6143 /* --------------------------------------------------------------------- */
6144
6145 static void
6146 udf_sync_pass(struct udf_mount *ump, kauth_cred_t cred, int waitfor,
6147     int pass, int *ndirty)
6148 {
6149 	struct udf_node *udf_node, *n_udf_node;
6150 	struct vnode *vp;
6151 	int vdirty, error;
6152 	int on_type, on_flags, on_vnode;
6153
6154 derailed:
6155 	KASSERT(mutex_owned(&mntvnode_lock));
6156
6157 	DPRINTF(SYNC, ("sync_pass %d\n", pass));
6158 	udf_node = LIST_FIRST(&ump->sorted_udf_nodes);
6159 	for (;udf_node; udf_node = n_udf_node) {
6160 		DPRINTF(SYNC, ("."));
6161
6162 		udf_node->i_flags &= ~IN_SYNCED;
6163 		vp = udf_node->vnode;
6164
6165 		mutex_enter(&vp->v_interlock);
6166 		n_udf_node = LIST_NEXT(udf_node, sortchain);
6167 		if (n_udf_node)
6168 			n_udf_node->i_flags |= IN_SYNCED;
6169
6170 		/* system nodes are not synced this way */
6171 		if (vp->v_vflag & VV_SYSTEM) {
6172 			mutex_exit(&vp->v_interlock);
6173 			continue;
6174 		}
6175
6176 		/* check if it's dirty enough to even try */
6177 		on_type = (waitfor == MNT_LAZY || vp->v_type == VNON);
6178 		on_flags = ((udf_node->i_flags &
6179 		    (IN_ACCESSED | IN_UPDATE | IN_MODIFIED)) == 0);
6180 		on_vnode = LIST_EMPTY(&vp->v_dirtyblkhd)
6181 		    && UVM_OBJ_IS_CLEAN(&vp->v_uobj);
6182 		if (on_type || (on_flags || on_vnode)) {	/* XXX */
6183 			/* not dirty (enough?) */
6184 			mutex_exit(&vp->v_interlock);
6185 			continue;
6186 		}
6187
6188 		mutex_exit(&mntvnode_lock);
6189 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
6190 		if (error) {
6191 			mutex_enter(&mntvnode_lock);
6192 			if (error == ENOENT)
6193 				goto derailed;
6194 			*ndirty += 1;
6195 			continue;
6196 		}
6197
6198 		switch (pass) {
6199 		case 1:
6200 			VOP_FSYNC(vp, cred, 0 | FSYNC_DATAONLY,0,0);
6201 			break;
6202 		case 2:
6203 			vdirty = vp->v_numoutput;
6204 			if (vp->v_tag == VT_UDF)
6205 				vdirty += udf_node->outstanding_bufs +
6206 				    udf_node->outstanding_nodedscr;
6207 			if (vdirty == 0)
6208 				VOP_FSYNC(vp, cred, 0,0,0);
6209 			*ndirty += vdirty;
6210 			break;
6211 		case 3:
6212 			vdirty = vp->v_numoutput;
6213 			if (vp->v_tag == VT_UDF)
6214 				vdirty += udf_node->outstanding_bufs +
6215 				    udf_node->outstanding_nodedscr;
6216 			*ndirty += vdirty;
6217 			break;
6218 		}
6219
6220 		vput(vp);
6221 		mutex_enter(&mntvnode_lock);
6222 	}
6223 	DPRINTF(SYNC, ("END sync_pass %d\n", pass));
6224 }
6225
6226
6227 void
6228 udf_do_sync(struct udf_mount *ump, kauth_cred_t cred, int waitfor)
6229 {
6230 	int dummy, ndirty;
6231
6232 	mutex_enter(&mntvnode_lock);
6233 recount:
6234 	dummy = 0;
6235 	DPRINTF(CALL, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
6236 	DPRINTF(SYNC, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
6237 	udf_sync_pass(ump, cred, waitfor, 1, &dummy);
6238
6239 	DPRINTF(CALL, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
6240 	DPRINTF(SYNC, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
6241 	udf_sync_pass(ump, cred, waitfor, 2, &dummy);
6242
6243 	if (waitfor == MNT_WAIT) {
6244 		ndirty = ump->devvp->v_numoutput;
6245 		DPRINTF(NODE, ("counting pending blocks: on devvp %d\n",
6246 		    ndirty));
6247 		udf_sync_pass(ump, cred, waitfor, 3, &ndirty);
6248 		DPRINTF(NODE, ("counted num dirty pending blocks %d\n",
6249 		    ndirty));
6250
6251 		if (ndirty) {
6252 			/* 1/4 second wait */
6253 			cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
6254 			    hz/4);
6255 			goto recount;
6256 		}
6257 	}
6258
6259 	mutex_exit(&mntvnode_lock);
6260 }
6261
6262 /* --------------------------------------------------------------------- */
6263
6264 /*
6265  * Read and write file extent in/from the buffer.
6266  *
6267  * The split-up of the extent into separate request-buffers is to minimise
6268  * copying around as much as possible.
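 * Contiguous sectors in the returned translation map are coalesced into
 * runs; every run is issued to the disc scheduler as one nested buffer on
 * the master buffer.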
6269  *
6270  * block based file reading and writing
6271  */
6272
6273 static int
6274 udf_read_internal(struct udf_node *node, uint8_t *blob)
6275 {
6276 	struct udf_mount *ump;
6277 	struct file_entry *fe = node->fe;
6278 	struct extfile_entry *efe = node->efe;
6279 	uint64_t inflen;
6280 	uint32_t sector_size;
6281 	uint8_t *pos;
6282 	int icbflags, addr_type;
6283
6284 	/* get extent and do some paranoia checks */
6285 	ump = node->ump;
6286 	sector_size = ump->discinfo.sector_size;
6287
6288 	if (fe) {
6289 		inflen = udf_rw64(fe->inf_len);
6290 		pos = &fe->data[0] + udf_rw32(fe->l_ea);
6291 		icbflags = udf_rw16(fe->icbtag.flags);
6292 	} else {
6293 		assert(node->efe);
6294 		inflen = udf_rw64(efe->inf_len);
6295 		pos = &efe->data[0] + udf_rw32(efe->l_ea);
6296 		icbflags = udf_rw16(efe->icbtag.flags);
6297 	}
6298 	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
6299
6300 	assert(addr_type == UDF_ICB_INTERN_ALLOC);
6301 	assert(inflen < sector_size);
6302
6303 	/* copy out info */
6304 	memset(blob, 0, sector_size);
6305 	memcpy(blob, pos, inflen);
6306
6307 	return 0;
6308 }
6309
6310
6311 static int
6312 udf_write_internal(struct udf_node *node, uint8_t *blob)
6313 {
6314 	struct udf_mount *ump;
6315 	struct file_entry *fe = node->fe;
6316 	struct extfile_entry *efe = node->efe;
6317 	uint64_t inflen;
6318 	uint32_t sector_size;
6319 	uint8_t *pos;
6320 	int icbflags, addr_type;
6321
6322 	/* get extent and do some paranoia checks */
6323 	ump = node->ump;
6324 	sector_size = ump->discinfo.sector_size;
6325
6326 	if (fe) {
6327 		inflen = udf_rw64(fe->inf_len);
6328 		pos = &fe->data[0] + udf_rw32(fe->l_ea);
6329 		icbflags = udf_rw16(fe->icbtag.flags);
6330 	} else {
6331 		assert(node->efe);
6332 		inflen = udf_rw64(efe->inf_len);
6333 		pos = &efe->data[0] + udf_rw32(efe->l_ea);
6334 		icbflags = udf_rw16(efe->icbtag.flags);
6335 	}
6336 	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
6337
6338 	assert(addr_type == UDF_ICB_INTERN_ALLOC);
6339 	assert(inflen < sector_size);
6340
6341 	/* copy in blob */
6342 	/* memset(pos, 0, inflen); */
6343 	memcpy(pos, blob, inflen);
6344
6345 	return 0;
6346 }
6347
6348
6349 void
6350 udf_read_filebuf(struct udf_node *udf_node, struct buf *buf)
6351 {
6352 	struct buf *nestbuf;
6353 	struct udf_mount *ump = udf_node->ump;
6354 	uint64_t *mapping;
6355 	uint64_t run_start;
6356 	uint32_t sector_size;
6357 	uint32_t buf_offset, sector, rbuflen, rblk;
6358 	uint32_t from, lblkno;
6359 	uint32_t sectors;
6360 	uint8_t *buf_pos;
6361 	int error, run_length, isdir, what;
6362
6363 	sector_size = udf_node->ump->discinfo.sector_size;
6364
6365 	from = buf->b_blkno;
6366 	sectors = buf->b_bcount / sector_size;
6367
6368 	isdir = (udf_node->vnode->v_type == VDIR);
6369 	what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
6370
6371 	/* assure we have enough translation slots */
6372 	KASSERT(buf->b_bcount / sector_size <= UDF_MAX_MAPPINGS);
6373 	KASSERT(MAXPHYS / sector_size <= UDF_MAX_MAPPINGS);
6374
6375 	if (sectors > UDF_MAX_MAPPINGS) {
6376 		printf("udf_read_filebuf: implementation limit on bufsize\n");
6377 		buf->b_error = EIO;
6378 		biodone(buf);
6379 		return;
6380 	}
6381
6382 	mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
6383
6384 	error = 0;
6385 	DPRINTF(READ, ("\ttranslate %d-%d\n", from, sectors));
6386 	error = udf_translate_file_extent(udf_node, from, sectors, mapping);
6387 	if (error) {
6388 		buf->b_error = error;
6389 		biodone(buf);
6390 		goto out;
6391 	}
6392 	DPRINTF(READ, ("\ttranslate extent went OK\n"));
6393
6394 	/* pre-check if it's an internal */
6395 	if (*mapping == UDF_TRANS_INTERN) {
6396 		error = udf_read_internal(udf_node, (uint8_t *) buf->b_data);
6397 		if (error)
6398 			buf->b_error = error;
6399 		biodone(buf);
6400 		goto out;
6401 	}
6402 	DPRINTF(READ, ("\tnot intern\n"));
6403
6404 #ifdef DEBUG
6405 	if (udf_verbose & UDF_DEBUG_TRANSLATE) {
6406 		printf("Returned translation table:\n");
6407 		for (sector = 0; sector < sectors; sector++) {
6408 			printf("%d : %"PRIu64"\n", sector, mapping[sector]);
6409 		}
6410 	}
6411 #endif
6412
6413 	/* request read-in of data from disc scheduler */
6414 	buf->b_resid = buf->b_bcount;
6415 	for (sector = 0; sector < sectors; sector++) {
6416 		buf_offset = sector * sector_size;
6417 		buf_pos = (uint8_t *) buf->b_data + buf_offset;
6418 		DPRINTF(READ, ("\tprocessing rel sector %d\n", sector));
6419
6420 		/* check if it's zero or unmapped to stop reading */
6421 		switch (mapping[sector]) {
6422 		case UDF_TRANS_UNMAPPED:
6423 		case UDF_TRANS_ZERO:
6424 			/* copy zero sector TODO runlength like below */
6425 			memset(buf_pos, 0, sector_size);
6426 			DPRINTF(READ, ("\treturning zero sector\n"));
6427 			nestiobuf_done(buf, sector_size, 0);
6428 			break;
6429 		default :
6430 			DPRINTF(READ, ("\tread sector "
6431 			    "%"PRIu64"\n", mapping[sector]));
6432
6433 			lblkno = from + sector;
6434 			run_start = mapping[sector];
6435 			run_length = 1;
6436 			while (sector < sectors-1) {
6437 				if (mapping[sector+1] != mapping[sector]+1)
6438 					break;
6439 				run_length++;
6440 				sector++;
6441 			}
6442
6443 			/*
6444 			 * nest an iobuf and mark it for async reading. Since
6445 			 * we're using nested buffers, they can't be cached by
6446 			 * design.
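			 * Completion of every nested buffer is accounted back
			 * to the master buffer by the nestiobuf machinery, so
			 * the master's biodone() fires only when all runs (and
			 * zero-filled sectors) have been handled.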
6447 			 */
6448 			rbuflen = run_length * sector_size;
6449 			rblk = run_start * (sector_size/DEV_BSIZE);
6450
6451 			nestbuf = getiobuf(NULL, true);
6452 			nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
6453 			/* nestbuf is B_ASYNC */
6454
6455 			/* identify this nestbuf */
6456 			nestbuf->b_lblkno = lblkno;
6457 			assert(nestbuf->b_vp == udf_node->vnode);
6458
6459 			/* CD schedules on raw blkno */
6460 			nestbuf->b_blkno = rblk;
6461 			nestbuf->b_proc = NULL;
6462 			nestbuf->b_rawblkno = rblk;
6463 			nestbuf->b_udf_c_type = what;
6464
6465 			udf_discstrat_queuebuf(ump, nestbuf);
6466 		}
6467 	}
6468 out:
6469 	/* if we're synchronously reading, wait for the completion */
6470 	if ((buf->b_flags & B_ASYNC) == 0)
6471 		biowait(buf);
6472
6473 	DPRINTF(READ, ("\tend of read_filebuf\n"));
6474 	free(mapping, M_TEMP);
6475 	return;
6476 }
6477
6478
6479 void
6480 udf_write_filebuf(struct udf_node *udf_node, struct buf *buf)
6481 {
6482 	struct buf *nestbuf;
6483 	struct udf_mount *ump = udf_node->ump;
6484 	uint64_t *mapping;
6485 	uint64_t run_start;
6486 	uint32_t lb_size;
6487 	uint32_t buf_offset, lb_num, rbuflen, rblk;
6488 	uint32_t from, lblkno;
6489 	uint32_t num_lb;
6490 	uint8_t *buf_pos;
6491 	int error, run_length, isdir, what, s;
6492
6493 	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
6494
6495 	from = buf->b_blkno;
6496 	num_lb = buf->b_bcount / lb_size;
6497
6498 	isdir = (udf_node->vnode->v_type == VDIR);
6499 	what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
6500
6501 	if (udf_node == ump->metadatabitmap_node)
6502 		what = UDF_C_METADATA_SBM;
6503
6504 	/* assure we have enough translation slots */
6505 	KASSERT(buf->b_bcount / lb_size <= UDF_MAX_MAPPINGS);
6506 	KASSERT(MAXPHYS / lb_size <= UDF_MAX_MAPPINGS);
6507
6508 	if (num_lb > UDF_MAX_MAPPINGS) {
6509 		printf("udf_write_filebuf: implementation limit on bufsize\n");
6510 		buf->b_error = EIO;
6511 		biodone(buf);
6512 		return;
6513 	}
6514
6515 	mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
6516
6517 	error = 0;
6518 	DPRINTF(WRITE, ("\ttranslate %d-%d\n", from, num_lb));
6519 	error = udf_translate_file_extent(udf_node, from, num_lb, mapping);
6520 	if (error) {
6521 		buf->b_error = error;
6522 		biodone(buf);
6523 		goto out;
6524 	}
6525 	DPRINTF(WRITE, ("\ttranslate extent went OK\n"));
6526
6527 	/* if it's internally mapped, we can write it in the descriptor itself */
6528 	if (*mapping == UDF_TRANS_INTERN) {
6529 		/* TODO paranoia check if we ARE going to have enough space */
6530 		error = udf_write_internal(udf_node, (uint8_t *) buf->b_data);
6531 		if (error)
6532 			buf->b_error = error;
6533 		biodone(buf);
6534 		goto out;
6535 	}
6536 	DPRINTF(WRITE, ("\tnot intern\n"));
6537
6538 	/* request write out of data to disc scheduler */
6539 	buf->b_resid = buf->b_bcount;
6540 	for (lb_num = 0; lb_num < num_lb; lb_num++) {
6541 		buf_offset = lb_num * lb_size;
6542 		buf_pos = (uint8_t *) buf->b_data + buf_offset;
6543 		DPRINTF(WRITE, ("\tprocessing rel lb_num %d\n", lb_num));
6544
6545 		/*
6546 		 * Mappings are not that important here. Just before we write
6547 		 * the lb_num we late-allocate them when needed and update the
6548 		 * mapping in the udf_node.
6549 		 */
6550
6551 		/* XXX why not ignore the mapping altogether ? */
6552 		/* TODO estimate here how much will be late-allocated */
6553 		DPRINTF(WRITE, ("\twrite lb_num "
6554 		    "%"PRIu64, mapping[lb_num]));
6555
6556 		lblkno = from + lb_num;
6557 		run_start = mapping[lb_num];
6558 		run_length = 1;
6559 		while (lb_num < num_lb-1) {
6560 			if (mapping[lb_num+1] != mapping[lb_num]+1)
6561 				if (mapping[lb_num+1] != mapping[lb_num])
6562 					break;
6563 			run_length++;
6564 			lb_num++;
6565 		}
6566 		DPRINTF(WRITE, ("+ %d\n", run_length));
6567
6568 		/* nest an iobuf on the master buffer for the extent */
6569 		rbuflen = run_length * lb_size;
6570 		rblk = run_start * (lb_size/DEV_BSIZE);
6571
6572 #if 0
6573 		/* if it's zero or unmapped, our blknr gets -1 for unmapped */
6574 		switch (mapping[lb_num]) {
6575 		case UDF_TRANS_UNMAPPED:
6576 		case UDF_TRANS_ZERO:
6577 			rblk = -1;
6578 			break;
6579 		default:
6580 			rblk = run_start * (lb_size/DEV_BSIZE);
6581 			break;
6582 		}
6583 #endif
6584
6585 		nestbuf = getiobuf(NULL, true);
6586 		nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
6587 		/* nestbuf is B_ASYNC */
6588
6589 		/* identify this nestbuf */
6590 		nestbuf->b_lblkno = lblkno;
6591 		KASSERT(nestbuf->b_vp == udf_node->vnode);
6592
6593 		/* CD schedules on raw blkno */
6594 		nestbuf->b_blkno = rblk;
6595 		nestbuf->b_proc = NULL;
6596 		nestbuf->b_rawblkno = rblk;
6597 		nestbuf->b_udf_c_type = what;
6598
6599 		/* increment our outstanding bufs counter */
6600 		s = splbio();
6601 		udf_node->outstanding_bufs++;
6602 		splx(s);
6603
6604 		udf_discstrat_queuebuf(ump, nestbuf);
6605 	}
6606 out:
6607 	/* if we're synchronously writing, wait for the completion */
6608 	if ((buf->b_flags & B_ASYNC) == 0)
6609 		biowait(buf);
6610
6611 	DPRINTF(WRITE, ("\tend of write_filebuf\n"));
6612 	free(mapping, M_TEMP);
6613 	return;
6614 }
6615
6616 /* --------------------------------------------------------------------- */
6617
6618
6619
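/*
 * Illustrative sketch (not part of the driver): the nested-buffer loops in
 * udf_read_filebuf() and udf_write_filebuf() above boil down to splitting a
 * translation map into runs of consecutive sector numbers.  The hypothetical
 * helper below shows that coalescing step in isolation as plain userland C;
 * the name print_runs is made up for the example and the special values
 * UDF_TRANS_ZERO/UDF_TRANS_UNMAPPED are ignored for brevity.  It is wrapped
 * in #if 0 so it is never compiled into the kernel.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void
print_runs(const uint64_t *mapping, int sectors)
{
	int sector, run_length;
	uint64_t run_start;

	for (sector = 0; sector < sectors; sector++) {
		run_start  = mapping[sector];
		run_length = 1;
		/* extend the run while the next sector maps consecutively */
		while (sector < sectors - 1 &&
		    mapping[sector + 1] == mapping[sector] + 1) {
			run_length++;
			sector++;
		}
		/* one nested buffer would be issued per printed run */
		printf("run: start %ju, length %d\n",
		    (uintmax_t)run_start, run_length);
	}
}

int
main(void)
{
	uint64_t mapping[] = { 100, 101, 102, 200, 201, 300 };

	print_runs(mapping, 6);		/* prints runs 100+3, 200+2, 300+1 */
	return 0;
}
#endif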