/* $NetBSD: udf_readwrite.c,v 1.9 2008/12/16 16:18:25 pooka Exp $ */

/*
 * Copyright (c) 2007, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_readwrite.c,v 1.9 2008/12/16 16:18:25 pooka Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

/* --------------------------------------------------------------------- */

void
udf_fixup_fid_block(uint8_t *blob, int lb_size,
	int rfix_pos, int max_rfix_pos, uint32_t lb_num)
{
	struct fileid_desc *fid;
	uint8_t *fid_pos;
	int fid_len, found;

	/* needs to be word aligned */
	KASSERT(rfix_pos % 4 == 0);

	/* first resync with the FID stream !!! */
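	/*
	 * rfix_pos may not point at a FID yet; FIDs are 4-byte aligned, so
	 * step forward in 4-byte increments until a descriptor tag with a
	 * valid checksum identifies itself as a FID.
	 */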
	found = 0;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid_pos = blob + rfix_pos;
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) == TAGID_FID) {
			if (udf_check_tag((union dscrptr *) fid) == 0)
				found = 1;
		}
		if (found)
			break;
		/* try next location; can only be 4-byte aligned */
		rfix_pos += 4;
	}

	/* walk over the fids */
	fid_pos = blob + rfix_pos;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) != TAGID_FID) {
			/* end of FID stream; end of directory or corrupted */
			break;
		}

		/* update sector number and recalculate checksum */
		fid->tag.tag_loc = udf_rw32(lb_num);
		udf_validate_tag_sum((union dscrptr *) fid);

		/* if the FID crosses the end of the given memory, we're done! */
		if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
			break;

		fid_len = udf_fidsize(fid);
		fid_pos += fid_len;
		rfix_pos += fid_len;
	}
}


void
udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
{
	struct desc_tag *tag;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct extattrhdr_desc *eahdr;
	int l_ea;

	/* get information from fe/efe */
	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) blob;
		l_ea = udf_rw32(fe->l_ea);
		eahdr = (struct extattrhdr_desc *) fe->data;
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) blob;
		l_ea = udf_rw32(efe->l_ea);
		eahdr = (struct extattrhdr_desc *) efe->data;
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_ALLOCEXTENT :
	case TAGID_EXTATTR_HDR :
		return;
	default:
		panic("%s: passed bad tag\n", __func__);
	}

	/* something recorded here? (why am I called?) */
	if (l_ea == 0)
		return;

#if 0
	/* check extended attribute tag */
	/* TODO XXX what to do when we encounter an error here? */
	error = udf_check_tag(eahdr);
	if (error)
		return;		/* for now */
	if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
		return;		/* for now */
	error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
	if (error)
		return;		/* for now */
#endif

	DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
		l_ea));

	/* fixup eahdr tag */
	eahdr->tag.tag_loc = udf_rw32(lb_num);
	udf_validate_tag_and_crc_sums((union dscrptr *) eahdr);
}


void
udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
{
	struct desc_tag *tag, *sbm_tag;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	uint32_t lb_size, lb_num;
	uint32_t intern_pos, max_intern_pos;
	int icbflags, addr_type, file_type, intern, has_fids, has_sbm, l_ea;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* if it's not a node we're done */
	if (udf_c_type != UDF_C_NODE)
		return;

	/* NOTE this could also be done in write_internal */
	/* start of a descriptor */
	l_ea = 0;
	has_fids = 0;
	has_sbm = 0;
	intern = 0;
	file_type = 0;
	max_intern_pos = intern_pos = lb_num = 0;	/* shut up gcc! */
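
	/*
	 * Pick up from the fe/efe: the length of the extended attribute
	 * area, whether the file data is recorded internally (in the ICB
	 * itself), the file type and the logical block number this
	 * descriptor claims to be recorded at.
	 */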
	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) tag;
		l_ea = udf_rw32(fe->l_ea);
		icbflags = udf_rw16(fe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		file_type = fe->icbtag.file_type;
		intern = (addr_type == UDF_ICB_INTERN_ALLOC);
		intern_pos = UDF_FENTRY_SIZE + l_ea;
		max_intern_pos = intern_pos + udf_rw64(fe->inf_len);
		lb_num = udf_rw32(fe->tag.tag_loc);
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) tag;
		l_ea = udf_rw32(efe->l_ea);
		icbflags = udf_rw16(efe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		file_type = efe->icbtag.file_type;
		intern = (addr_type == UDF_ICB_INTERN_ALLOC);
		intern_pos = UDF_EXTFENTRY_SIZE + l_ea;
		max_intern_pos = intern_pos + udf_rw64(efe->inf_len);
		lb_num = udf_rw32(efe->tag.tag_loc);
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_EXTATTR_HDR :
		break;
	case TAGID_ALLOCEXTENT :
		/* force crclen to 8 for UDF version < 2.01 */
		ext = (struct alloc_ext_entry *) tag;
		if (udf_rw16(ump->logvol_info->min_udf_readver) <= 0x200)
			ext->tag.desc_crc_len = udf_rw16(8);
		break;
	default:
		panic("%s: passed bad tag\n", __func__);
		break;
	}

	/* determine what to fix if it's internally recorded */
	if (intern) {
		has_fids = (file_type == UDF_ICB_FILETYPE_DIRECTORY) ||
			(file_type == UDF_ICB_FILETYPE_STREAMDIR);
		has_sbm = (file_type == UDF_ICB_FILETYPE_META_BITMAP);
	}

	/* fixup internal extended attributes if present */
	if (l_ea)
		udf_fixup_internal_extattr(blob, lb_num);

	/* fixup fids lb numbers */
	if (has_fids)
		udf_fixup_fid_block(blob, lb_size, intern_pos,
			max_intern_pos, lb_num);

	/* fixup space bitmap descriptor */
	if (has_sbm) {
		sbm_tag = (struct desc_tag *) (blob + intern_pos);
		sbm_tag->tag_loc = tag->tag_loc;
		udf_validate_tag_and_crc_sums((uint8_t *) sbm_tag);
	}

	udf_validate_tag_and_crc_sums(blob);
}

/* --------------------------------------------------------------------- */

/*
 * Set of generic descriptor readers and writers and their helper functions.
 * Descriptors inside `logical space', i.e. inside logically mapped
 * partitions, can never be longer than one logical sector.
 *
 * NOTE that these functions *can* be used by the scheduler backends to read
 * node descriptors too.
 *
 * For reading, the size of the allocated piece is returned in multiples of
 * the sector size due to udf_calc_udf_malloc_size().
 */


/* SYNC reading of n blocks from specified sector */
/* NOTE only used by udf_read_phys_dscr */
static int
udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
	uint32_t start, uint32_t sectors)
{
	struct buf *buf, *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int piece;
	int error;

	DPRINTF(READ, ("udf_intbreadn() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));
	buf = getiobuf(ump->devvp, true);
	buf->b_flags = B_READ;
	buf->b_cflags = BC_BUSY;	/* needed? */
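	/* no completion callback; we biowait() on this master buffer below */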
	buf->b_iodone = NULL;
	buf->b_data = blob;
	buf->b_bcount = sectors * sector_size;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = start * blks;
	buf->b_proc = NULL;

	error = 0;
	buf_offset = 0;
	rblkno = start;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno = rblkno * blks;
		nestbuf->b_proc = NULL;
		nestbuf->b_rawblkno = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno += piece;
		rblkno += piece;
		buf_offset += piece * sector_size;
		sectors -= piece;
	}
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


/* synchronous generic descriptor read */
int
udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
	struct malloc_type *mtype, union dscrptr **dstp)
{
	union dscrptr *dst, *new_dst;
	uint8_t *pos;
	int sectors, dscrlen;
	int i, error, sector_size;

	sector_size = ump->discinfo.sector_size;

	*dstp = dst = NULL;
	dscrlen = sector_size;

	/* read initial piece */
	dst = malloc(sector_size, mtype, M_WAITOK);
	error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
	DPRINTFIF(DESCRIPTOR, error, ("read error (%d)\n", error));

	if (!error) {
		/* check if it's a valid tag */
		error = udf_check_tag(dst);
		if (error) {
			/* check if it's an empty block */
			pos = (uint8_t *) dst;
			for (i = 0; i < sector_size; i++, pos++) {
				if (*pos) break;
			}
			if (i == sector_size) {
				/* return no error but with no dscrptr */
				/* dispose first block */
				free(dst, mtype);
				return 0;
			}
		}
		/* calculate descriptor size */
		dscrlen = udf_tagsize(dst, sector_size);
	}
	DPRINTFIF(DESCRIPTOR, error, ("bad tag checksum\n"));

	if (!error && (dscrlen > sector_size)) {
		DPRINTF(DESCRIPTOR, ("multi block descriptor read\n"));
		/*
		 * Read the rest of the descriptor. Since it is only used at
		 * mount time it is overkill to define and use a specific
		 * udf_intbreadn for this alone.
		 */
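		/*
		 * Some descriptors, e.g. (space) bitmap descriptors, can be
		 * larger than one sector; enlarge the buffer and read the
		 * remaining sectors behind the first one.
		 */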
394 */ 395 396 new_dst = realloc(dst, dscrlen, mtype, M_WAITOK); 397 if (new_dst == NULL) { 398 free(dst, mtype); 399 return ENOMEM; 400 } 401 dst = new_dst; 402 403 sectors = (dscrlen + sector_size -1) / sector_size; 404 DPRINTF(DESCRIPTOR, ("dscrlen = %d (%d blk)\n", dscrlen, sectors)); 405 406 pos = (uint8_t *) dst + sector_size; 407 error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos, 408 sector + 1, sectors-1); 409 410 DPRINTFIF(DESCRIPTOR, error, ("read error on multi (%d)\n", 411 error)); 412 } 413 if (!error) { 414 error = udf_check_tag_payload(dst, dscrlen); 415 DPRINTFIF(DESCRIPTOR, error, ("bad payload check sum\n")); 416 } 417 if (error && dst) { 418 free(dst, mtype); 419 dst = NULL; 420 } 421 *dstp = dst; 422 423 return error; 424 } 425 426 427 static void 428 udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf) 429 { 430 struct buf *nestbuf; 431 uint32_t buf_offset; 432 off_t lblkno, rblkno; 433 int sector_size = ump->discinfo.sector_size; 434 int blks = sector_size / DEV_BSIZE; 435 uint32_t sectors; 436 int piece; 437 int error; 438 439 sectors = buf->b_bcount / sector_size; 440 DPRINTF(WRITE, ("udf_intbwriten() : sectors = %d, sector_size = %d\n", 441 sectors, sector_size)); 442 443 /* don't forget to increase pending count for the bwrite itself */ 444 /* panic("NO WRITING\n"); */ 445 if (buf->b_vp) { 446 mutex_enter(&buf->b_vp->v_interlock); 447 buf->b_vp->v_numoutput++; 448 mutex_exit(&buf->b_vp->v_interlock); 449 } 450 451 error = 0; 452 buf_offset = 0; 453 rblkno = buf->b_blkno / blks; 454 lblkno = 0; 455 while ((sectors > 0) && (error == 0)) { 456 piece = MIN(MAXPHYS/sector_size, sectors); 457 DPRINTF(WRITE, ("write out %d + %d\n", 458 (uint32_t) rblkno, piece)); 459 460 nestbuf = getiobuf(NULL, true); 461 nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size); 462 /* nestbuf is B_ASYNC */ 463 464 /* identify this nestbuf */ 465 nestbuf->b_lblkno = lblkno; 466 467 /* CD shedules on raw blkno */ 468 nestbuf->b_blkno = rblkno * blks; 469 nestbuf->b_proc = NULL; 470 nestbuf->b_rawblkno = rblkno * blks; 471 nestbuf->b_udf_c_type = what; 472 473 udf_discstrat_queuebuf(ump, nestbuf); 474 475 lblkno += piece; 476 rblkno += piece; 477 buf_offset += piece * sector_size; 478 sectors -= piece; 479 } 480 } 481 482 483 /* synchronous generic descriptor write */ 484 int 485 udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what, 486 union dscrptr *dscr, uint32_t sector, uint32_t logsector) 487 { 488 struct vnode *vp; 489 struct buf *buf; 490 int sector_size = ump->discinfo.sector_size; 491 int blks = sector_size / DEV_BSIZE; 492 int dscrlen; 493 int error; 494 495 /* set sector number in the descriptor and validate */ 496 dscr->tag.tag_loc = udf_rw32(logsector); 497 udf_validate_tag_and_crc_sums(dscr); 498 499 /* calculate descriptor size */ 500 dscrlen = udf_tagsize(dscr, sector_size); 501 502 /* get transfer buffer */ 503 vp = udf_node ? udf_node->vnode : ump->devvp; 504 buf = getiobuf(vp, true); 505 buf->b_flags = B_WRITE; 506 buf->b_cflags = BC_BUSY; /* needed? 
	buf->b_iodone = NULL;
	buf->b_data = (void *) dscr;
	buf->b_bcount = dscrlen;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc = NULL;

	/* do the write, wait and return error */
	udf_write_phys_buf(ump, what, buf);
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


/* asynchronous generic descriptor write */
int
udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
	int what, union dscrptr *dscr,
	uint32_t sector, uint32_t logsector,
	void (*dscrwr_callback)(struct buf *))
{
	struct vnode *vp;
	struct buf *buf;
	int dscrlen;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;

	KASSERT(dscrwr_callback);
	DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags = B_WRITE;	// | B_ASYNC;
	buf->b_cflags = BC_BUSY;
	buf->b_iodone = dscrwr_callback;
	buf->b_data = dscr;
	buf->b_bcount = dscrlen;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc = NULL;

	/* do the write and return no error */
	udf_write_phys_buf(ump, what, buf);
	return 0;
}

/* --------------------------------------------------------------------- */

/* disc strategy dispatchers */

int
udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump = ump;
	args.udf_node = udf_node;
	args.icb = icb;
	args.dscr = NULL;

	error = (strategy->create_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


void
udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	void *dscr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	args.icb = icb;
	args.dscr = dscr;

	(strategy->free_logvol_dscr)(&args);
}


int
udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump = ump;
	args.icb = icb;
	args.dscr = NULL;

	error = (strategy->read_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


int
udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
	struct long_ad *icb, int waitfor)
{
	struct udf_strategy *strategy = udf_node->ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump = udf_node->ump;
	args.udf_node = udf_node;
	args.icb = icb;
	args.dscr = dscr;
	args.waitfor = waitfor;

	error = (strategy->write_logvol_dscr)(&args);
	return error;
}


void
udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	args.nestbuf = nestbuf;

	(strategy->queuebuf)(&args);
}


void
udf_discstrat_init(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	(strategy->discstrat_init)(&args);
}


void udf_discstrat_finish(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	/* strategy might not have been set, so ignore if not set */
	if (strategy) {
		args.ump = ump;
		(strategy->discstrat_finish)(&args);
	}
}

/* --------------------------------------------------------------------- */