xref: /netbsd-src/sys/fs/udf/udf_readwrite.c (revision 0c4ddb1599a0bea866fde8522a74cfbd2f68cd1b)
1 /* $NetBSD: udf_readwrite.c,v 1.5 2008/07/09 18:10:57 reinoud Exp $ */
2 
3 /*
4  * Copyright (c) 2007, 2008 Reinoud Zandijk
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_readwrite.c,v 1.5 2008/07/09 18:10:57 reinoud Exp $");
32 #endif /* not lint */
33 
34 
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sysctl.h>
43 #include <sys/namei.h>
44 #include <sys/proc.h>
45 #include <sys/kernel.h>
46 #include <sys/vnode.h>
47 #include <miscfs/genfs/genfs_node.h>
48 #include <sys/mount.h>
49 #include <sys/buf.h>
50 #include <sys/file.h>
51 #include <sys/device.h>
52 #include <sys/disklabel.h>
53 #include <sys/ioctl.h>
54 #include <sys/malloc.h>
55 #include <sys/dirent.h>
56 #include <sys/stat.h>
57 #include <sys/conf.h>
58 #include <sys/kauth.h>
59 #include <sys/kthread.h>
60 #include <dev/clock_subr.h>
61 
62 #include <fs/udf/ecma167-udf.h>
63 #include <fs/udf/udf_mount.h>
64 
65 #if defined(_KERNEL_OPT)
66 #include "opt_udf.h"
67 #endif
68 
69 #include "udf.h"
70 #include "udf_subr.h"
71 #include "udf_bswap.h"
72 
73 
74 #define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
75 
76 /* --------------------------------------------------------------------- */
77 
78 void
79 udf_fixup_fid_block(uint8_t *blob, int lb_size,
80 	int rfix_pos, int max_rfix_pos, uint32_t lb_num)
81 {
82 	struct fileid_desc *fid;
83 	uint8_t *fid_pos;
84 	int fid_len, found;
85 
86 	/* needs to be word aligned */
87 	KASSERT(rfix_pos % 4 == 0);
88 
89 	/* first resync with the FID stream !!! */
90 	found = 0;
91 	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
92 		fid_pos = blob + rfix_pos;
93 		fid = (struct fileid_desc *) fid_pos;
94 		if (udf_rw16(fid->tag.id) == TAGID_FID) {
95 			if (udf_check_tag((union dscrptr *) fid) == 0)
96 				found = 1;
97 		}
98 		if (found)
99 			break;
100 		/* try next location; FIDs can only be 4-byte aligned */
101 		rfix_pos += 4;
102 	}
103 
104 	/* walk over the fids */
105 	fid_pos = blob + rfix_pos;
106 	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
107 		fid = (struct fileid_desc *) fid_pos;
108 		if (udf_rw16(fid->tag.id) != TAGID_FID) {
109 			/* end of FID stream; end of directory or corrupted */
110 			break;
111 		}
112 
113 		/* update sector number and recalculate checksum */
114 		fid->tag.tag_loc = udf_rw32(lb_num);
115 		udf_validate_tag_sum((union dscrptr *) fid);
116 
117 		/* if a minimal FID would cross the end of the blob, we're done */
118 		if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
119 			break;
120 
121 		fid_len = udf_fidsize(fid);
122 		fid_pos  += fid_len;
123 		rfix_pos += fid_len;
124 	}
125 }
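
/*
 * Usage sketch (illustrative only; the offsets and variables are assumed):
 * when a directory node with internally recorded FIDs is moved to another
 * logical block, every FID tag still carries the old tag_loc.  The caller
 * passes the byte range of the FID area inside the blob and the block
 * number the blob is about to be written to:
 */
#if 0
	udf_fixup_fid_block(blob, lb_size,
	    fid_area_start,			/* first byte of FID stream  */
	    fid_area_start + fid_area_len,	/* one past last FID byte    */
	    new_lb_num);			/* destination logical block */
#endif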
126 
127 
128 void
129 udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
130 {
131 	struct desc_tag        *tag;
132 	struct file_entry      *fe;
133 	struct extfile_entry   *efe;
134 	struct extattrhdr_desc *eahdr;
135 	int l_ea;
136 
137 	/* get information from fe/efe */
138 	tag = (struct desc_tag *) blob;
139 	switch (udf_rw16(tag->id)) {
140 	case TAGID_FENTRY :
141 		fe = (struct file_entry *) blob;
142 		l_ea  = udf_rw32(fe->l_ea);
143 		eahdr = (struct extattrhdr_desc *) fe->data;
144 		break;
145 	case TAGID_EXTFENTRY :
146 		efe = (struct extfile_entry *) blob;
147 		l_ea  = udf_rw32(efe->l_ea);
148 		eahdr = (struct extattrhdr_desc *) efe->data;
149 		break;
150 	case TAGID_INDIRECTENTRY :
151 	case TAGID_ALLOCEXTENT :
152 	case TAGID_EXTATTR_HDR :
153 		return;
154 	default:
155 		panic("%s: passed bad tag\n", __func__);
156 	}
157 
158 	/* something recorded here? (why am I called?) */
159 	if (l_ea == 0)
160 		return;
161 
162 #if 0
163 	/* check extended attribute tag */
164 	/* TODO XXX what to do when we encounter an error here? */
165 	error = udf_check_tag(eahdr);
166 	if (error)
167 		return;	/* for now */
168 	if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
169 		return;	/* for now */
170 	error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
171 	if (error)
172 		return; /* for now */
173 #endif
174 
175 	DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
176 		l_ea));
177 
178 	/* fixup eahdr tag */
179 	eahdr->tag.tag_loc = udf_rw32(lb_num);
180 	udf_validate_tag_and_crc_sums((union dscrptr *) eahdr);
181 }
182 
183 
184 void
185 udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
186 {
187 	struct desc_tag *tag;
188 	struct file_entry *fe;
189 	struct extfile_entry *efe;
190 	struct alloc_ext_entry *ext;
191 	uint32_t lb_size, lb_num;
192 	uint32_t rfid_pos, max_rfid_pos;
193 	int icbflags, addr_type, has_fids, l_ea;
194 
195 	lb_size = udf_rw32(ump->logical_vol->lb_size);
196 	/* if it's not a node, we're done */
197 	if (udf_c_type != UDF_C_NODE)
198 		return;
199 
200 	/* NOTE this could also be done in write_internal */
201 	/* start of a descriptor */
202 	l_ea     = 0;
203 	has_fids = 0;
204 	max_rfid_pos = rfid_pos = lb_num = 0;	/* shut up gcc! */
205 
206 	tag = (struct desc_tag *) blob;
207 	switch (udf_rw16(tag->id)) {
208 	case TAGID_FENTRY :
209 		fe = (struct file_entry *) tag;
210 		l_ea = udf_rw32(fe->l_ea);
211 		icbflags  = udf_rw16(fe->icbtag.flags);
212 		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
213 		has_fids  = (addr_type == UDF_ICB_INTERN_ALLOC);
214 		rfid_pos  = UDF_FENTRY_SIZE + l_ea;
215 		max_rfid_pos = rfid_pos + udf_rw64(fe->inf_len);
216 		lb_num = udf_rw32(fe->tag.tag_loc);
217 		break;
218 	case TAGID_EXTFENTRY :
219 		efe = (struct extfile_entry *) tag;
220 		l_ea = udf_rw32(efe->l_ea);
221 		icbflags  = udf_rw16(efe->icbtag.flags);
222 		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
223 		has_fids  = (addr_type == UDF_ICB_INTERN_ALLOC);
224 		rfid_pos  = UDF_EXTFENTRY_SIZE + l_ea;
225 		max_rfid_pos = rfid_pos + udf_rw64(efe->inf_len);
226 		lb_num = udf_rw32(efe->tag.tag_loc);
227 		break;
228 	case TAGID_INDIRECTENTRY :
229 	case TAGID_EXTATTR_HDR :
230 		break;
231 	case TAGID_ALLOCEXTENT :
232 		/* force crclen to 8 for UDF version < 2.01 */
233 		ext = (struct alloc_ext_entry *) tag;
234 		if (udf_rw16(ump->logvol_info->min_udf_readver) <= 0x200)
235 			ext->tag.desc_crc_len = udf_rw16(8);
236 		break;
237 	default:
238 		panic("%s: passed bad tag\n", __func__);
239 		break;
240 	}
241 
242 	/* fixup internal extended attributes if present */
243 	if (l_ea)
244 		udf_fixup_internal_extattr(blob, lb_num);
245 
246 	if (has_fids) {
247 		udf_fixup_fid_block(blob, lb_size, rfid_pos,
248 			max_rfid_pos, lb_num);
249 	}
250 	udf_validate_tag_and_crc_sums(blob);
251 }
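
/*
 * Usage sketch (illustrative; `buf' and its fields are assumed): a write
 * path that (re)locates a node block calls this just before queueing the
 * i/o.  Note that the destination block number is taken from the fe/efe
 * tag_loc itself, so the caller must have stamped that first; non-node
 * buffers pass through untouched:
 */
#if 0
	udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);
#endif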
252 
253 /* --------------------------------------------------------------------- */
254 
255 /*
256  * Set of generic descriptor readers and writers and their helper functions.
257  * Descriptors inside `logical space' i.e. inside logically mapped partitions
258  * can never be longer than one logical sector.
259  *
260  * NOTE that these functions *can* be used by the scheduler backends to read
261  * node descriptors too.
262  *
263  * For reading, the size of the allocated piece is rounded up to a multiple
264  * of the sector size by udf_calc_udf_malloc_size().
265  */
266 
267 
268 /* SYNC reading of n blocks from specified sector */
269 /* NOTE only used by udf_read_phys_dscr */
270 static int
271 udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
272 	uint32_t start, uint32_t sectors)
273 {
274 	struct buf *buf, *nestbuf;
275 	uint32_t buf_offset;
276 	off_t lblkno, rblkno;
277 	int sector_size = ump->discinfo.sector_size;
278 	int blks = sector_size / DEV_BSIZE;
279 	int piece;
280 	int error;
281 
282 	DPRINTF(READ, ("udf_read_phys_sectors() : sectors = %d, sector_size = %d\n",
283 		sectors, sector_size));
284 	buf = getiobuf(ump->devvp, true);
285 	buf->b_flags    = B_READ;
286 	buf->b_cflags   = BC_BUSY;	/* needed? */
287 	buf->b_iodone   = NULL;
288 	buf->b_data     = blob;
289 	buf->b_bcount   = sectors * sector_size;
290 	buf->b_resid    = buf->b_bcount;
291 	buf->b_bufsize  = buf->b_bcount;
292 	buf->b_private  = NULL;	/* not needed yet */
293 	BIO_SETPRIO(buf, BPRIO_DEFAULT);
294 	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = start * blks;
295 	buf->b_proc     = NULL;
296 
297 	error = 0;
298 	buf_offset = 0;
299 	rblkno = start;
300 	lblkno = 0;
301 	while ((sectors > 0) && (error == 0)) {
302 		piece = MIN(MAXPHYS/sector_size, sectors);
303 		DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));
304 
305 		nestbuf = getiobuf(NULL, true);
306 		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
307 		/* nestbuf is B_ASYNC */
308 
309 		/* identify this nestbuf */
310 		nestbuf->b_lblkno   = lblkno;
311 
312 		/* CD schedules on raw blkno */
313 		nestbuf->b_blkno      = rblkno * blks;
314 		nestbuf->b_proc       = NULL;
315 		nestbuf->b_rawblkno   = rblkno * blks;
316 		nestbuf->b_udf_c_type = what;
317 
318 		udf_discstrat_queuebuf(ump, nestbuf);
319 
320 		lblkno     += piece;
321 		rblkno     += piece;
322 		buf_offset += piece * sector_size;
323 		sectors    -= piece;
324 	}
325 	error = biowait(buf);
326 	putiobuf(buf);
327 
328 	return error;
329 }
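
/*
 * Note on the chunking above (worked example, assuming MAXPHYS is 64 KiB):
 * with 2048-byte sectors each nested buffer carries at most 32 sectors, so
 * a 100-sector read is issued as pieces of 32 + 32 + 32 + 4, all hanging
 * off the single master iobuf that biowait() sleeps on.
 */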
330 
331 
332 /* synchronous generic descriptor read */
333 int
334 udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
335 		    struct malloc_type *mtype, union dscrptr **dstp)
336 {
337 	union dscrptr *dst, *new_dst;
338 	uint8_t *pos;
339 	int sectors, dscrlen;
340 	int i, error, sector_size;
341 
342 	sector_size = ump->discinfo.sector_size;
343 
344 	*dstp = dst = NULL;
345 	dscrlen = sector_size;
346 
347 	/* read initial piece */
348 	dst = malloc(sector_size, mtype, M_WAITOK);
349 	error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
350 	DPRINTFIF(DESCRIPTOR, error, ("read error (%d)\n", error));
351 
352 	if (!error) {
353 		/* check if it's a valid tag */
354 		error = udf_check_tag(dst);
355 		if (error) {
356 			/* check if it's an empty block */
357 			pos = (uint8_t *) dst;
358 			for (i = 0; i < sector_size; i++, pos++) {
359 				if (*pos) break;
360 			}
361 			if (i == sector_size) {
362 				/* return no error but with no dscrptr */
363 				/* dispose first block */
364 				free(dst, mtype);
365 				return 0;
366 			}
367 		}
368 		/* calculate descriptor size */
369 		dscrlen = udf_tagsize(dst, sector_size);
370 	}
371 	DPRINTFIF(DESCRIPTOR, error, ("bad tag checksum\n"));
372 
373 	if (!error && (dscrlen > sector_size)) {
374 		DPRINTF(DESCRIPTOR, ("multi block descriptor read\n"));
375 		/*
376 		 * Read the rest of the descriptor. Since this is only used at
377 		 * mount time it is overdone to define and use a specific
378 		 * udf_intbreadn for this alone.
379 		 */
380 
381 		new_dst = realloc(dst, dscrlen, mtype, M_WAITOK);
382 		if (new_dst == NULL) {
383 			free(dst, mtype);
384 			return ENOMEM;
385 		}
386 		dst = new_dst;
387 
388 		sectors = (dscrlen + sector_size -1) / sector_size;
389 		DPRINTF(DESCRIPTOR, ("dscrlen = %d (%d blk)\n", dscrlen, sectors));
390 
391 		pos = (uint8_t *) dst + sector_size;
392 		error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
393 				sector + 1, sectors-1);
394 
395 		DPRINTFIF(DESCRIPTOR, error, ("read error on multi (%d)\n",
396 		    error));
397 	}
398 	if (!error) {
399 		error = udf_check_tag_payload(dst, dscrlen);
400 		DPRINTFIF(DESCRIPTOR, error, ("bad payload check sum\n"));
401 	}
402 	if (error && dst) {
403 		free(dst, mtype);
404 		dst = NULL;
405 	}
406 	*dstp = dst;
407 
408 	return error;
409 }
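
/*
 * Usage sketch (illustrative; the malloc type and sector are assumed): a
 * mount-time reader gets back either a malloc()ed descriptor, or NULL with
 * error 0 when the sector turned out to be all zeroes:
 */
#if 0
	union dscrptr *dscr;
	int error;

	error = udf_read_phys_dscr(ump, sector, M_TEMP, &dscr);
	if (error == 0 && dscr == NULL) {
		/* blank sector, nothing recorded */
	} else if (error == 0) {
		/* ... dispatch on udf_rw16(dscr->tag.id) ... */
		free(dscr, M_TEMP);
	}
#endif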
410 
411 
412 static void
413 udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf)
414 {
415 	struct buf *nestbuf;
416 	uint32_t buf_offset;
417 	off_t lblkno, rblkno;
418 	int sector_size = ump->discinfo.sector_size;
419 	int blks = sector_size / DEV_BSIZE;
420 	uint32_t sectors;
421 	int piece;
422 	int error;
423 
424 	sectors = buf->b_bcount / sector_size;
425 	DPRINTF(WRITE, ("udf_write_phys_buf() : sectors = %d, sector_size = %d\n",
426 		sectors, sector_size));
427 
428 	/* don't forget to increase pending count for the bwrite itself */
429 /* panic("NO WRITING\n"); */
430 	if (buf->b_vp) {
431 		mutex_enter(&buf->b_vp->v_interlock);
432 		buf->b_vp->v_numoutput++;
433 		mutex_exit(&buf->b_vp->v_interlock);
434 	}
435 
436 	error = 0;
437 	buf_offset = 0;
438 	rblkno = buf->b_blkno / blks;
439 	lblkno = 0;
440 	while ((sectors > 0) && (error == 0)) {
441 		piece = MIN(MAXPHYS/sector_size, sectors);
442 		DPRINTF(WRITE, ("write out %d + %d\n",
443 		    (uint32_t) rblkno, piece));
444 
445 		nestbuf = getiobuf(NULL, true);
446 		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
447 		/* nestbuf is B_ASYNC */
448 
449 		/* identify this nestbuf */
450 		nestbuf->b_lblkno   = lblkno;
451 
452 		/* CD schedules on raw blkno */
453 		nestbuf->b_blkno      = rblkno * blks;
454 		nestbuf->b_proc       = NULL;
455 		nestbuf->b_rawblkno   = rblkno * blks;
456 		nestbuf->b_udf_c_type = what;
457 
458 		udf_discstrat_queuebuf(ump, nestbuf);
459 
460 		lblkno     += piece;
461 		rblkno     += piece;
462 		buf_offset += piece * sector_size;
463 		sectors    -= piece;
464 	}
465 }
466 
467 
468 /* synchronous generic descriptor write */
469 int
470 udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what,
471 		     union dscrptr *dscr, uint32_t sector, uint32_t logsector)
472 {
473 	struct vnode *vp;
474 	struct buf *buf;
475 	int sector_size = ump->discinfo.sector_size;
476 	int blks = sector_size / DEV_BSIZE;
477 	int dscrlen;
478 	int error;
479 
480 	/* set sector number in the descriptor and validate */
481 	dscr->tag.tag_loc = udf_rw32(logsector);
482 	udf_validate_tag_and_crc_sums(dscr);
483 
484 	/* calculate descriptor size */
485 	dscrlen = udf_tagsize(dscr, sector_size);
486 
487 	/* get transfer buffer */
488 	vp = udf_node ? udf_node->vnode : ump->devvp;
489 	buf = getiobuf(vp, true);
490 	buf->b_flags    = B_WRITE;
491 	buf->b_cflags   = BC_BUSY;	/* needed? */
492 	buf->b_iodone   = NULL;
493 	buf->b_data     = (void *) dscr;
494 	buf->b_bcount   = dscrlen;
495 	buf->b_resid    = buf->b_bcount;
496 	buf->b_bufsize  = buf->b_bcount;
497 	buf->b_private  = NULL;	/* not needed yet */
498 	BIO_SETPRIO(buf, BPRIO_DEFAULT);
499 	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = sector * blks;
500 	buf->b_proc     = NULL;
501 
502 	/* do the write, wait and return error */
503 	udf_write_phys_buf(ump, what, buf);
504 	error = biowait(buf);
505 	putiobuf(buf);
506 
507 	return error;
508 }
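
/*
 * Usage sketch (illustrative; the surrounding values are assumed): passing
 * a NULL udf_node makes the transfer account against the device vnode;
 * `sector' is where the blob lands physically, `logsector' is what gets
 * stamped into the descriptor tag:
 */
#if 0
	error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
	    (union dscrptr *) dscr, phys_sector, log_sector);
	if (error)
		printf("udf: descriptor write failed (%d)\n", error);
#endif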
509 
510 
511 /* asynchronous generic descriptor write */
512 int
513 udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
514 		      int what, union dscrptr *dscr,
515 		      uint32_t sector, uint32_t logsector,
516 		      void (*dscrwr_callback)(struct buf *))
517 {
518 	struct vnode *vp;
519 	struct buf *buf;
520 	int dscrlen;
521 	int sector_size = ump->discinfo.sector_size;
522 	int blks = sector_size / DEV_BSIZE;
523 
524 	KASSERT(dscrwr_callback);
525 	DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));
526 
527 	/* set sector number in the descriptor and validate */
528 	dscr->tag.tag_loc = udf_rw32(logsector);
529 	udf_validate_tag_and_crc_sums(dscr);
530 
531 	/* calculate descriptor size */
532 	dscrlen = udf_tagsize(dscr, sector_size);
533 
534 	/* get transfer buffer */
535 	vp = udf_node ? udf_node->vnode : ump->devvp;
536 	buf = getiobuf(vp, true);
537 	buf->b_flags    = B_WRITE; // | B_ASYNC;
538 	buf->b_cflags   = BC_BUSY;
539 	buf->b_iodone	= dscrwr_callback;
540 	buf->b_data     = dscr;
541 	buf->b_bcount   = dscrlen;
542 	buf->b_resid    = buf->b_bcount;
543 	buf->b_bufsize  = buf->b_bcount;
544 	buf->b_private  = NULL;	/* not needed yet */
545 	BIO_SETPRIO(buf, BPRIO_DEFAULT);
546 	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = sector * blks;
547 	buf->b_proc     = NULL;
548 
549 	/* do the write and return no error */
550 	udf_write_phys_buf(ump, what, buf);
551 	return 0;
552 }
553 
554 /* --------------------------------------------------------------------- */
555 
556 /* disc strategy dispatchers */
557 
558 int
559 udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
560 	union dscrptr **dscrptr)
561 {
562 	struct udf_strategy *strategy = ump->strategy;
563 	struct udf_strat_args args;
564 	int error;
565 
566 	KASSERT(strategy);
567 	args.ump  = ump;
568 	args.udf_node = udf_node;
569 	args.icb  = icb;
570 	args.dscr = NULL;
571 
572 	error = (strategy->create_logvol_dscr)(&args);
573 	*dscrptr = args.dscr;
574 
575 	return error;
576 }
577 
578 
579 void
580 udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
581 	void *dscr)
582 {
583 	struct udf_strategy *strategy = ump->strategy;
584 	struct udf_strat_args args;
585 
586 	KASSERT(strategy);
587 	args.ump  = ump;
588 	args.icb  = icb;
589 	args.dscr = dscr;
590 
591 	(strategy->free_logvol_dscr)(&args);
592 }
593 
594 
595 int
596 udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
597 	union dscrptr **dscrptr)
598 {
599 	struct udf_strategy *strategy = ump->strategy;
600 	struct udf_strat_args args;
601 	int error;
602 
603 	KASSERT(strategy);
604 	args.ump  = ump;
605 	args.icb  = icb;
606 	args.dscr = NULL;
607 
608 	error = (strategy->read_logvol_dscr)(&args);
609 	*dscrptr = args.dscr;
610 
611 	return error;
612 }
613 
614 
615 int
616 udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
617 	struct long_ad *icb, int waitfor)
618 {
619 	struct udf_strategy *strategy = udf_node->ump->strategy;
620 	struct udf_strat_args args;
621 	int error;
622 
623 	KASSERT(strategy);
624 	args.ump      = udf_node->ump;
625 	args.udf_node = udf_node;
626 	args.icb      = icb;
627 	args.dscr     = dscr;
628 	args.waitfor  = waitfor;
629 
630 	error = (strategy->write_logvol_dscr)(&args);
631 	return error;
632 }
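
/*
 * Usage sketch (illustrative): node descriptors are never read or written
 * through the physical helpers directly; callers go through these
 * dispatchers so the mount's strategy can cache, remap or delay the i/o.
 * The `waitfor' semantics are defined by the strategy in use:
 */
#if 0
	union dscrptr *dscr;
	int error, waitfor = 1;		/* value assumed for illustration */

	error = udf_read_logvol_dscr(ump, icb, &dscr);
	if (error == 0) {
		/* ... modify the node descriptor ... */
		error = udf_write_logvol_dscr(udf_node, dscr, icb, waitfor);
	}
#endif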
633 
634 
635 void
636 udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
637 {
638 	struct udf_strategy *strategy = ump->strategy;
639 	struct udf_strat_args args;
640 
641 	KASSERT(strategy);
642 	args.ump = ump;
643 	args.nestbuf = nestbuf;
644 
645 	(strategy->queuebuf)(&args);
646 }
647 
648 
649 void
650 udf_discstrat_init(struct udf_mount *ump)
651 {
652 	struct udf_strategy *strategy = ump->strategy;
653 	struct udf_strat_args args;
654 
655 	KASSERT(strategy);
656 	args.ump = ump;
657 	(strategy->discstrat_init)(&args);
658 }
659 
660 
661 void udf_discstrat_finish(struct udf_mount *ump)
662 {
663 	struct udf_strategy *strategy = ump->strategy;
664 	struct udf_strat_args args;
665 
666 	/* strategy might not have been set, so ignore if not set */
667 	if (strategy) {
668 		args.ump = ump;
669 		(strategy->discstrat_finish)(&args);
670 	}
671 }
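
/*
 * Lifecycle sketch (illustrative; the strategy name is hypothetical): the
 * mount code picks a strategy implementation for the disc at hand, runs
 * its init hook, funnels every buffer through queuebuf() and tears the
 * strategy down again at unmount:
 */
#if 0
	ump->strategy = &some_udf_strategy;	/* hypothetical strategy */
	udf_discstrat_init(ump);
	/* ... udf_discstrat_queuebuf(ump, nestbuf) during operation ... */
	udf_discstrat_finish(ump);
#endif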
672 
673 /* --------------------------------------------------------------------- */
674 
675