/* $NetBSD: udf_readwrite.c,v 1.6 2008/07/28 19:41:13 reinoud Exp $ */

/*
 * Copyright (c) 2007, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_readwrite.c,v 1.6 2008/07/28 19:41:13 reinoud Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#if defined(_KERNEL_OPT)
#include "opt_udf.h"
#endif

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

/* --------------------------------------------------------------------- */

void
udf_fixup_fid_block(uint8_t *blob, int lb_size,
	int rfix_pos, int max_rfix_pos, uint32_t lb_num)
{
	struct fileid_desc *fid;
	uint8_t *fid_pos;
	int fid_len, found;

	/* needs to be word aligned */
	KASSERT(rfix_pos % 4 == 0);

	/* first resync with the FID stream !!! */
	found = 0;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid_pos = blob + rfix_pos;
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) == TAGID_FID) {
			if (udf_check_tag((union dscrptr *) fid) == 0)
				found = 1;
		}
		if (found)
			break;
		/* try next location; can only be 4-byte aligned */
		rfix_pos += 4;
	}

	/* walk over the fids */
	fid_pos = blob + rfix_pos;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) != TAGID_FID) {
			/* end of FID stream; end of directory or corrupted */
			break;
		}

		/* update sector number and recalculate checksum */
		fid->tag.tag_loc = udf_rw32(lb_num);
		udf_validate_tag_sum((union dscrptr *) fid);

		/* if the FID crosses the end of the blob, we're done */
		if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
			break;

		fid_len = udf_fidsize(fid);
		fid_pos  += fid_len;
		rfix_pos += fid_len;
	}
}

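/*
 * Illustrative sketch only (not compiled): walking an embedded FID stream
 * with the same tag-check and udf_fidsize() stepping that
 * udf_fixup_fid_block() uses above.  The helper name is made up; `blob',
 * `rfix_pos' and `max_rfix_pos' would come from the embedded directory
 * layout set up in udf_fixup_node_internals() below.
 */
#if 0
static int
udf_example_count_fids(uint8_t *blob, int rfix_pos, int max_rfix_pos)
{
	struct fileid_desc *fid;
	int num_fids = 0;

	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid = (struct fileid_desc *) (blob + rfix_pos);
		if (udf_rw16(fid->tag.id) != TAGID_FID)
			break;			/* end of FID stream */
		if (udf_check_tag((union dscrptr *) fid))
			break;			/* corrupted tag */
		num_fids++;
		rfix_pos += udf_fidsize(fid);	/* step to the next FID */
	}
	return num_fids;
}
#endif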

void
udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
{
	struct desc_tag        *tag;
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct extattrhdr_desc *eahdr;
	int l_ea;

	/* get information from fe/efe */
	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) blob;
		l_ea  = udf_rw32(fe->l_ea);
		eahdr = (struct extattrhdr_desc *) fe->data;
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) blob;
		l_ea  = udf_rw32(efe->l_ea);
		eahdr = (struct extattrhdr_desc *) efe->data;
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_ALLOCEXTENT :
	case TAGID_EXTATTR_HDR :
		return;
	default:
		panic("%s: passed bad tag\n", __func__);
	}

	/* something recorded here? (why am i called?) */
	if (l_ea == 0)
		return;

#if 0
	/* check extended attribute tag */
	/* TODO XXX what to do when we encounter an error here? */
	error = udf_check_tag(eahdr);
	if (error)
		return;	/* for now */
	if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
		return;	/* for now */
	error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
	if (error)
		return; /* for now */
#endif

	DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
		l_ea));

	/* fixup eahdr tag */
	eahdr->tag.tag_loc = udf_rw32(lb_num);
	udf_validate_tag_and_crc_sums((union dscrptr *) eahdr);
}

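/*
 * Note on the embedded layout patched by these fixup routines: a (extended)
 * file entry that uses internal allocation carries, after its fixed fields,
 * first l_ea bytes of extended attribute space (starting at fe->data, where
 * the extattrhdr_desc lives) and then the embedded file data itself, which
 * for a directory is the FID stream.  Hence the
 * rfid_pos = UDF_(EXT)FENTRY_SIZE + l_ea calculation in
 * udf_fixup_node_internals() below.
 */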

void
udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
{
	struct desc_tag *tag, *sbm_tag;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	uint32_t lb_size, lb_num;
	uint32_t rfid_pos, max_rfid_pos;
	int icbflags, addr_type, file_type, has_fids, has_sbm, l_ea;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* if it's not a node we're done */
	if (udf_c_type != UDF_C_NODE)
		return;

	/* NOTE this could also be done in write_internal */
	/* start of a descriptor */
	l_ea     = 0;
	has_fids = 0;
	has_sbm  = 0;
	max_rfid_pos = rfid_pos = lb_num = 0;	/* shut up gcc! */

	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) tag;
		l_ea = udf_rw32(fe->l_ea);
		icbflags  = udf_rw16(fe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		file_type = fe->icbtag.file_type;
		has_fids  = (addr_type == UDF_ICB_INTERN_ALLOC);
		has_sbm   = (file_type == UDF_ICB_FILETYPE_META_BITMAP);
		rfid_pos  = UDF_FENTRY_SIZE + l_ea;
		max_rfid_pos = rfid_pos + udf_rw64(fe->inf_len);
		lb_num = udf_rw32(fe->tag.tag_loc);
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) tag;
		l_ea = udf_rw32(efe->l_ea);
		icbflags  = udf_rw16(efe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		file_type = efe->icbtag.file_type;
		has_fids  = (addr_type == UDF_ICB_INTERN_ALLOC);
		has_sbm   = (file_type == UDF_ICB_FILETYPE_META_BITMAP);
		rfid_pos  = UDF_EXTFENTRY_SIZE + l_ea;
		max_rfid_pos = rfid_pos + udf_rw64(efe->inf_len);
		lb_num = udf_rw32(efe->tag.tag_loc);
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_EXTATTR_HDR :
		break;
	case TAGID_ALLOCEXTENT :
		/* force crclen to 8 for UDF version < 2.01 */
		ext = (struct alloc_ext_entry *) tag;
		if (udf_rw16(ump->logvol_info->min_udf_readver) <= 0x200)
			ext->tag.desc_crc_len = udf_rw16(8);
		break;
	default:
		panic("%s: passed bad tag\n", __func__);
		break;
	}

	/* fixup internal extended attributes if present */
	if (l_ea)
		udf_fixup_internal_extattr(blob, lb_num);

	if (has_fids)
		udf_fixup_fid_block(blob, lb_size, rfid_pos,
			max_rfid_pos, lb_num);

	if (has_sbm) {
		sbm_tag = (struct desc_tag *) (blob + rfid_pos);
		sbm_tag->tag_loc = tag->tag_loc;
		udf_validate_tag_and_crc_sums((uint8_t *) sbm_tag);
	}

	udf_validate_tag_and_crc_sums(blob);
}

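/*
 * Illustrative sketch only (not compiled): the strategy backends relocate a
 * node to its target logical block, stamp that block number into the main
 * descriptor tag and then let udf_fixup_node_internals() propagate it into
 * the embedded FIDs/extended attributes and regenerate all checksums.  The
 * helper name and the bare-bones flow here are made up for illustration;
 * the real code paths live in the udf_strat_*.c backends.
 */
#if 0
static void
udf_example_prepare_node_write(struct udf_mount *ump, union dscrptr *dscr,
	uint32_t new_lb_num)
{
	uint8_t *blob = (uint8_t *) dscr;

	/* record where the node will be written */
	dscr->tag.tag_loc = udf_rw32(new_lb_num);

	/* patch embedded structures and recalculate all sums */
	udf_fixup_node_internals(ump, blob, UDF_C_NODE);
}
#endif
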
/* --------------------------------------------------------------------- */

/*
 * Set of generic descriptor readers and writers and their helper functions.
 * Descriptors inside `logical space', i.e. inside logically mapped
 * partitions, can never be longer than one logical sector.
 *
 * NOTE that these functions *can* be used by the scheduler backends to read
 * node descriptors too.
 *
 * For reading, the size of the allocated piece is a multiple of the sector
 * size due to udf_calc_udf_malloc_size().
 */


/* SYNC reading of n blocks from specified sector */
/* NOTE only used by udf_read_phys_dscr */
static int
udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
	uint32_t start, uint32_t sectors)
{
	struct buf *buf, *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int piece;
	int error;

	DPRINTF(READ, ("udf_read_phys_sectors() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));
	buf = getiobuf(ump->devvp, true);
	buf->b_flags    = B_READ;
	buf->b_cflags   = BC_BUSY;	/* needed? */
	buf->b_iodone   = NULL;
	buf->b_data     = blob;
	buf->b_bcount   = sectors * sector_size;
	buf->b_resid    = buf->b_bcount;
	buf->b_bufsize  = buf->b_bcount;
	buf->b_private  = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = start * blks;
	buf->b_proc     = NULL;

	error = 0;
	buf_offset = 0;
	rblkno = start;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno   = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno      = rblkno * blks;
		nestbuf->b_proc       = NULL;
		nestbuf->b_rawblkno   = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno     += piece;
		rblkno     += piece;
		buf_offset += piece * sector_size;
		sectors    -= piece;
	}
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


/* synchronous generic descriptor read */
int
udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
		    struct malloc_type *mtype, union dscrptr **dstp)
{
	union dscrptr *dst, *new_dst;
	uint8_t *pos;
	int sectors, dscrlen;
	int i, error, sector_size;

	sector_size = ump->discinfo.sector_size;

	*dstp = dst = NULL;
	dscrlen = sector_size;

	/* read initial piece */
	dst = malloc(sector_size, mtype, M_WAITOK);
	error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
	DPRINTFIF(DESCRIPTOR, error, ("read error (%d)\n", error));

	if (!error) {
		/* check if it's a valid tag */
		error = udf_check_tag(dst);
		if (error) {
			/* check if it's an empty block */
			pos = (uint8_t *) dst;
			for (i = 0; i < sector_size; i++, pos++) {
				if (*pos) break;
			}
			if (i == sector_size) {
				/* return no error but with no dscrptr */
				/* dispose first block */
				free(dst, mtype);
				return 0;
			}
		}
		/* calculate descriptor size */
		dscrlen = udf_tagsize(dst, sector_size);
	}
	DPRINTFIF(DESCRIPTOR, error, ("bad tag checksum\n"));

	if (!error && (dscrlen > sector_size)) {
		DPRINTF(DESCRIPTOR, ("multi block descriptor read\n"));
		/*
		 * Read the rest of the descriptor. Since this is only done at
		 * mount time it would be overkill to define and use a
		 * specific multi-block read function for this alone.
		 */

		new_dst = realloc(dst, dscrlen, mtype, M_WAITOK);
		if (new_dst == NULL) {
			free(dst, mtype);
			return ENOMEM;
		}
		dst = new_dst;

		sectors = (dscrlen + sector_size -1) / sector_size;
		DPRINTF(DESCRIPTOR, ("dscrlen = %d (%d blk)\n", dscrlen, sectors));

		pos = (uint8_t *) dst + sector_size;
		error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
				sector + 1, sectors-1);

		DPRINTFIF(DESCRIPTOR, error, ("read error on multi (%d)\n",
		    error));
	}
	if (!error) {
		error = udf_check_tag_payload(dst, dscrlen);
		DPRINTFIF(DESCRIPTOR, error, ("bad payload check sum\n"));
	}
	if (error && dst) {
		free(dst, mtype);
		dst = NULL;
	}
	*dstp = dst;

	return error;
}

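/*
 * Illustrative sketch only (not compiled): how a caller might use
 * udf_read_phys_dscr() to fetch a single descriptor.  The helper name and
 * the use of the M_UDFTEMP malloc type are assumptions for the example;
 * the real volume descriptor lookups live in udf_subr.c.
 */
#if 0
static int
udf_example_dump_dscr(struct udf_mount *ump, uint32_t sector)
{
	union dscrptr *dscr;
	int error;

	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &dscr);
	if (error)
		return error;
	if (dscr == NULL)
		return ENOENT;		/* blank sector, no descriptor */

	DPRINTF(DESCRIPTOR, ("sector %d holds descriptor with tag id %d\n",
		sector, udf_rw16(dscr->tag.id)));

	free(dscr, M_UDFTEMP);
	return 0;
}
#endif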

static void
udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf)
{
	struct buf *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	uint32_t sectors;
	int piece;
	int error;

	sectors = buf->b_bcount / sector_size;
	DPRINTF(WRITE, ("udf_write_phys_buf() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));

	/* don't forget to increase pending count for the bwrite itself */
/* panic("NO WRITING\n"); */
	if (buf->b_vp) {
		mutex_enter(&buf->b_vp->v_interlock);
		buf->b_vp->v_numoutput++;
		mutex_exit(&buf->b_vp->v_interlock);
	}

	error = 0;
	buf_offset = 0;
	rblkno = buf->b_blkno / blks;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(WRITE, ("write out %d + %d\n",
		    (uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno   = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno      = rblkno * blks;
		nestbuf->b_proc       = NULL;
		nestbuf->b_rawblkno   = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno     += piece;
		rblkno     += piece;
		buf_offset += piece * sector_size;
		sectors    -= piece;
	}
}


/* synchronous generic descriptor write */
int
udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what,
		     union dscrptr *dscr, uint32_t sector, uint32_t logsector)
{
	struct vnode *vp;
	struct buf *buf;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int dscrlen;
	int error;

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags    = B_WRITE;
	buf->b_cflags   = BC_BUSY;	/* needed? */
	buf->b_iodone   = NULL;
	buf->b_data     = (void *) dscr;
	buf->b_bcount   = dscrlen;
	buf->b_resid    = buf->b_bcount;
	buf->b_bufsize  = buf->b_bcount;
	buf->b_private  = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc     = NULL;

	/* do the write, wait and return error */
	udf_write_phys_buf(ump, what, buf);
	error = biowait(buf);
	putiobuf(buf);

	return error;
}

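/*
 * Illustrative sketch only (not compiled): writing a descriptor back with
 * udf_write_phys_dscr_sync().  The function stamps `logsector' into the
 * descriptor tag and revalidates the sums itself, so a caller only provides
 * the physical sector to write to and the logical address the descriptor
 * should claim to live at.  The helper name and arguments are made up for
 * the example.
 */
#if 0
static int
udf_example_update_dscr(struct udf_mount *ump, union dscrptr *dscr,
	uint32_t phys_sector, uint32_t log_sector)
{
	/* no udf_node given: the write is accounted to the device vnode */
	return udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR, dscr,
		phys_sector, log_sector);
}
#endif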

/* asynchronous generic descriptor write */
int
udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
		      int what, union dscrptr *dscr,
		      uint32_t sector, uint32_t logsector,
		      void (*dscrwr_callback)(struct buf *))
{
	struct vnode *vp;
	struct buf *buf;
	int dscrlen;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;

	KASSERT(dscrwr_callback);
	DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags    = B_WRITE;	/* | B_ASYNC */
	buf->b_cflags   = BC_BUSY;
	buf->b_iodone	= dscrwr_callback;
	buf->b_data     = dscr;
	buf->b_bcount   = dscrlen;
	buf->b_resid    = buf->b_bcount;
	buf->b_bufsize  = buf->b_bcount;
	buf->b_private  = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc     = NULL;

	/* do the write and return no error */
	udf_write_phys_buf(ump, what, buf);
	return 0;
}

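/*
 * Illustrative sketch only (not compiled): the shape of a completion
 * callback for udf_write_phys_dscr_async().  Real callbacks in the UDF code
 * also update node/write-count state; this minimal version merely logs an
 * error and releases the transfer buffer, which the caller of the async
 * write never waits on.
 */
#if 0
static void
udf_example_dscrwr_callback(struct buf *buf)
{
	if (buf->b_error)
		DPRINTF(WRITE, ("descriptor write failed (%d)\n",
			buf->b_error));
	putiobuf(buf);
}
#endif
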
/* --------------------------------------------------------------------- */

/* disc strategy dispatchers */

int
udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump  = ump;
	args.udf_node = udf_node;
	args.icb  = icb;
	args.dscr = NULL;

	error = (strategy->create_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


void
udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	void *dscr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump  = ump;
	args.icb  = icb;
	args.dscr = dscr;

	(strategy->free_logvol_dscr)(&args);
}


int
udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump  = ump;
	args.icb  = icb;
	args.dscr = NULL;

	error = (strategy->read_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


int
udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
	struct long_ad *icb, int waitfor)
{
	struct udf_strategy *strategy = udf_node->ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump      = udf_node->ump;
	args.udf_node = udf_node;
	args.icb      = icb;
	args.dscr     = dscr;
	args.waitfor  = waitfor;

	error = (strategy->write_logvol_dscr)(&args);
	return error;
}

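/*
 * Illustrative sketch only (not compiled): a read-modify-write cycle of a
 * node descriptor through the strategy dispatchers above.  The helper name
 * is made up; locking, error handling and the exact `waitfor' convention
 * are simplified.  The descriptor memory remains owned by the node/strategy
 * and is only released with udf_free_logvol_dscr(), normally at node
 * reclaim time.
 */
#if 0
static int
udf_example_touch_node_dscr(struct udf_node *udf_node, struct long_ad *icb)
{
	struct udf_mount *ump = udf_node->ump;
	union dscrptr *dscr;
	int error;

	/* fetch the node descriptor via the active strategy */
	error = udf_read_logvol_dscr(ump, icb, &dscr);
	if (error)
		return error;

	/* ... modify the file entry / extended file entry here ... */

	/* write it back; the waitfor handling is up to the strategy */
	return udf_write_logvol_dscr(udf_node, dscr, icb, 1);
}
#endif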

void
udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	args.nestbuf = nestbuf;

	(strategy->queuebuf)(&args);
}


void
udf_discstrat_init(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	(strategy->discstrat_init)(&args);
}


void
udf_discstrat_finish(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	/* strategy might not have been set, so ignore if not set */
	if (strategy) {
		args.ump = ump;
		(strategy->discstrat_finish)(&args);
	}
}

/* --------------------------------------------------------------------- */