      1 /* $NetBSD: udf_readwrite.c,v 1.1.6.4 2008/07/02 19:08:20 mjf Exp $ */
      2 
      3 /*
      4  * Copyright (c) 2007, 2008 Reinoud Zandijk
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26  *
     27  */
     28 
     29 #include <sys/cdefs.h>
     30 #ifndef lint
     31 __KERNEL_RCSID(0, "$NetBSD: udf_readwrite.c,v 1.1.6.4 2008/07/02 19:08:20 mjf Exp $");
     32 #endif /* not lint */
     33 
     34 
     35 #if defined(_KERNEL_OPT)
     36 #include "opt_quota.h"
     37 #include "opt_compat_netbsd.h"
     38 #endif
     39 
     40 #include <sys/param.h>
     41 #include <sys/systm.h>
     42 #include <sys/sysctl.h>
     43 #include <sys/namei.h>
     44 #include <sys/proc.h>
     45 #include <sys/kernel.h>
     46 #include <sys/vnode.h>
     47 #include <miscfs/genfs/genfs_node.h>
     48 #include <sys/mount.h>
     49 #include <sys/buf.h>
     50 #include <sys/file.h>
     51 #include <sys/device.h>
     52 #include <sys/disklabel.h>
     53 #include <sys/ioctl.h>
     54 #include <sys/malloc.h>
     55 #include <sys/dirent.h>
     56 #include <sys/stat.h>
     57 #include <sys/conf.h>
     58 #include <sys/kauth.h>
     59 #include <sys/kthread.h>
     60 #include <dev/clock_subr.h>
     61 
     62 #include <fs/udf/ecma167-udf.h>
     63 #include <fs/udf/udf_mount.h>
     64 
     65 #if defined(_KERNEL_OPT)
     66 #include "opt_udf.h"
     67 #endif
     68 
     69 #include "udf.h"
     70 #include "udf_subr.h"
     71 #include "udf_bswap.h"
     72 
     73 
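         /* shorthand: get the udf_node associated with a vnode */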
     74 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
     75 
     76 /* --------------------------------------------------------------------- */
     77 
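         /*
          * Resync with the FID stream inside `blob' and rewrite the tag
          * location of every FID found so it records the new logical block
          * number `lb_num', revalidating each tag checksum.
          */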
     78 void
     79 udf_fixup_fid_block(uint8_t *blob, int lb_size,
     80 	int rfix_pos, int max_rfix_pos, uint32_t lb_num)
     81 {
     82 	struct fileid_desc *fid;
     83 	uint8_t *fid_pos;
     84 	int fid_len, found;
     85 
     86 	/* needs to be word aligned */
     87 	KASSERT(rfix_pos % 4 == 0);
     88 
     89 	/* first resync with the FID stream !!! */
     90 	found = 0;
     91 	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
     92 		fid_pos = blob + rfix_pos;
     93 		fid = (struct fileid_desc *) fid_pos;
     94 		if (udf_rw16(fid->tag.id) == TAGID_FID) {
     95 			if (udf_check_tag((union dscrptr *) fid) == 0)
     96 				found = 1;
     97 		}
     98 		if (found)
     99 			break;
     100 		/* try next location; FIDs can only start at 4-byte aligned offsets */
    101 		rfix_pos += 4;
    102 	}
    103 
    104 	/* walk over the fids */
    105 	fid_pos = blob + rfix_pos;
    106 	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
    107 		fid = (struct fileid_desc *) fid_pos;
    108 		if (udf_rw16(fid->tag.id) != TAGID_FID) {
     109 			/* end of FID stream; end of directory or corrupted */
    110 			break;
    111 		}
    112 
     113 		/* update sector number and recalculate checksum */
    114 		fid->tag.tag_loc = udf_rw32(lb_num);
    115 		udf_validate_tag_sum((union dscrptr *) fid);
    116 
     117 		/* if the FID would cross the end of the blob, we're done */
    118 		if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
    119 			break;
    120 
    121 		fid_len = udf_fidsize(fid);
    122 		fid_pos  += fid_len;
    123 		rfix_pos += fid_len;
    124 	}
    125 }
    126 
    127 
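         /*
          * If the fe/efe records extended attributes internally, point the
          * extended attribute header's tag at the new logical block number
          * `lb_num' and revalidate its checksums.
          */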
    128 void
    129 udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
    130 {
    131 	struct desc_tag        *tag;
    132 	struct file_entry      *fe;
    133 	struct extfile_entry   *efe;
    134 	struct extattrhdr_desc *eahdr;
    135 	int l_ea;
    136 
    137 	/* get information from fe/efe */
    138 	tag = (struct desc_tag *) blob;
    139 	switch (udf_rw16(tag->id)) {
    140 	case TAGID_FENTRY :
    141 		fe = (struct file_entry *) blob;
    142 		l_ea  = udf_rw32(fe->l_ea);
    143 		eahdr = (struct extattrhdr_desc *) fe->data;
    144 		break;
    145 	case TAGID_EXTFENTRY :
    146 		efe = (struct extfile_entry *) blob;
    147 		l_ea  = udf_rw32(efe->l_ea);
    148 		eahdr = (struct extattrhdr_desc *) efe->data;
    149 		break;
    150 	case TAGID_INDIRECTENTRY :
    151 	case TAGID_ALLOCEXTENT :
    152 	case TAGID_EXTATTR_HDR :
    153 		return;
    154 	default:
    155 		panic("%s: passed bad tag\n", __func__);
    156 	}
    157 
     158 	/* is anything recorded here? (if not, why were we called?) */
    159 	if (l_ea == 0)
    160 		return;
    161 
    162 #if 0
    163 	/* check extended attribute tag */
    164 	/* TODO XXX what to do when we encounter an error here? */
    165 	error = udf_check_tag(eahdr);
    166 	if (error)
    167 		return;	/* for now */
    168 	if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
    169 		return;	/* for now */
    170 	error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
    171 	if (error)
    172 		return; /* for now */
    173 #endif
    174 
    175 	DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
    176 		l_ea));
    177 
    178 	/* fixup eahdr tag */
    179 	eahdr->tag.tag_loc = udf_rw32(lb_num);
    180 	udf_validate_tag_and_crc_sums((union dscrptr *) eahdr);
    181 }
    182 
    183 
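         /*
          * Fix up everything inside a node descriptor that records its own
          * block number: the internally recorded extended attributes, the
          * internally allocated FIDs of a directory, and finally the
          * descriptor tag itself.
          */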
    184 void
    185 udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
    186 {
    187 	struct desc_tag *tag;
    188 	struct file_entry *fe;
    189 	struct extfile_entry *efe;
    190 	uint32_t lb_size, lb_num;
    191 	uint32_t rfid_pos, max_rfid_pos;
    192 	int icbflags, addr_type, has_fids, l_ea;
    193 
    194 	lb_size = udf_rw32(ump->logical_vol->lb_size);
     195 	/* if it's not a node, we're done */
    196 	if (udf_c_type != UDF_C_NODE)
    197 		return;
    198 
    199 	/* NOTE this could also be done in write_internal */
    200 	/* start of a descriptor */
    201 	has_fids = 0;
    202 	max_rfid_pos = rfid_pos = lb_num = 0;	/* shut up gcc! */
    203 
    204 	tag = (struct desc_tag *) blob;
    205 	switch (udf_rw16(tag->id)) {
    206 	case TAGID_FENTRY :
    207 		fe = (struct file_entry *) tag;
    208 		l_ea = udf_rw32(fe->l_ea);
    209 		icbflags  = udf_rw16(fe->icbtag.flags);
    210 		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
    211 		has_fids  = (addr_type == UDF_ICB_INTERN_ALLOC);
    212 		rfid_pos  = UDF_FENTRY_SIZE + l_ea;
    213 		max_rfid_pos = rfid_pos + udf_rw64(fe->inf_len);
    214 		lb_num = udf_rw32(fe->tag.tag_loc);
    215 		break;
    216 	case TAGID_EXTFENTRY :
    217 		efe = (struct extfile_entry *) tag;
    218 		l_ea = udf_rw32(efe->l_ea);
    219 		icbflags  = udf_rw16(efe->icbtag.flags);
    220 		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
    221 		has_fids  = (addr_type == UDF_ICB_INTERN_ALLOC);
    222 		rfid_pos  = UDF_EXTFENTRY_SIZE + l_ea;
    223 		max_rfid_pos = rfid_pos + udf_rw64(efe->inf_len);
    224 		lb_num = udf_rw32(efe->tag.tag_loc);
    225 		break;
    226 	case TAGID_INDIRECTENTRY :
    227 	case TAGID_ALLOCEXTENT :
    228 	case TAGID_EXTATTR_HDR :
    229 		l_ea     = 0;
    230 		has_fids = 0;
    231 		break;
    232 	default:
    233 		panic("%s: passed bad tag\n", __func__);
    234 		break;
    235 	}
    236 
    237 	/* fixup internal extended attributes if present */
    238 	if (l_ea)
    239 		udf_fixup_internal_extattr(blob, lb_num);
    240 
    241 	if (has_fids) {
    242 		udf_fixup_fid_block(blob, lb_size, rfid_pos,
    243 			max_rfid_pos, lb_num);
    244 	}
    245 	udf_validate_tag_and_crc_sums(blob);
    246 }
    247 
    248 /* --------------------------------------------------------------------- */
    249 
    250 /*
    251  * Set of generic descriptor readers and writers and their helper functions.
     252  * Descriptors inside `logical space', i.e. inside logically mapped partitions,
    253  * can never be longer than one logical sector.
    254  *
     255  * NOTE that these functions *can* be used by the scheduler backends to read
    256  * node descriptors too.
    257  *
     258  * For reading, the size of the allocated piece is a multiple of the sector
     259  * size due to udf_calc_udf_malloc_size().
    260  */
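         /*
          * A minimal usage sketch of the synchronous reader (illustrative
          * only; the sector number and the M_TEMP malloc type are
          * placeholders, not necessarily what callers in this driver use):
          *
          *	union dscrptr *dscr;
          *	int error;
          *
          *	error = udf_read_phys_dscr(ump, sector, M_TEMP, &dscr);
          *	if (error == 0 && dscr == NULL)
          *		printf("blank sector, no descriptor recorded\n");
          *	if (dscr)
          *		free(dscr, M_TEMP);
          */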
    261 
    262 
    263 /* SYNC reading of n blocks from specified sector */
    264 /* NOTE only used by udf_read_phys_dscr */
    265 static int
    266 udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
    267 	uint32_t start, uint32_t sectors)
    268 {
    269 	struct buf *buf, *nestbuf;
    270 	uint32_t buf_offset;
    271 	off_t lblkno, rblkno;
    272 	int sector_size = ump->discinfo.sector_size;
    273 	int blks = sector_size / DEV_BSIZE;
    274 	int piece;
    275 	int error;
    276 
     277 	DPRINTF(READ, ("udf_read_phys_sectors() : sectors = %d, sector_size = %d\n",
    278 		sectors, sector_size));
    279 	buf = getiobuf(ump->devvp, true);
    280 	buf->b_flags    = B_READ;
    281 	buf->b_cflags   = BC_BUSY;	/* needed? */
    282 	buf->b_iodone   = NULL;
    283 	buf->b_data     = blob;
    284 	buf->b_bcount   = sectors * sector_size;
    285 	buf->b_resid    = buf->b_bcount;
    286 	buf->b_bufsize  = buf->b_bcount;
    287 	buf->b_private  = NULL;	/* not needed yet */
    288 	BIO_SETPRIO(buf, BPRIO_DEFAULT);
    289 	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = start * blks;
    290 	buf->b_proc     = NULL;
    291 
    292 	error = 0;
    293 	buf_offset = 0;
    294 	rblkno = start;
    295 	lblkno = 0;
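         	/*
          	 * Split the transfer into MAXPHYS-sized nested buffers and queue
          	 * each one on the disc strategy; the biowait() on the master
          	 * buffer below collects the overall result.
          	 */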
    296 	while ((sectors > 0) && (error == 0)) {
    297 		piece = MIN(MAXPHYS/sector_size, sectors);
    298 		DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));
    299 
    300 		nestbuf = getiobuf(NULL, true);
    301 		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
    302 		/* nestbuf is B_ASYNC */
    303 
    304 		/* identify this nestbuf */
    305 		nestbuf->b_lblkno   = lblkno;
    306 
     307 		/* CD schedules on raw blkno */
    308 		nestbuf->b_blkno      = rblkno * blks;
    309 		nestbuf->b_proc       = NULL;
    310 		nestbuf->b_rawblkno   = rblkno * blks;
    311 		nestbuf->b_udf_c_type = what;
    312 
    313 		udf_discstrat_queuebuf(ump, nestbuf);
    314 
    315 		lblkno     += piece;
    316 		rblkno     += piece;
    317 		buf_offset += piece * sector_size;
    318 		sectors    -= piece;
    319 	}
    320 	error = biowait(buf);
    321 	putiobuf(buf);
    322 
    323 	return error;
    324 }
    325 
    326 
    327 /* synchronous generic descriptor read */
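         /*
          * Reads the descriptor at physical `sector' into newly allocated
          * memory of type `mtype'. A completely blank sector returns success
          * with *dstp set to NULL; descriptors longer than one sector are
          * reallocated and read in full.
          */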
    328 int
    329 udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
    330 		    struct malloc_type *mtype, union dscrptr **dstp)
    331 {
    332 	union dscrptr *dst, *new_dst;
    333 	uint8_t *pos;
    334 	int sectors, dscrlen;
    335 	int i, error, sector_size;
    336 
    337 	sector_size = ump->discinfo.sector_size;
    338 
    339 	*dstp = dst = NULL;
    340 	dscrlen = sector_size;
    341 
    342 	/* read initial piece */
    343 	dst = malloc(sector_size, mtype, M_WAITOK);
    344 	error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
    345 	DPRINTFIF(DESCRIPTOR, error, ("read error (%d)\n", error));
    346 
    347 	if (!error) {
     348 		/* check if it's a valid tag */
    349 		error = udf_check_tag(dst);
    350 		if (error) {
     351 			/* check if it's an empty block */
    352 			pos = (uint8_t *) dst;
    353 			for (i = 0; i < sector_size; i++, pos++) {
    354 				if (*pos) break;
    355 			}
    356 			if (i == sector_size) {
    357 				/* return no error but with no dscrptr */
    358 				/* dispose first block */
    359 				free(dst, mtype);
    360 				return 0;
    361 			}
    362 		}
    363 		/* calculate descriptor size */
    364 		dscrlen = udf_tagsize(dst, sector_size);
    365 	}
    366 	DPRINTFIF(DESCRIPTOR, error, ("bad tag checksum\n"));
    367 
    368 	if (!error && (dscrlen > sector_size)) {
    369 		DPRINTF(DESCRIPTOR, ("multi block descriptor read\n"));
    370 		/*
     371 		 * Read the rest of the descriptor. Since this path is only used
     372 		 * at mount time, it's overkill to define and use a specific
     373 		 * multi-sector read routine for this alone.
    374 		 */
    375 
    376 		new_dst = realloc(dst, dscrlen, mtype, M_WAITOK);
    377 		if (new_dst == NULL) {
    378 			free(dst, mtype);
    379 			return ENOMEM;
    380 		}
    381 		dst = new_dst;
    382 
    383 		sectors = (dscrlen + sector_size -1) / sector_size;
    384 		DPRINTF(DESCRIPTOR, ("dscrlen = %d (%d blk)\n", dscrlen, sectors));
    385 
    386 		pos = (uint8_t *) dst + sector_size;
    387 		error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
    388 				sector + 1, sectors-1);
    389 
    390 		DPRINTFIF(DESCRIPTOR, error, ("read error on multi (%d)\n",
    391 		    error));
    392 	}
    393 	if (!error) {
    394 		error = udf_check_tag_payload(dst, dscrlen);
    395 		DPRINTFIF(DESCRIPTOR, error, ("bad payload check sum\n"));
    396 	}
    397 	if (error && dst) {
    398 		free(dst, mtype);
    399 		dst = NULL;
    400 	}
    401 	*dstp = dst;
    402 
    403 	return error;
    404 }
    405 
    406 
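         /*
          * Write out `buf' by splitting it into MAXPHYS-sized nested buffers
          * queued on the disc strategy. No waiting is done here; the caller
          * either waits on the master buffer or relies on its b_iodone
          * callback.
          */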
    407 static void
    408 udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf)
    409 {
    410 	struct buf *nestbuf;
    411 	uint32_t buf_offset;
    412 	off_t lblkno, rblkno;
    413 	int sector_size = ump->discinfo.sector_size;
    414 	int blks = sector_size / DEV_BSIZE;
    415 	uint32_t sectors;
    416 	int piece;
    417 	int error;
    418 
    419 	sectors = buf->b_bcount / sector_size;
     420 	DPRINTF(WRITE, ("udf_write_phys_buf() : sectors = %d, sector_size = %d\n",
    421 		sectors, sector_size));
    422 
    423 	/* don't forget to increase pending count for the bwrite itself */
    424 /* panic("NO WRITING\n"); */
    425 	if (buf->b_vp) {
    426 		mutex_enter(&buf->b_vp->v_interlock);
    427 		buf->b_vp->v_numoutput++;
    428 		mutex_exit(&buf->b_vp->v_interlock);
    429 	}
    430 
    431 	error = 0;
    432 	buf_offset = 0;
    433 	rblkno = buf->b_blkno / blks;
    434 	lblkno = 0;
    435 	while ((sectors > 0) && (error == 0)) {
    436 		piece = MIN(MAXPHYS/sector_size, sectors);
    437 		DPRINTF(WRITE, ("write out %d + %d\n",
    438 		    (uint32_t) rblkno, piece));
    439 
    440 		nestbuf = getiobuf(NULL, true);
    441 		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
    442 		/* nestbuf is B_ASYNC */
    443 
    444 		/* identify this nestbuf */
    445 		nestbuf->b_lblkno   = lblkno;
    446 
     447 		/* CD schedules on raw blkno */
    448 		nestbuf->b_blkno      = rblkno * blks;
    449 		nestbuf->b_proc       = NULL;
    450 		nestbuf->b_rawblkno   = rblkno * blks;
    451 		nestbuf->b_udf_c_type = what;
    452 
    453 		udf_discstrat_queuebuf(ump, nestbuf);
    454 
    455 		lblkno     += piece;
    456 		rblkno     += piece;
    457 		buf_offset += piece * sector_size;
    458 		sectors    -= piece;
    459 	}
    460 }
    461 
    462 
    463 /* synchronous generic descriptor write */
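         /*
          * Note that the descriptor's tag location is set to `logsector' (its
          * address within the logical volume) while the write itself is issued
          * to physical `sector'.
          */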
    464 int
    465 udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what,
    466 		     union dscrptr *dscr, uint32_t sector, uint32_t logsector)
    467 {
    468 	struct vnode *vp;
    469 	struct buf *buf;
    470 	int sector_size = ump->discinfo.sector_size;
    471 	int blks = sector_size / DEV_BSIZE;
    472 	int dscrlen;
    473 	int error;
    474 
    475 	/* set sector number in the descriptor and validate */
    476 	dscr->tag.tag_loc = udf_rw32(logsector);
    477 	udf_validate_tag_and_crc_sums(dscr);
    478 
    479 	/* calculate descriptor size */
    480 	dscrlen = udf_tagsize(dscr, sector_size);
    481 
    482 	/* get transfer buffer */
    483 	vp = udf_node ? udf_node->vnode : ump->devvp;
    484 	buf = getiobuf(vp, true);
    485 	buf->b_flags    = B_WRITE;
    486 	buf->b_cflags   = BC_BUSY;	/* needed? */
    487 	buf->b_iodone   = NULL;
    488 	buf->b_data     = (void *) dscr;
    489 	buf->b_bcount   = dscrlen;
    490 	buf->b_resid    = buf->b_bcount;
    491 	buf->b_bufsize  = buf->b_bcount;
    492 	buf->b_private  = NULL;	/* not needed yet */
    493 	BIO_SETPRIO(buf, BPRIO_DEFAULT);
    494 	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = sector * blks;
    495 	buf->b_proc     = NULL;
    496 
    497 	/* do the write, wait and return error */
    498 	udf_write_phys_buf(ump, what, buf);
    499 	error = biowait(buf);
    500 	putiobuf(buf);
    501 
    502 	return error;
    503 }
    504 
    505 
    506 /* asynchronous generic descriptor write */
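         /*
          * Same as the synchronous variant, except that completion is reported
          * through `dscrwr_callback' (installed as b_iodone) instead of being
          * waited for here.
          */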
    507 int
    508 udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
    509 		      int what, union dscrptr *dscr,
    510 		      uint32_t sector, uint32_t logsector,
    511 		      void (*dscrwr_callback)(struct buf *))
    512 {
    513 	struct vnode *vp;
    514 	struct buf *buf;
    515 	int dscrlen;
    516 	int sector_size = ump->discinfo.sector_size;
    517 	int blks = sector_size / DEV_BSIZE;
    518 
    519 	KASSERT(dscrwr_callback);
    520 	DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));
    521 
    522 	/* set sector number in the descriptor and validate */
    523 	dscr->tag.tag_loc = udf_rw32(logsector);
    524 	udf_validate_tag_and_crc_sums(dscr);
    525 
    526 	/* calculate descriptor size */
    527 	dscrlen = udf_tagsize(dscr, sector_size);
    528 
    529 	/* get transfer buffer */
    530 	vp = udf_node ? udf_node->vnode : ump->devvp;
    531 	buf = getiobuf(vp, true);
    532 	buf->b_flags    = B_WRITE; // | B_ASYNC;
    533 	buf->b_cflags   = BC_BUSY;
    534 	buf->b_iodone	= dscrwr_callback;
    535 	buf->b_data     = dscr;
    536 	buf->b_bcount   = dscrlen;
    537 	buf->b_resid    = buf->b_bcount;
    538 	buf->b_bufsize  = buf->b_bcount;
    539 	buf->b_private  = NULL;	/* not needed yet */
    540 	BIO_SETPRIO(buf, BPRIO_DEFAULT);
    541 	buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = sector * blks;
    542 	buf->b_proc     = NULL;
    543 
     544 	/* queue the write and return; completion is signalled via the callback */
    545 	udf_write_phys_buf(ump, what, buf);
    546 	return 0;
    547 }
    548 
    549 /* --------------------------------------------------------------------- */
    550 
    551 /* disc strategy dispatchers */
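         /*
          * These are thin wrappers that pack their arguments into a struct
          * udf_strat_args and forward the call to the strategy selected at
          * mount time (ump->strategy), so callers stay independent of the
          * backend in use.
          */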
    552 
    553 int
    554 udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
    555 	union dscrptr **dscrptr)
    556 {
    557 	struct udf_strategy *strategy = ump->strategy;
    558 	struct udf_strat_args args;
    559 	int error;
    560 
    561 	args.ump  = ump;
    562 	args.udf_node = udf_node;
    563 	args.icb  = icb;
    564 	args.dscr = NULL;
    565 
    566 	error = (strategy->create_logvol_dscr)(&args);
    567 	*dscrptr = args.dscr;
    568 
    569 	return error;
    570 }
    571 
    572 
    573 void
    574 udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
    575 	void *dscr)
    576 {
    577 	struct udf_strategy *strategy = ump->strategy;
    578 	struct udf_strat_args args;
    579 
    580 	args.ump  = ump;
    581 	args.icb  = icb;
    582 	args.dscr = dscr;
    583 
    584 	(strategy->free_logvol_dscr)(&args);
    585 }
    586 
    587 
    588 int
    589 udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
    590 	union dscrptr **dscrptr)
    591 {
    592 	struct udf_strategy *strategy = ump->strategy;
    593 	struct udf_strat_args args;
    594 	int error;
    595 
    596 	args.ump  = ump;
    597 	args.icb  = icb;
    598 	args.dscr = NULL;
    599 
    600 	error = (strategy->read_logvol_dscr)(&args);
    601 	*dscrptr = args.dscr;
    602 
    603 	return error;
    604 }
    605 
    606 
    607 int
    608 udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
    609 	struct long_ad *icb, int waitfor)
    610 {
    611 	struct udf_strategy *strategy = udf_node->ump->strategy;
    612 	struct udf_strat_args args;
    613 	int error;
    614 
    615 	args.ump      = udf_node->ump;
    616 	args.udf_node = udf_node;
    617 	args.icb      = icb;
    618 	args.dscr     = dscr;
    619 	args.waitfor  = waitfor;
    620 
    621 	error = (strategy->write_logvol_dscr)(&args);
    622 	return error;
    623 }
    624 
    625 
    626 void
    627 udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
    628 {
    629 	struct udf_strategy *strategy = ump->strategy;
    630 	struct udf_strat_args args;
    631 
    632 	args.ump = ump;
    633 	args.nestbuf = nestbuf;
    634 
    635 	(strategy->queuebuf)(&args);
    636 }
    637 
    638 
    639 void
    640 udf_discstrat_init(struct udf_mount *ump)
    641 {
    642 	struct udf_strategy *strategy = ump->strategy;
    643 	struct udf_strat_args args;
    644 
    645 	args.ump = ump;
    646 	(strategy->discstrat_init)(&args);
    647 }
    648 
    649 
    650 void udf_discstrat_finish(struct udf_mount *ump)
    651 {
    652 	struct udf_strategy *strategy = ump->strategy;
    653 	struct udf_strat_args args;
    654 
    655 	args.ump = ump;
    656 	(strategy->discstrat_finish)(&args);
    657 }
    658 
    659 /* --------------------------------------------------------------------- */
    660 
    661