/* $NetBSD: udf_readwrite.c,v 1.7 2008/08/05 19:29:54 reinoud Exp $ */

/*
 * Copyright (c) 2007, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_readwrite.c,v 1.7 2008/08/05 19:29:54 reinoud Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#if defined(_KERNEL_OPT)
#include "opt_udf.h"
#endif

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

/* --------------------------------------------------------------------- */

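/*
 * Walk over the file identifier descriptors (FIDs) recorded inside an
 * internally allocated directory blob, stamp each FID's tag location with
 * the new logical block number and refresh its tag checksum.
 */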
void
udf_fixup_fid_block(uint8_t *blob, int lb_size,
    int rfix_pos, int max_rfix_pos, uint32_t lb_num)
{
        struct fileid_desc *fid;
        uint8_t *fid_pos;
        int fid_len, found;

        /* needs to be word aligned */
        KASSERT(rfix_pos % 4 == 0);

        /* first resync with the FID stream !!! */
        found = 0;
        while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
                fid_pos = blob + rfix_pos;
                fid = (struct fileid_desc *) fid_pos;
                if (udf_rw16(fid->tag.id) == TAGID_FID) {
                        if (udf_check_tag((union dscrptr *) fid) == 0)
                                found = 1;
                }
                if (found)
                        break;
                /* try next location; it can only be 4-byte aligned */
                rfix_pos += 4;
        }

        /* walk over the fids */
        fid_pos = blob + rfix_pos;
        while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
                fid = (struct fileid_desc *) fid_pos;
                if (udf_rw16(fid->tag.id) != TAGID_FID) {
                        /* end of FID stream; end of directory or corrupted */
                        break;
                }

                /* update sector number and recalculate checksum */
                fid->tag.tag_loc = udf_rw32(lb_num);
                udf_validate_tag_sum((union dscrptr *) fid);

                /* if the FID would cross the end of the blob, we're done */
                if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
                        break;

                fid_len = udf_fidsize(fid);
                fid_pos += fid_len;
                rfix_pos += fid_len;
        }
}


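/*
 * Fix up the tag location of the extended attribute header recorded inside
 * a (extended) file entry and refresh its checksums, given the new logical
 * block number the node will be recorded at.
 */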
void
udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
{
        struct desc_tag *tag;
        struct file_entry *fe;
        struct extfile_entry *efe;
        struct extattrhdr_desc *eahdr;
        int l_ea;

        /* get information from fe/efe */
        tag = (struct desc_tag *) blob;
        switch (udf_rw16(tag->id)) {
        case TAGID_FENTRY :
                fe = (struct file_entry *) blob;
                l_ea = udf_rw32(fe->l_ea);
                eahdr = (struct extattrhdr_desc *) fe->data;
                break;
        case TAGID_EXTFENTRY :
                efe = (struct extfile_entry *) blob;
                l_ea = udf_rw32(efe->l_ea);
                eahdr = (struct extattrhdr_desc *) efe->data;
                break;
        case TAGID_INDIRECTENTRY :
        case TAGID_ALLOCEXTENT :
        case TAGID_EXTATTR_HDR :
                return;
        default:
                panic("%s: passed bad tag\n", __func__);
        }

        /* something recorded here? (why am I called?) */
        if (l_ea == 0)
                return;

#if 0
        /* check extended attribute tag */
        /* TODO XXX what to do when we encounter an error here? */
        error = udf_check_tag(eahdr);
        if (error)
                return;         /* for now */
        if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
                return;         /* for now */
        error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
        if (error)
                return;         /* for now */
#endif

        DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
                l_ea));

        /* fixup eahdr tag */
        eahdr->tag.tag_loc = udf_rw32(lb_num);
        udf_validate_tag_and_crc_sums((union dscrptr *) eahdr);
}


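/*
 * Fix up everything that is recorded inside a node descriptor before it is
 * written out: clamp the CRC length of allocation extents for older UDF
 * versions, restamp internally recorded extended attributes, FID streams
 * and space bitmap descriptors with the node's logical block number, and
 * finally recalculate the node's own tag and CRC sums.
 */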
void
udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
{
        struct desc_tag *tag, *sbm_tag;
        struct file_entry *fe;
        struct extfile_entry *efe;
        struct alloc_ext_entry *ext;
        uint32_t lb_size, lb_num;
        uint32_t intern_pos, max_intern_pos;
        int icbflags, addr_type, file_type, intern, has_fids, has_sbm, l_ea;

        lb_size = udf_rw32(ump->logical_vol->lb_size);
        /* if it's not a node we're done */
        if (udf_c_type != UDF_C_NODE)
                return;

        /* NOTE this could also be done in write_internal */
        /* start of a descriptor */
        l_ea = 0;
        has_fids = 0;
        has_sbm = 0;
        intern = 0;
        file_type = 0;
        max_intern_pos = intern_pos = lb_num = 0;       /* shut up gcc! */

        tag = (struct desc_tag *) blob;
        switch (udf_rw16(tag->id)) {
        case TAGID_FENTRY :
                fe = (struct file_entry *) tag;
                l_ea = udf_rw32(fe->l_ea);
                icbflags = udf_rw16(fe->icbtag.flags);
                addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
                file_type = fe->icbtag.file_type;
                intern = (addr_type == UDF_ICB_INTERN_ALLOC);
                intern_pos = UDF_FENTRY_SIZE + l_ea;
                max_intern_pos = intern_pos + udf_rw64(fe->inf_len);
                lb_num = udf_rw32(fe->tag.tag_loc);
                break;
        case TAGID_EXTFENTRY :
                efe = (struct extfile_entry *) tag;
                l_ea = udf_rw32(efe->l_ea);
                icbflags = udf_rw16(efe->icbtag.flags);
                addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
                file_type = efe->icbtag.file_type;
                intern = (addr_type == UDF_ICB_INTERN_ALLOC);
                intern_pos = UDF_EXTFENTRY_SIZE + l_ea;
                max_intern_pos = intern_pos + udf_rw64(efe->inf_len);
                lb_num = udf_rw32(efe->tag.tag_loc);
                break;
        case TAGID_INDIRECTENTRY :
        case TAGID_EXTATTR_HDR :
                break;
        case TAGID_ALLOCEXTENT :
                /* force crclen to 8 for UDF version < 2.01 */
                ext = (struct alloc_ext_entry *) tag;
                if (udf_rw16(ump->logvol_info->min_udf_readver) <= 0x200)
                        ext->tag.desc_crc_len = udf_rw16(8);
                break;
        default:
                panic("%s: passed bad tag\n", __func__);
                break;
        }

        /* determine what to fix if it's internally recorded */
        if (intern) {
                has_fids = (file_type == UDF_ICB_FILETYPE_DIRECTORY) ||
                        (file_type == UDF_ICB_FILETYPE_STREAMDIR);
                has_sbm = (file_type == UDF_ICB_FILETYPE_META_BITMAP);
        }

        /* fixup internal extended attributes if present */
        if (l_ea)
                udf_fixup_internal_extattr(blob, lb_num);

        /* fixup fids lb numbers */
        if (has_fids)
                udf_fixup_fid_block(blob, lb_size, intern_pos,
                        max_intern_pos, lb_num);

        /* fixup space bitmap descriptor */
        if (has_sbm) {
                sbm_tag = (struct desc_tag *) (blob + intern_pos);
                sbm_tag->tag_loc = tag->tag_loc;
                udf_validate_tag_and_crc_sums((uint8_t *) sbm_tag);
        }

        udf_validate_tag_and_crc_sums(blob);
}

/* --------------------------------------------------------------------- */

/*
 * Set of generic descriptor readers and writers and their helper functions.
 * Descriptors inside `logical space', i.e. inside logically mapped
 * partitions, can never be longer than one logical sector.
 *
 * NOTE that these functions *can* be used by the scheduler backends to read
 * node descriptors too.
 *
 * For reading, the size of the allocated piece is a multiple of the sector
 * size due to udf_calc_udf_malloc_size().
 */


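/*
 * Illustrative sketch only, not used by the driver itself: a caller such as
 * the volume recognition code could read a single descriptor from the
 * device and dispose of it again roughly as follows.  The local variable
 * names and the use of the M_UDFTEMP malloc type are assumptions made for
 * the example.
 *
 *	union dscrptr *dscr;
 *	int error;
 *
 *	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &dscr);
 *	if (error == 0 && dscr == NULL)
 *		error = ENOENT;		(blank sector, no descriptor present)
 *	if (error == 0) {
 *		... use the descriptor ...
 *		free(dscr, M_UDFTEMP);
 *	}
 */
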
/* SYNC reading of n blocks from specified sector */
/* NOTE only used by udf_read_phys_dscr */
static int
udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
    uint32_t start, uint32_t sectors)
{
        struct buf *buf, *nestbuf;
        uint32_t buf_offset;
        off_t lblkno, rblkno;
        int sector_size = ump->discinfo.sector_size;
        int blks = sector_size / DEV_BSIZE;
        int piece;
        int error;

        DPRINTF(READ, ("udf_intbreadn() : sectors = %d, sector_size = %d\n",
                sectors, sector_size));
        buf = getiobuf(ump->devvp, true);
        buf->b_flags = B_READ;
        buf->b_cflags = BC_BUSY;        /* needed? */
        buf->b_iodone = NULL;
        buf->b_data = blob;
        buf->b_bcount = sectors * sector_size;
        buf->b_resid = buf->b_bcount;
        buf->b_bufsize = buf->b_bcount;
        buf->b_private = NULL;  /* not needed yet */
        BIO_SETPRIO(buf, BPRIO_DEFAULT);
        buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = start * blks;
        buf->b_proc = NULL;

        error = 0;
        buf_offset = 0;
        rblkno = start;
        lblkno = 0;
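        /*
         * Split the transfer into MAXPHYS-sized pieces: each piece is handed
         * to the disc strategy as an asynchronous nested buffer, and the
         * biowait() on the master buffer below collects their completion.
         */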
        while ((sectors > 0) && (error == 0)) {
                piece = MIN(MAXPHYS/sector_size, sectors);
                DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));

                nestbuf = getiobuf(NULL, true);
                nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
                /* nestbuf is B_ASYNC */

                /* identify this nestbuf */
                nestbuf->b_lblkno = lblkno;

                /* CD schedules on raw blkno */
                nestbuf->b_blkno = rblkno * blks;
                nestbuf->b_proc = NULL;
                nestbuf->b_rawblkno = rblkno * blks;
                nestbuf->b_udf_c_type = what;

                udf_discstrat_queuebuf(ump, nestbuf);

                lblkno += piece;
                rblkno += piece;
                buf_offset += piece * sector_size;
                sectors -= piece;
        }
        error = biowait(buf);
        putiobuf(buf);

        return error;
}


/* synchronous generic descriptor read */
int
udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
    struct malloc_type *mtype, union dscrptr **dstp)
{
        union dscrptr *dst, *new_dst;
        uint8_t *pos;
        int sectors, dscrlen;
        int i, error, sector_size;

        sector_size = ump->discinfo.sector_size;

        *dstp = dst = NULL;
        dscrlen = sector_size;

        /* read initial piece */
        dst = malloc(sector_size, mtype, M_WAITOK);
        error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
        DPRINTFIF(DESCRIPTOR, error, ("read error (%d)\n", error));

        if (!error) {
                /* check if it's a valid tag */
                error = udf_check_tag(dst);
                if (error) {
                        /* check if it's an empty block */
                        pos = (uint8_t *) dst;
                        for (i = 0; i < sector_size; i++, pos++) {
                                if (*pos) break;
                        }
                        if (i == sector_size) {
                                /* return no error but with no dscrptr */
                                /* dispose first block */
                                free(dst, mtype);
                                return 0;
                        }
                }
                /* calculate descriptor size */
                dscrlen = udf_tagsize(dst, sector_size);
        }
        DPRINTFIF(DESCRIPTOR, error, ("bad tag checksum\n"));

        if (!error && (dscrlen > sector_size)) {
                DPRINTF(DESCRIPTOR, ("multi block descriptor read\n"));
                /*
                 * Read the rest of the descriptor.  Since it is only used at
                 * mount time, it would be overkill to define and use a
                 * specific udf_intbreadn for this alone.
                 */

                new_dst = realloc(dst, dscrlen, mtype, M_WAITOK);
                if (new_dst == NULL) {
                        free(dst, mtype);
                        return ENOMEM;
                }
                dst = new_dst;

                sectors = (dscrlen + sector_size - 1) / sector_size;
                DPRINTF(DESCRIPTOR, ("dscrlen = %d (%d blk)\n", dscrlen, sectors));

                pos = (uint8_t *) dst + sector_size;
                error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
                        sector + 1, sectors - 1);

                DPRINTFIF(DESCRIPTOR, error, ("read error on multi (%d)\n",
                        error));
        }
        if (!error) {
                error = udf_check_tag_payload(dst, dscrlen);
                DPRINTFIF(DESCRIPTOR, error, ("bad payload check sum\n"));
        }
        if (error && dst) {
                free(dst, mtype);
                dst = NULL;
        }
        *dstp = dst;

        return error;
}


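/*
 * Queue the write of a prepared buffer to the disc strategy, splitting it
 * into MAXPHYS-sized asynchronous nested buffers.  The master buffer is not
 * waited for here; a caller wanting synchronous behaviour has to biowait()
 * on it itself.
 */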
static void
udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf)
{
        struct buf *nestbuf;
        uint32_t buf_offset;
        off_t lblkno, rblkno;
        int sector_size = ump->discinfo.sector_size;
        int blks = sector_size / DEV_BSIZE;
        uint32_t sectors;
        int piece;
        int error;

        sectors = buf->b_bcount / sector_size;
        DPRINTF(WRITE, ("udf_intbwriten() : sectors = %d, sector_size = %d\n",
                sectors, sector_size));

        /* don't forget to increase pending count for the bwrite itself */
        /* panic("NO WRITING\n"); */
        if (buf->b_vp) {
                mutex_enter(&buf->b_vp->v_interlock);
                buf->b_vp->v_numoutput++;
                mutex_exit(&buf->b_vp->v_interlock);
        }

        error = 0;
        buf_offset = 0;
        rblkno = buf->b_blkno / blks;
        lblkno = 0;
        while ((sectors > 0) && (error == 0)) {
                piece = MIN(MAXPHYS/sector_size, sectors);
                DPRINTF(WRITE, ("write out %d + %d\n",
                        (uint32_t) rblkno, piece));

                nestbuf = getiobuf(NULL, true);
                nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
                /* nestbuf is B_ASYNC */

                /* identify this nestbuf */
                nestbuf->b_lblkno = lblkno;

                /* CD schedules on raw blkno */
                nestbuf->b_blkno = rblkno * blks;
                nestbuf->b_proc = NULL;
                nestbuf->b_rawblkno = rblkno * blks;
                nestbuf->b_udf_c_type = what;

                udf_discstrat_queuebuf(ump, nestbuf);

                lblkno += piece;
                rblkno += piece;
                buf_offset += piece * sector_size;
                sectors -= piece;
        }
}


/* synchronous generic descriptor write */
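/*
 * Note that `sector' is the physical (device) address the buffer is queued
 * at, while `logsector' is the logical address that is stamped into the
 * descriptor's tag location before its checksums are recalculated.
 */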
int
udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what,
    union dscrptr *dscr, uint32_t sector, uint32_t logsector)
{
        struct vnode *vp;
        struct buf *buf;
        int sector_size = ump->discinfo.sector_size;
        int blks = sector_size / DEV_BSIZE;
        int dscrlen;
        int error;

        /* set sector number in the descriptor and validate */
        dscr->tag.tag_loc = udf_rw32(logsector);
        udf_validate_tag_and_crc_sums(dscr);

        /* calculate descriptor size */
        dscrlen = udf_tagsize(dscr, sector_size);

        /* get transfer buffer */
        vp = udf_node ? udf_node->vnode : ump->devvp;
        buf = getiobuf(vp, true);
        buf->b_flags = B_WRITE;
        buf->b_cflags = BC_BUSY;        /* needed? */
        buf->b_iodone = NULL;
        buf->b_data = (void *) dscr;
        buf->b_bcount = dscrlen;
        buf->b_resid = buf->b_bcount;
        buf->b_bufsize = buf->b_bcount;
        buf->b_private = NULL;  /* not needed yet */
        BIO_SETPRIO(buf, BPRIO_DEFAULT);
        buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
        buf->b_proc = NULL;

        /* do the write, wait and return error */
        udf_write_phys_buf(ump, what, buf);
        error = biowait(buf);
        putiobuf(buf);

        return error;
}


/* asynchronous generic descriptor write */
int
udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
    int what, union dscrptr *dscr,
    uint32_t sector, uint32_t logsector,
    void (*dscrwr_callback)(struct buf *))
{
        struct vnode *vp;
        struct buf *buf;
        int dscrlen;
        int sector_size = ump->discinfo.sector_size;
        int blks = sector_size / DEV_BSIZE;

        KASSERT(dscrwr_callback);
        DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));

        /* set sector number in the descriptor and validate */
        dscr->tag.tag_loc = udf_rw32(logsector);
        udf_validate_tag_and_crc_sums(dscr);

        /* calculate descriptor size */
        dscrlen = udf_tagsize(dscr, sector_size);

        /* get transfer buffer */
        vp = udf_node ? udf_node->vnode : ump->devvp;
        buf = getiobuf(vp, true);
        buf->b_flags = B_WRITE; // | B_ASYNC;
        buf->b_cflags = BC_BUSY;
        buf->b_iodone = dscrwr_callback;
        buf->b_data = dscr;
        buf->b_bcount = dscrlen;
        buf->b_resid = buf->b_bcount;
        buf->b_bufsize = buf->b_bcount;
        buf->b_private = NULL;  /* not needed yet */
        BIO_SETPRIO(buf, BPRIO_DEFAULT);
        buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
        buf->b_proc = NULL;

        /* do the write and return no error */
        udf_write_phys_buf(ump, what, buf);
        return 0;
}

/* --------------------------------------------------------------------- */

/* disc strategy dispatchers */

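/*
 * The wrappers below dispatch descriptor creation, reading, writing, freeing
 * and buffer queueing to the strategy that was selected for this mount and
 * recorded in ump->strategy.
 */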
int
udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
    union dscrptr **dscrptr)
{
        struct udf_strategy *strategy = ump->strategy;
        struct udf_strat_args args;
        int error;

        KASSERT(strategy);
        args.ump = ump;
        args.udf_node = udf_node;
        args.icb = icb;
        args.dscr = NULL;

        error = (strategy->create_logvol_dscr)(&args);
        *dscrptr = args.dscr;

        return error;
}


void
udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
    void *dscr)
{
        struct udf_strategy *strategy = ump->strategy;
        struct udf_strat_args args;

        KASSERT(strategy);
        args.ump = ump;
        args.icb = icb;
        args.dscr = dscr;

        (strategy->free_logvol_dscr)(&args);
}


int
udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
    union dscrptr **dscrptr)
{
        struct udf_strategy *strategy = ump->strategy;
        struct udf_strat_args args;
        int error;

        KASSERT(strategy);
        args.ump = ump;
        args.icb = icb;
        args.dscr = NULL;

        error = (strategy->read_logvol_dscr)(&args);
        *dscrptr = args.dscr;

        return error;
}


int
udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
    struct long_ad *icb, int waitfor)
{
        struct udf_strategy *strategy = udf_node->ump->strategy;
        struct udf_strat_args args;
        int error;

        KASSERT(strategy);
        args.ump = udf_node->ump;
        args.udf_node = udf_node;
        args.icb = icb;
        args.dscr = dscr;
        args.waitfor = waitfor;

        error = (strategy->write_logvol_dscr)(&args);
        return error;
}


void
udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
{
        struct udf_strategy *strategy = ump->strategy;
        struct udf_strat_args args;

        KASSERT(strategy);
        args.ump = ump;
        args.nestbuf = nestbuf;

        (strategy->queuebuf)(&args);
}


void
udf_discstrat_init(struct udf_mount *ump)
{
        struct udf_strategy *strategy = ump->strategy;
        struct udf_strat_args args;

        KASSERT(strategy);
        args.ump = ump;
        (strategy->discstrat_init)(&args);
}


void
udf_discstrat_finish(struct udf_mount *ump)
{
        struct udf_strategy *strategy = ump->strategy;
        struct udf_strat_args args;

        /* strategy might not have been set, so ignore if not set */
        if (strategy) {
                args.ump = ump;
                (strategy->discstrat_finish)(&args);
        }
}

/* --------------------------------------------------------------------- */