/* $NetBSD: udf_readwrite.c,v 1.4.2.1 2008/10/19 22:17:18 haad Exp $ */

/*
 * Copyright (c) 2007, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_readwrite.c,v 1.4.2.1 2008/10/19 22:17:18 haad Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

/* --------------------------------------------------------------------- */

void
udf_fixup_fid_block(uint8_t *blob, int lb_size,
	int rfix_pos, int max_rfix_pos, uint32_t lb_num)
{
	struct fileid_desc *fid;
	uint8_t *fid_pos;
	int fid_len, found;

	/* needs to be word aligned */
	KASSERT(rfix_pos % 4 == 0);

	/* first resync with the FID stream !!! */
	found = 0;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid_pos = blob + rfix_pos;
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) == TAGID_FID) {
			if (udf_check_tag((union dscrptr *) fid) == 0)
				found = 1;
		}
		if (found)
			break;
		/* try next location; can only be 4 bytes aligned */
		rfix_pos += 4;
	}

	/* walk over the fids */
	fid_pos = blob + rfix_pos;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) != TAGID_FID) {
			/* end of FID stream; end of directory or corrupted */
			break;
		}

		/* update sector number and recalculate checksum */
		fid->tag.tag_loc = udf_rw32(lb_num);
		udf_validate_tag_sum((union dscrptr *) fid);

		/* if the FID crosses the end of the memory region, we're done! */
		if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
			break;

		fid_len = udf_fidsize(fid);
		fid_pos += fid_len;
		rfix_pos += fid_len;
	}
}


void
udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
{
	struct desc_tag *tag;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct extattrhdr_desc *eahdr;
	int l_ea;

	/* get information from fe/efe */
	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) blob;
		l_ea = udf_rw32(fe->l_ea);
		eahdr = (struct extattrhdr_desc *) fe->data;
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) blob;
		l_ea = udf_rw32(efe->l_ea);
		eahdr = (struct extattrhdr_desc *) efe->data;
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_ALLOCEXTENT :
	case TAGID_EXTATTR_HDR :
		return;
	default:
		panic("%s: passed bad tag\n", __func__);
	}

	/* something recorded here? (why am I called?) */
	if (l_ea == 0)
		return;

#if 0
	/* check extended attribute tag */
	/* TODO XXX what to do when we encounter an error here? */
	error = udf_check_tag(eahdr);
	if (error)
		return;		/* for now */
	if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
		return;		/* for now */
	error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
	if (error)
		return;		/* for now */
#endif

	DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
		l_ea));

	/* fixup eahdr tag */
	eahdr->tag.tag_loc = udf_rw32(lb_num);
	udf_validate_tag_and_crc_sums((union dscrptr *) eahdr);
}


void
udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
{
	struct desc_tag *tag, *sbm_tag;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	uint32_t lb_size, lb_num;
	uint32_t intern_pos, max_intern_pos;
	int icbflags, addr_type, file_type, intern, has_fids, has_sbm, l_ea;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* if it's not a node we're done */
	if (udf_c_type != UDF_C_NODE)
		return;

	/* NOTE this could also be done in write_internal */
	/* start of a descriptor */
	l_ea = 0;
	has_fids = 0;
	has_sbm = 0;
	intern = 0;
	file_type = 0;
	max_intern_pos = intern_pos = lb_num = 0;	/* shut up gcc! */

	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) tag;
		l_ea = udf_rw32(fe->l_ea);
		icbflags = udf_rw16(fe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		file_type = fe->icbtag.file_type;
		intern = (addr_type == UDF_ICB_INTERN_ALLOC);
		intern_pos = UDF_FENTRY_SIZE + l_ea;
		max_intern_pos = intern_pos + udf_rw64(fe->inf_len);
		lb_num = udf_rw32(fe->tag.tag_loc);
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) tag;
		l_ea = udf_rw32(efe->l_ea);
		icbflags = udf_rw16(efe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		file_type = efe->icbtag.file_type;
		intern = (addr_type == UDF_ICB_INTERN_ALLOC);
		intern_pos = UDF_EXTFENTRY_SIZE + l_ea;
		max_intern_pos = intern_pos + udf_rw64(efe->inf_len);
		lb_num = udf_rw32(efe->tag.tag_loc);
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_EXTATTR_HDR :
		break;
	case TAGID_ALLOCEXTENT :
		/* force crclen to 8 for UDF version < 2.01 */
		ext = (struct alloc_ext_entry *) tag;
		if (udf_rw16(ump->logvol_info->min_udf_readver) <= 0x200)
			ext->tag.desc_crc_len = udf_rw16(8);
		break;
	default:
		panic("%s: passed bad tag\n", __func__);
		break;
	}

	/* determine what to fix if it's internally recorded */
	if (intern) {
		has_fids = (file_type == UDF_ICB_FILETYPE_DIRECTORY) ||
			(file_type == UDF_ICB_FILETYPE_STREAMDIR);
		has_sbm = (file_type == UDF_ICB_FILETYPE_META_BITMAP);
	}

	/* fixup internal extended attributes if present */
	if (l_ea)
		udf_fixup_internal_extattr(blob, lb_num);

	/* fixup fids lb numbers */
	if (has_fids)
		udf_fixup_fid_block(blob, lb_size, intern_pos,
			max_intern_pos, lb_num);

	/* fixup space bitmap descriptor */
	if (has_sbm) {
		sbm_tag = (struct desc_tag *) (blob + intern_pos);
		sbm_tag->tag_loc = tag->tag_loc;
		udf_validate_tag_and_crc_sums((uint8_t *) sbm_tag);
	}

	udf_validate_tag_and_crc_sums(blob);
}

/* --------------------------------------------------------------------- */

/*
 * Set of generic descriptor readers and writers and their helper functions.
 * Descriptors inside `logical space' i.e. inside logically mapped partitions
 * can never be longer than one logical sector.
 *
 * NOTE that these functions *can* be used by the scheduler backends to read
 * node descriptors too.
 *
 * For reading, the size of the allocated piece is returned in multiples of
 * the sector size due to udf_calc_udf_malloc_size().
 */
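
/*
 * Illustrative only: a minimal sketch (not part of the driver) of how the
 * generic reader/writer pair below is typically used together.  It assumes
 * the caller already has a mounted udf_mount with discinfo filled in, that
 * M_UDFTEMP is an appropriate malloc type declared in udf.h, and the
 * example_reloc_dscr() name is purely hypothetical; the real callers in
 * udf_subr.c are the authoritative examples.
 */
#if 0
static int
example_reloc_dscr(struct udf_mount *ump, uint32_t src, uint32_t dst)
{
	union dscrptr *dscr;
	int error;

	/* read and validate one descriptor from physical sector `src' */
	error = udf_read_phys_dscr(ump, src, M_UDFTEMP, &dscr);
	if (error)
		return error;
	if (dscr == NULL)	/* empty sector, no descriptor recorded */
		return ENOENT;

	/* rewrite it at `dst'; tag_loc and the CRCs are refreshed for us */
	error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR, dscr,
		dst, dst);

	free(dscr, M_UDFTEMP);
	return error;
}
#endif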


/* SYNC reading of n blocks from specified sector */
/* NOTE only used by udf_read_phys_dscr */
static int
udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
	uint32_t start, uint32_t sectors)
{
	struct buf *buf, *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int piece;
	int error;

	DPRINTF(READ, ("udf_intbreadn() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));
	buf = getiobuf(ump->devvp, true);
	buf->b_flags = B_READ;
	buf->b_cflags = BC_BUSY;	/* needed? */
	buf->b_iodone = NULL;
	buf->b_data = blob;
	buf->b_bcount = sectors * sector_size;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = start * blks;
	buf->b_proc = NULL;

	error = 0;
	buf_offset = 0;
	rblkno = start;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno = rblkno * blks;
		nestbuf->b_proc = NULL;
		nestbuf->b_rawblkno = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno += piece;
		rblkno += piece;
		buf_offset += piece * sector_size;
		sectors -= piece;
	}
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


/* synchronous generic descriptor read */
int
udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
	struct malloc_type *mtype, union dscrptr **dstp)
{
	union dscrptr *dst, *new_dst;
	uint8_t *pos;
	int sectors, dscrlen;
	int i, error, sector_size;

	sector_size = ump->discinfo.sector_size;

	*dstp = dst = NULL;
	dscrlen = sector_size;

	/* read initial piece */
	dst = malloc(sector_size, mtype, M_WAITOK);
	error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
	DPRINTFIF(DESCRIPTOR, error, ("read error (%d)\n", error));

	if (!error) {
		/* check if it's a valid tag */
		error = udf_check_tag(dst);
		if (error) {
			/* check if it's an empty block */
			pos = (uint8_t *) dst;
			for (i = 0; i < sector_size; i++, pos++) {
				if (*pos) break;
			}
			if (i == sector_size) {
				/* return no error but with no dscrptr */
				/* dispose first block */
				free(dst, mtype);
				return 0;
			}
		}
		/* calculate descriptor size */
		dscrlen = udf_tagsize(dst, sector_size);
	}
	DPRINTFIF(DESCRIPTOR, error, ("bad tag checksum\n"));

	if (!error && (dscrlen > sector_size)) {
		DPRINTF(DESCRIPTOR, ("multi block descriptor read\n"));
		/*
		 * Read the rest of the descriptor. Since it is only used at
		 * mount time, it's overkill to define and use a specific
		 * udf_intbreadn for this alone.
		 */

		new_dst = realloc(dst, dscrlen, mtype, M_WAITOK);
		if (new_dst == NULL) {
			free(dst, mtype);
			return ENOMEM;
		}
		dst = new_dst;

		sectors = (dscrlen + sector_size - 1) / sector_size;
		DPRINTF(DESCRIPTOR, ("dscrlen = %d (%d blk)\n", dscrlen, sectors));

		pos = (uint8_t *) dst + sector_size;
		error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
			sector + 1, sectors - 1);

		DPRINTFIF(DESCRIPTOR, error, ("read error on multi (%d)\n",
			error));
	}
	if (!error) {
		error = udf_check_tag_payload(dst, dscrlen);
		DPRINTFIF(DESCRIPTOR, error, ("bad payload check sum\n"));
	}
	if (error && dst) {
		free(dst, mtype);
		dst = NULL;
	}
	*dstp = dst;

	return error;
}


static void
udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf)
{
	struct buf *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	uint32_t sectors;
	int piece;
	int error;

	sectors = buf->b_bcount / sector_size;
	DPRINTF(WRITE, ("udf_intbwriten() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));

	/* don't forget to increase pending count for the bwrite itself */
	/* panic("NO WRITING\n"); */
	if (buf->b_vp) {
		mutex_enter(&buf->b_vp->v_interlock);
		buf->b_vp->v_numoutput++;
		mutex_exit(&buf->b_vp->v_interlock);
	}

	error = 0;
	buf_offset = 0;
	rblkno = buf->b_blkno / blks;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(WRITE, ("write out %d + %d\n",
			(uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno = rblkno * blks;
		nestbuf->b_proc = NULL;
		nestbuf->b_rawblkno = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno += piece;
		rblkno += piece;
		buf_offset += piece * sector_size;
		sectors -= piece;
	}
}


/* synchronous generic descriptor write */
int
udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what,
	union dscrptr *dscr, uint32_t sector, uint32_t logsector)
{
	struct vnode *vp;
	struct buf *buf;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int dscrlen;
	int error;

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags = B_WRITE;
	buf->b_cflags = BC_BUSY;	/* needed? */
	buf->b_iodone = NULL;
	buf->b_data = (void *) dscr;
	buf->b_bcount = dscrlen;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc = NULL;

	/* do the write, wait and return error */
	udf_write_phys_buf(ump, what, buf);
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


/* asynchronous generic descriptor write */
int
udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
	int what, union dscrptr *dscr,
	uint32_t sector, uint32_t logsector,
	void (*dscrwr_callback)(struct buf *))
{
	struct vnode *vp;
	struct buf *buf;
	int dscrlen;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;

	KASSERT(dscrwr_callback);
	DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags = B_WRITE; // | B_ASYNC;
	buf->b_cflags = BC_BUSY;
	buf->b_iodone = dscrwr_callback;
	buf->b_data = dscr;
	buf->b_bcount = dscrlen;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc = NULL;

	/* do the write and return no error */
	udf_write_phys_buf(ump, what, buf);
	return 0;
}

/* --------------------------------------------------------------------- */

/* disc strategy dispatchers */

int
udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump = ump;
	args.udf_node = udf_node;
	args.icb = icb;
	args.dscr = NULL;

	error = (strategy->create_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


void
udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	void *dscr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	args.icb = icb;
	args.dscr = dscr;

	(strategy->free_logvol_dscr)(&args);
}


int
udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump = ump;
	args.icb = icb;
	args.dscr = NULL;

	error = (strategy->read_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


int
udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
	struct long_ad *icb, int waitfor)
{
	struct udf_strategy *strategy = udf_node->ump->strategy;
	struct udf_strat_args args;
	int error;

	KASSERT(strategy);
	args.ump = udf_node->ump;
	args.udf_node = udf_node;
	args.icb = icb;
	args.dscr = dscr;
	args.waitfor = waitfor;

	error = (strategy->write_logvol_dscr)(&args);
	return error;
}


void
udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	args.nestbuf = nestbuf;

	(strategy->queuebuf)(&args);
}


void
udf_discstrat_init(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	KASSERT(strategy);
	args.ump = ump;
	(strategy->discstrat_init)(&args);
}


void
udf_discstrat_finish(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	/* strategy might not have been set, so ignore if not set */
	if (strategy) {
		args.ump = ump;
		(strategy->discstrat_finish)(&args);
	}
}

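/*
 * Illustrative only: a rough sketch of how a scheduler backend could hook
 * into the dispatchers above.  The member names are taken from the calls
 * in this file, but the real struct udf_strategy is declared elsewhere
 * (see udf.h) and may contain additional members; the example_* functions
 * are hypothetical placeholders, and the existing strategy modules in
 * fs/udf remain the authoritative examples.
 */
#if 0
static struct udf_strategy example_strategy = {
	.create_logvol_dscr	= example_create_logvol_dscr,
	.free_logvol_dscr	= example_free_logvol_dscr,
	.read_logvol_dscr	= example_read_logvol_dscr,
	.write_logvol_dscr	= example_write_logvol_dscr,
	.queuebuf		= example_queuebuf,
	.discstrat_init		= example_discstrat_init,
	.discstrat_finish	= example_discstrat_finish,
};

/* a mount would then select it with: ump->strategy = &example_strategy; */
#endif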

/* --------------------------------------------------------------------- */