1 /* $NetBSD: udf_subr.c,v 1.71 2008/08/06 13:41:12 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29
30 #include <sys/cdefs.h>
31 #ifndef lint
32 __KERNEL_RCSID(0, "$NetBSD: udf_subr.c,v 1.71 2008/08/06 13:41:12 reinoud Exp $");
33 #endif /* not lint */
34
35
36 #if defined(_KERNEL_OPT)
37 #include "opt_quota.h"
38 #include "opt_compat_netbsd.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <fs/unicode.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
76
77 #define UDF_SET_SYSTEMFILE(vp) \
78 /* XXXAD Is the vnode locked? */ \
79 (vp)->v_vflag |= VV_SYSTEM; \
80 vref(vp); \
81 vput(vp); \
82
83 extern int syncer_maxdelay; /* maximum delay time */
84 extern int (**udf_vnodeop_p)(void *);
85
86 /* --------------------------------------------------------------------- */
87
88 //#ifdef DEBUG
89 #if 1
90
91 #if 0
92 static void
93 udf_dumpblob(uint8_t *blob, uint32_t dlen)
94 {
95 int i, j;
96
97 printf("blob = %p\n", blob);
98 printf("dump of %d bytes\n", dlen);
99
100 	for (i = 0; i < dlen; i += 16) {
101 printf("%04x ", i);
102 for (j = 0; j < 16; j++) {
103 if (i+j < dlen) {
104 printf("%02x ", blob[i+j]);
105 } else {
106 printf(" ");
107 }
108 }
109 for (j = 0; j < 16; j++) {
110 if (i+j < dlen) {
111 				if (blob[i+j] > 32 && blob[i+j] != 127) {
112 printf("%c", blob[i+j]);
113 } else {
114 printf(".");
115 }
116 }
117 }
118 printf("\n");
119 }
120 printf("\n");
121 Debugger();
122 }
123 #endif
124
125 static void
126 udf_dump_discinfo(struct udf_mount *ump)
127 {
128 char bits[128];
129 struct mmc_discinfo *di = &ump->discinfo;
130
131 if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
132 return;
133
134 printf("Device/media info :\n");
135 printf("\tMMC profile 0x%02x\n", di->mmc_profile);
136 printf("\tderived class %d\n", di->mmc_class);
137 printf("\tsector size %d\n", di->sector_size);
138 printf("\tdisc state %d\n", di->disc_state);
139 printf("\tlast ses state %d\n", di->last_session_state);
140 printf("\tbg format state %d\n", di->bg_format_state);
141 printf("\tfrst track %d\n", di->first_track);
142 printf("\tfst on last ses %d\n", di->first_track_last_session);
143 printf("\tlst on last ses %d\n", di->last_track_last_session);
144 printf("\tlink block penalty %d\n", di->link_block_penalty);
145 bitmask_snprintf(di->disc_flags, MMC_DFLAGS_FLAGBITS, bits,
146 sizeof(bits));
147 printf("\tdisc flags %s\n", bits);
148 printf("\tdisc id %x\n", di->disc_id);
149 printf("\tdisc barcode %"PRIx64"\n", di->disc_barcode);
150
151 printf("\tnum sessions %d\n", di->num_sessions);
152 printf("\tnum tracks %d\n", di->num_tracks);
153
154 bitmask_snprintf(di->mmc_cur, MMC_CAP_FLAGBITS, bits, sizeof(bits));
155 printf("\tcapabilities cur %s\n", bits);
156 bitmask_snprintf(di->mmc_cap, MMC_CAP_FLAGBITS, bits, sizeof(bits));
157 printf("\tcapabilities cap %s\n", bits);
158 }
159 #else
160 #define udf_dump_discinfo(a)
161 #endif
162
163
164 /* --------------------------------------------------------------------- */
165
166 /* not called often */
167 int
168 udf_update_discinfo(struct udf_mount *ump)
169 {
170 struct vnode *devvp = ump->devvp;
171 struct partinfo dpart;
172 struct mmc_discinfo *di;
173 int error;
174
175 DPRINTF(VOLUMES, ("read/update disc info\n"));
176 di = &ump->discinfo;
177 memset(di, 0, sizeof(struct mmc_discinfo));
178
179 	/* check if we're on an MMC capable device, i.e. CD/DVD */
180 error = VOP_IOCTL(devvp, MMCGETDISCINFO, di, FKIOCTL, NOCRED);
181 if (error == 0) {
182 udf_dump_discinfo(ump);
183 return 0;
184 }
185
186 /* disc partition support */
187 error = VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED);
188 if (error)
189 return ENODEV;
190
191 /* set up a disc info profile for partitions */
192 di->mmc_profile = 0x01; /* disc type */
193 di->mmc_class = MMC_CLASS_DISC;
194 di->disc_state = MMC_STATE_CLOSED;
195 di->last_session_state = MMC_STATE_CLOSED;
196 di->bg_format_state = MMC_BGFSTATE_COMPLETED;
197 di->link_block_penalty = 0;
198
199 di->mmc_cur = MMC_CAP_RECORDABLE | MMC_CAP_REWRITABLE |
200 MMC_CAP_ZEROLINKBLK | MMC_CAP_HW_DEFECTFREE;
201 di->mmc_cap = di->mmc_cur;
202 di->disc_flags = MMC_DFLAGS_UNRESTRICTED;
203
204 /* TODO problem with last_possible_lba on resizable VND; request */
205 di->last_possible_lba = dpart.part->p_size;
206 di->sector_size = dpart.disklab->d_secsize;
207
208 di->num_sessions = 1;
209 di->num_tracks = 1;
210
211 di->first_track = 1;
212 di->first_track_last_session = di->last_track_last_session = 1;
213
214 udf_dump_discinfo(ump);
215 return 0;
216 }
217
218
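/*
 * Fill in *ti for the track number already set in ti->tracknr. On MMC class
 * media the information is requested from the drive with MMCGETTRACKINFO;
 * for plain disc partitions a single fake track covering the whole partition
 * is synthesised instead.
 */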
219 int
220 udf_update_trackinfo(struct udf_mount *ump, struct mmc_trackinfo *ti)
221 {
222 struct vnode *devvp = ump->devvp;
223 struct mmc_discinfo *di = &ump->discinfo;
224 int error, class;
225
226 DPRINTF(VOLUMES, ("read track info\n"));
227
228 class = di->mmc_class;
229 if (class != MMC_CLASS_DISC) {
230 /* tracknr specified in struct ti */
231 error = VOP_IOCTL(devvp, MMCGETTRACKINFO, ti, FKIOCTL, NOCRED);
232 return error;
233 }
234
235 /* disc partition support */
236 if (ti->tracknr != 1)
237 return EIO;
238
239 /* create fake ti (TODO check for resized vnds) */
240 ti->sessionnr = 1;
241
242 ti->track_mode = 0; /* XXX */
243 ti->data_mode = 0; /* XXX */
244 ti->flags = MMC_TRACKINFO_LRA_VALID | MMC_TRACKINFO_NWA_VALID;
245
246 ti->track_start = 0;
247 ti->packet_size = 1;
248
249 /* TODO support for resizable vnd */
250 ti->track_size = di->last_possible_lba;
251 ti->next_writable = di->last_possible_lba;
252 ti->last_recorded = ti->next_writable;
253 ti->free_blocks = 0;
254
255 return 0;
256 }
257
258
259 int
260 udf_setup_writeparams(struct udf_mount *ump)
261 {
262 struct mmc_writeparams mmc_writeparams;
263 int error;
264
265 if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
266 return 0;
267
268 /*
269 * only CD burning normally needs setting up, but other disc types
270 * might need other settings to be made. The MMC framework will set up
271 	 * the necessary recording parameters according to the disc
272 * characteristics read in. Modifications can be made in the discinfo
273 * structure passed to change the nature of the disc.
274 */
275
276 memset(&mmc_writeparams, 0, sizeof(struct mmc_writeparams));
277 mmc_writeparams.mmc_class = ump->discinfo.mmc_class;
278 mmc_writeparams.mmc_cur = ump->discinfo.mmc_cur;
279
280 /*
281 * UDF dictates first track to determine track mode for the whole
282 * disc. [UDF 1.50/6.10.1.1, UDF 1.50/6.10.2.1]
283 * To prevent problems with a `reserved' track in front we start with
284 * the 2nd track and if that is not valid, go for the 1st.
285 */
286 mmc_writeparams.tracknr = 2;
287 mmc_writeparams.data_mode = MMC_DATAMODE_DEFAULT; /* XA disc */
288 mmc_writeparams.track_mode = MMC_TRACKMODE_DEFAULT; /* data */
289
290 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS, &mmc_writeparams,
291 FKIOCTL, NOCRED);
292 if (error) {
293 mmc_writeparams.tracknr = 1;
294 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS,
295 &mmc_writeparams, FKIOCTL, NOCRED);
296 }
297 return error;
298 }
299
300
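/*
 * Flush the drive's write cache with an MMC SYNCHRONISECACHE operation. This
 * is a no-op for read-only mounts and for plain disc partitions; the return
 * code of the ioctl is deliberately ignored.
 */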
301 int
302 udf_synchronise_caches(struct udf_mount *ump)
303 {
304 struct mmc_op mmc_op;
305
306 DPRINTF(CALL, ("udf_synchronise_caches()\n"));
307
308 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
309 return 0;
310
311 /* discs are done now */
312 if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
313 return 0;
314
315 bzero(&mmc_op, sizeof(struct mmc_op));
316 mmc_op.operation = MMC_OP_SYNCHRONISECACHE;
317
318 /* ignore return code */
319 (void) VOP_IOCTL(ump->devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED);
320
321 return 0;
322 }
323
324 /* --------------------------------------------------------------------- */
325
326 /* track/session searching for mounting */
327 int
328 udf_search_tracks(struct udf_mount *ump, struct udf_args *args,
329 int *first_tracknr, int *last_tracknr)
330 {
331 struct mmc_trackinfo trackinfo;
332 uint32_t tracknr, start_track, num_tracks;
333 int error;
334
335 /* if negative, sessionnr is relative to last session */
336 if (args->sessionnr < 0) {
337 args->sessionnr += ump->discinfo.num_sessions;
338 }
339
340 /* sanity */
341 if (args->sessionnr < 0)
342 args->sessionnr = 0;
343 if (args->sessionnr > ump->discinfo.num_sessions)
344 args->sessionnr = ump->discinfo.num_sessions;
345
346 /* search the tracks for this session, zero session nr indicates last */
347 if (args->sessionnr == 0)
348 args->sessionnr = ump->discinfo.num_sessions;
349 if (ump->discinfo.last_session_state == MMC_STATE_EMPTY)
350 args->sessionnr--;
351
352 /* sanity again */
353 if (args->sessionnr < 0)
354 args->sessionnr = 0;
355
356 /* search the first and last track of the specified session */
357 num_tracks = ump->discinfo.num_tracks;
358 start_track = ump->discinfo.first_track;
359
360 /* search for first track of this session */
361 for (tracknr = start_track; tracknr <= num_tracks; tracknr++) {
362 /* get track info */
363 trackinfo.tracknr = tracknr;
364 error = udf_update_trackinfo(ump, &trackinfo);
365 if (error)
366 return error;
367
368 if (trackinfo.sessionnr == args->sessionnr)
369 break;
370 }
371 *first_tracknr = tracknr;
372
373 /* search for last track of this session */
374 	for (; tracknr <= num_tracks; tracknr++) {
375 /* get track info */
376 trackinfo.tracknr = tracknr;
377 error = udf_update_trackinfo(ump, &trackinfo);
378 if (error || (trackinfo.sessionnr != args->sessionnr)) {
379 tracknr--;
380 break;
381 }
382 }
383 if (tracknr > num_tracks)
384 tracknr--;
385
386 *last_tracknr = tracknr;
387
388 if (*last_tracknr < *first_tracknr) {
389 printf( "udf_search_tracks: sanity check on drive+disc failed, "
390 "drive returned garbage\n");
391 return EINVAL;
392 }
393
394 assert(*last_tracknr >= *first_tracknr);
395 return 0;
396 }
397
398
399 /*
400 * NOTE: this is the only routine in this file that directly peeks into the
401  * metadata file, but since it's at a larval state of the mount it can't hurt.
402 *
403 * XXX candidate for udf_allocation.c
404 * XXX clean me up!, change to new node reading code.
405 */
406
407 static void
408 udf_check_track_metadata_overlap(struct udf_mount *ump,
409 struct mmc_trackinfo *trackinfo)
410 {
411 struct part_desc *part;
412 struct file_entry *fe;
413 struct extfile_entry *efe;
414 struct short_ad *s_ad;
415 struct long_ad *l_ad;
416 uint32_t track_start, track_end;
417 uint32_t phys_part_start, phys_part_end, part_start, part_end;
418 uint32_t sector_size, len, alloclen, plb_num;
419 uint8_t *pos;
420 int addr_type, icblen, icbflags, flags;
421
422 /* get our track extents */
423 track_start = trackinfo->track_start;
424 track_end = track_start + trackinfo->track_size;
425
426 /* get our base partition extent */
427 KASSERT(ump->node_part == ump->fids_part);
428 part = ump->partitions[ump->node_part];
429 phys_part_start = udf_rw32(part->start_loc);
430 phys_part_end = phys_part_start + udf_rw32(part->part_len);
431
432 	/* no use if it's outside the physical partition */
433 if ((phys_part_start >= track_end) || (phys_part_end < track_start))
434 return;
435
436 /*
437 * now follow all extents in the fe/efe to see if they refer to this
438 * track
439 */
440
441 sector_size = ump->discinfo.sector_size;
442
443 /* XXX should we claim exclusive access to the metafile ? */
444 /* TODO: move to new node read code */
445 fe = ump->metadata_node->fe;
446 efe = ump->metadata_node->efe;
447 if (fe) {
448 alloclen = udf_rw32(fe->l_ad);
449 pos = &fe->data[0] + udf_rw32(fe->l_ea);
450 icbflags = udf_rw16(fe->icbtag.flags);
451 } else {
452 assert(efe);
453 alloclen = udf_rw32(efe->l_ad);
454 pos = &efe->data[0] + udf_rw32(efe->l_ea);
455 icbflags = udf_rw16(efe->icbtag.flags);
456 }
457 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
458
459 while (alloclen) {
460 if (addr_type == UDF_ICB_SHORT_ALLOC) {
461 icblen = sizeof(struct short_ad);
462 s_ad = (struct short_ad *) pos;
463 len = udf_rw32(s_ad->len);
464 plb_num = udf_rw32(s_ad->lb_num);
465 } else {
466 /* should not be present, but why not */
467 icblen = sizeof(struct long_ad);
468 l_ad = (struct long_ad *) pos;
469 len = udf_rw32(l_ad->len);
470 plb_num = udf_rw32(l_ad->loc.lb_num);
471 /* pvpart_num = udf_rw16(l_ad->loc.part_num); */
472 }
473 /* process extent */
474 flags = UDF_EXT_FLAGS(len);
475 len = UDF_EXT_LEN(len);
476
477 part_start = phys_part_start + plb_num;
478 part_end = part_start + (len / sector_size);
479
480 if ((part_start >= track_start) && (part_end <= track_end)) {
481 /* extent is enclosed within this track */
482 ump->metadata_track = *trackinfo;
483 return;
484 }
485
486 pos += icblen;
487 alloclen -= icblen;
488 }
489 }
490
491
492 int
493 udf_search_writing_tracks(struct udf_mount *ump)
494 {
495 struct mmc_trackinfo trackinfo;
496 struct part_desc *part;
497 uint32_t tracknr, start_track, num_tracks;
498 uint32_t track_start, track_end, part_start, part_end;
499 int node_alloc, error;
500
501 /*
502 * in the CD/(HD)DVD/BD recordable device model a few tracks within
503 * the last session might be open but in the UDF device model at most
504 * three tracks can be open: a reserved track for delayed ISO VRS
505 * writing, a data track and a metadata track. We search here for the
506 * data track and the metadata track. Note that the reserved track is
507 * troublesome but can be detected by its small size of < 512 sectors.
508 */
509
510 num_tracks = ump->discinfo.num_tracks;
511 start_track = ump->discinfo.first_track;
512
513 /* fetch info on first and possibly only track */
514 trackinfo.tracknr = start_track;
515 error = udf_update_trackinfo(ump, &trackinfo);
516 if (error)
517 return error;
518
519 /* copy results to our mount point */
520 ump->data_track = trackinfo;
521 ump->metadata_track = trackinfo;
522
523 /* if not sequential, we're done */
524 if (num_tracks == 1)
525 return 0;
526
527 	for (tracknr = start_track; tracknr <= num_tracks; tracknr++) {
528 /* get track info */
529 trackinfo.tracknr = tracknr;
530 error = udf_update_trackinfo(ump, &trackinfo);
531 if (error)
532 return error;
533
534 if ((trackinfo.flags & MMC_TRACKINFO_NWA_VALID) == 0)
535 continue;
536
537 track_start = trackinfo.track_start;
538 track_end = track_start + trackinfo.track_size;
539
540 /* check for overlap on data partition */
541 part = ump->partitions[ump->data_part];
542 part_start = udf_rw32(part->start_loc);
543 part_end = part_start + udf_rw32(part->part_len);
544 if ((part_start < track_end) && (part_end > track_start)) {
545 ump->data_track = trackinfo;
546 /* TODO check if UDF partition data_part is writable */
547 }
548
549 /* check for overlap on metadata partition */
550 node_alloc = ump->vtop_alloc[ump->node_part];
551 if ((node_alloc == UDF_ALLOC_METASEQUENTIAL) ||
552 (node_alloc == UDF_ALLOC_METABITMAP)) {
553 udf_check_track_metadata_overlap(ump, &trackinfo);
554 } else {
555 ump->metadata_track = trackinfo;
556 }
557 }
558
559 if ((ump->data_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
560 return EROFS;
561
562 if ((ump->metadata_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
563 return EROFS;
564
565 return 0;
566 }
567
568 /* --------------------------------------------------------------------- */
569
570 /*
571  * Check if the blob starts with a good UDF tag. Tags are protected by a
572  * checksum over the header, except for the one byte at position 4 that is
573  * the checksum itself.
574 */
575
576 int
577 udf_check_tag(void *blob)
578 {
579 struct desc_tag *tag = blob;
580 uint8_t *pos, sum, cnt;
581
582 /* check TAG header checksum */
583 pos = (uint8_t *) tag;
584 sum = 0;
585
586 for(cnt = 0; cnt < 16; cnt++) {
587 if (cnt != 4)
588 sum += *pos;
589 pos++;
590 }
591 if (sum != tag->cksum) {
592 /* bad tag header checksum; this is not a valid tag */
593 return EINVAL;
594 }
595
596 return 0;
597 }
598
599
600 /*
601  * udf_check_tag_payload() checks the descriptor CRC as specified.
602  * If the descriptor is too long it returns EIO; on a CRC mismatch, EINVAL.
603 */
604
605 int
606 udf_check_tag_payload(void *blob, uint32_t max_length)
607 {
608 struct desc_tag *tag = blob;
609 uint16_t crc, crc_len;
610
611 crc_len = udf_rw16(tag->desc_crc_len);
612
613 /* check payload CRC if applicable */
614 if (crc_len == 0)
615 return 0;
616
617 if (crc_len > max_length)
618 return EIO;
619
620 crc = udf_cksum(((uint8_t *) tag) + UDF_DESC_TAG_LENGTH, crc_len);
621 if (crc != udf_rw16(tag->desc_crc)) {
622 /* bad payload CRC; this is a broken tag */
623 return EINVAL;
624 }
625
626 return 0;
627 }
628
629
630 void
631 udf_validate_tag_sum(void *blob)
632 {
633 struct desc_tag *tag = blob;
634 uint8_t *pos, sum, cnt;
635
636 /* calculate TAG header checksum */
637 pos = (uint8_t *) tag;
638 sum = 0;
639
640 for(cnt = 0; cnt < 16; cnt++) {
641 if (cnt != 4) sum += *pos;
642 pos++;
643 }
644 tag->cksum = sum; /* 8 bit */
645 }
646
647
648 /* assumes the sector number of the descriptor to be saved is already present */
649 void
650 udf_validate_tag_and_crc_sums(void *blob)
651 {
652 struct desc_tag *tag = blob;
653 uint8_t *btag = (uint8_t *) tag;
654 uint16_t crc, crc_len;
655
656 crc_len = udf_rw16(tag->desc_crc_len);
657
658 /* check payload CRC if applicable */
659 if (crc_len > 0) {
660 crc = udf_cksum(btag + UDF_DESC_TAG_LENGTH, crc_len);
661 tag->desc_crc = udf_rw16(crc);
662 }
663
664 /* calculate TAG header checksum */
665 udf_validate_tag_sum(blob);
666 }
667
668 /* --------------------------------------------------------------------- */
669
670 /*
671 * XXX note the different semantics from udfclient: for FIDs it still rounds
672 * up to sectors. Use udf_fidsize() for a correct length.
673 */
674
675 int
676 udf_tagsize(union dscrptr *dscr, uint32_t lb_size)
677 {
678 uint32_t size, tag_id, num_lb, elmsz;
679
680 tag_id = udf_rw16(dscr->tag.id);
681
682 switch (tag_id) {
683 case TAGID_LOGVOL :
684 size = sizeof(struct logvol_desc) - 1;
685 size += udf_rw32(dscr->lvd.mt_l);
686 break;
687 case TAGID_UNALLOC_SPACE :
688 elmsz = sizeof(struct extent_ad);
689 size = sizeof(struct unalloc_sp_desc) - elmsz;
690 size += udf_rw32(dscr->usd.alloc_desc_num) * elmsz;
691 break;
692 case TAGID_FID :
693 size = UDF_FID_SIZE + dscr->fid.l_fi + udf_rw16(dscr->fid.l_iu);
694 size = (size + 3) & ~3;
695 break;
696 case TAGID_LOGVOL_INTEGRITY :
697 size = sizeof(struct logvol_int_desc) - sizeof(uint32_t);
698 size += udf_rw32(dscr->lvid.l_iu);
699 size += (2 * udf_rw32(dscr->lvid.num_part) * sizeof(uint32_t));
700 break;
701 case TAGID_SPACE_BITMAP :
702 size = sizeof(struct space_bitmap_desc) - 1;
703 size += udf_rw32(dscr->sbd.num_bytes);
704 break;
705 case TAGID_SPARING_TABLE :
706 elmsz = sizeof(struct spare_map_entry);
707 size = sizeof(struct udf_sparing_table) - elmsz;
708 size += udf_rw16(dscr->spt.rt_l) * elmsz;
709 break;
710 case TAGID_FENTRY :
711 size = sizeof(struct file_entry);
712 size += udf_rw32(dscr->fe.l_ea) + udf_rw32(dscr->fe.l_ad)-1;
713 break;
714 case TAGID_EXTFENTRY :
715 size = sizeof(struct extfile_entry);
716 size += udf_rw32(dscr->efe.l_ea) + udf_rw32(dscr->efe.l_ad)-1;
717 break;
718 case TAGID_FSD :
719 size = sizeof(struct fileset_desc);
720 break;
721 default :
722 size = sizeof(union dscrptr);
723 break;
724 }
725
726 if ((size == 0) || (lb_size == 0))
727 return 0;
728
729 if (lb_size == 1)
730 return size;
731
732 /* round up in sectors */
733 num_lb = (size + lb_size -1) / lb_size;
734 return num_lb * lb_size;
735 }
736
737
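/*
 * Return the real, 4-byte aligned size of a FID. Unlike udf_tagsize(), this
 * does not round up to sector boundaries.
 */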
738 int
739 udf_fidsize(struct fileid_desc *fid)
740 {
741 uint32_t size;
742
743 if (udf_rw16(fid->tag.id) != TAGID_FID)
744 panic("got udf_fidsize on non FID\n");
745
746 size = UDF_FID_SIZE + fid->l_fi + udf_rw16(fid->l_iu);
747 size = (size + 3) & ~3;
748
749 return size;
750 }
751
752 /* --------------------------------------------------------------------- */
753
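/*
 * Cooperative node locking: wait for the IN_LOCKED flag to clear under
 * node_mutex, waking up every hz/8 ticks to report (under the LOCKING debug
 * flag) where the lock was taken and who is waiting for it.
 */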
754 void
755 udf_lock_node(struct udf_node *udf_node, int flag, char const *fname, const int lineno)
756 {
757 int ret;
758
759 mutex_enter(&udf_node->node_mutex);
760 /* wait until free */
761 while (udf_node->i_flags & IN_LOCKED) {
762 ret = cv_timedwait(&udf_node->node_lock, &udf_node->node_mutex, hz/8);
763 /* TODO check if we should return error; abort */
764 if (ret == EWOULDBLOCK) {
765 DPRINTF(LOCKING, ( "udf_lock_node: udf_node %p would block "
766 "wanted at %s:%d, previously locked at %s:%d\n",
767 udf_node, fname, lineno,
768 udf_node->lock_fname, udf_node->lock_lineno));
769 }
770 }
771 /* grab */
772 udf_node->i_flags |= IN_LOCKED | flag;
773 /* debug */
774 udf_node->lock_fname = fname;
775 udf_node->lock_lineno = lineno;
776
777 mutex_exit(&udf_node->node_mutex);
778 }
779
780
781 void
782 udf_unlock_node(struct udf_node *udf_node, int flag)
783 {
784 mutex_enter(&udf_node->node_mutex);
785 udf_node->i_flags &= ~(IN_LOCKED | flag);
786 cv_broadcast(&udf_node->node_lock);
787 mutex_exit(&udf_node->node_mutex);
788 }
789
790
791 /* --------------------------------------------------------------------- */
792
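/*
 * Read one descriptor at `sector' and accept it only if it carries an anchor
 * (TAGID_ANCHOR) tag; blank blocks and other descriptor types yield ENOENT.
 */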
793 static int
794 udf_read_anchor(struct udf_mount *ump, uint32_t sector, struct anchor_vdp **dst)
795 {
796 int error;
797
798 error = udf_read_phys_dscr(ump, sector, M_UDFVOLD,
799 (union dscrptr **) dst);
800 if (!error) {
801 /* blank terminator blocks are not allowed here */
802 if (*dst == NULL)
803 return ENOENT;
804 if (udf_rw16((*dst)->tag.id) != TAGID_ANCHOR) {
805 error = ENOENT;
806 free(*dst, M_UDFVOLD);
807 *dst = NULL;
808 DPRINTF(VOLUMES, ("Not an anchor\n"));
809 }
810 }
811
812 return error;
813 }
814
815
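/*
 * Locate the session selected in the mount arguments and try to read the
 * anchors at start+256, start+512, end-256 and end. Returns the number of
 * anchors found; errors reading the disc geometry itself return 0.
 */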
816 int
817 udf_read_anchors(struct udf_mount *ump)
818 {
819 struct udf_args *args = &ump->mount_args;
820 struct mmc_trackinfo first_track;
821 struct mmc_trackinfo second_track;
822 struct mmc_trackinfo last_track;
823 struct anchor_vdp **anchorsp;
824 uint32_t track_start;
825 uint32_t track_end;
826 uint32_t positions[4];
827 int first_tracknr, last_tracknr;
828 int error, anch, ok, first_anchor;
829
830 /* search the first and last track of the specified session */
831 error = udf_search_tracks(ump, args, &first_tracknr, &last_tracknr);
832 if (!error) {
833 first_track.tracknr = first_tracknr;
834 error = udf_update_trackinfo(ump, &first_track);
835 }
836 if (!error) {
837 last_track.tracknr = last_tracknr;
838 error = udf_update_trackinfo(ump, &last_track);
839 }
840 if ((!error) && (first_tracknr != last_tracknr)) {
841 second_track.tracknr = first_tracknr+1;
842 error = udf_update_trackinfo(ump, &second_track);
843 }
844 if (error) {
845 printf("UDF mount: reading disc geometry failed\n");
846 return 0;
847 }
848
849 track_start = first_track.track_start;
850
851 	/* `end' is not as straightforward as start. */
852 track_end = last_track.track_start
853 + last_track.track_size - last_track.free_blocks - 1;
854
855 if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
856 		/* end of track is not straightforward here */
857 if (last_track.flags & MMC_TRACKINFO_LRA_VALID)
858 track_end = last_track.last_recorded;
859 else if (last_track.flags & MMC_TRACKINFO_NWA_VALID)
860 track_end = last_track.next_writable
861 - ump->discinfo.link_block_penalty;
862 }
863
864 	/* it's no use reading a blank track */
865 first_anchor = 0;
866 if (first_track.flags & MMC_TRACKINFO_BLANK)
867 first_anchor = 1;
868
869 /* get our packet size */
870 ump->packet_size = first_track.packet_size;
871 if (first_track.flags & MMC_TRACKINFO_BLANK)
872 ump->packet_size = second_track.packet_size;
873
874 if (ump->packet_size <= 1) {
875 /* take max, but not bigger than 64 */
876 ump->packet_size = MAXPHYS / ump->discinfo.sector_size;
877 ump->packet_size = MIN(ump->packet_size, 64);
878 }
879 KASSERT(ump->packet_size >= 1);
880
881 /* read anchors start+256, start+512, end-256, end */
882 positions[0] = track_start+256;
883 positions[1] = track_end-256;
884 positions[2] = track_end;
885 positions[3] = track_start+512; /* [UDF 2.60/6.11.2] */
886 	/* XXX shouldn't +512 be preferred over +256 for compat with Roxio CD? */
887
888 ok = 0;
889 anchorsp = ump->anchors;
890 for (anch = first_anchor; anch < 4; anch++) {
891 DPRINTF(VOLUMES, ("Read anchor %d at sector %d\n", anch,
892 positions[anch]));
893 error = udf_read_anchor(ump, positions[anch], anchorsp);
894 if (!error) {
895 anchorsp++;
896 ok++;
897 }
898 }
899
900 	/* VATs are only recorded on sequential media, but initialise the locations anyway */
901 ump->first_possible_vat_location = track_start + 2;
902 ump->last_possible_vat_location = track_end + last_track.packet_size;
903
904 return ok;
905 }
906
907 /* --------------------------------------------------------------------- */
908
909 /* we don't try to be smart; we just record the parts */
910 #define UDF_UPDATE_DSCR(name, dscr) \
911 if (name) \
912 free(name, M_UDFVOLD); \
913 name = dscr;
914
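/*
 * File a single VDS descriptor into its slot in the mount structure,
 * replacing any earlier version. Ownership of `dscr' passes to the ump;
 * descriptors that are not kept are freed here.
 */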
915 static int
916 udf_process_vds_descriptor(struct udf_mount *ump, union dscrptr *dscr)
917 {
918 struct part_desc *part;
919 uint16_t phys_part, raw_phys_part;
920
921 DPRINTF(VOLUMES, ("\tprocessing VDS descr %d\n",
922 udf_rw16(dscr->tag.id)));
923 switch (udf_rw16(dscr->tag.id)) {
924 	case TAGID_PRI_VOL :		/* primary volume */
925 UDF_UPDATE_DSCR(ump->primary_vol, &dscr->pvd);
926 break;
927 case TAGID_LOGVOL : /* logical volume */
928 UDF_UPDATE_DSCR(ump->logical_vol, &dscr->lvd);
929 break;
930 case TAGID_UNALLOC_SPACE : /* unallocated space */
931 UDF_UPDATE_DSCR(ump->unallocated, &dscr->usd);
932 break;
933 case TAGID_IMP_VOL : /* implementation */
934 /* XXX do we care about multiple impl. descr ? */
935 UDF_UPDATE_DSCR(ump->implementation, &dscr->ivd);
936 break;
937 case TAGID_PARTITION : /* physical partition */
938 		/* not much use if it's not allocated */
939 if ((udf_rw16(dscr->pd.flags) & UDF_PART_FLAG_ALLOCATED) == 0) {
940 free(dscr, M_UDFVOLD);
941 break;
942 }
943
944 /*
945 * BUGALERT: some rogue implementations use random physical
946 		 * partition numbers to break other implementations, so look up
947 * the number.
948 */
949 raw_phys_part = udf_rw16(dscr->pd.part_num);
950 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
951 part = ump->partitions[phys_part];
952 if (part == NULL)
953 break;
954 if (udf_rw16(part->part_num) == raw_phys_part)
955 break;
956 }
957 if (phys_part == UDF_PARTITIONS) {
958 free(dscr, M_UDFVOLD);
959 return EINVAL;
960 }
961
962 UDF_UPDATE_DSCR(ump->partitions[phys_part], &dscr->pd);
963 break;
964 case TAGID_VOL : /* volume space extender; rare */
965 DPRINTF(VOLUMES, ("VDS extender ignored\n"));
966 free(dscr, M_UDFVOLD);
967 break;
968 default :
969 DPRINTF(VOLUMES, ("Unhandled VDS type %d\n",
970 udf_rw16(dscr->tag.id)));
971 free(dscr, M_UDFVOLD);
972 }
973
974 return 0;
975 }
976 #undef UDF_UPDATE_DSCR
977
978 /* --------------------------------------------------------------------- */
979
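/*
 * Walk one VDS extent, processing each descriptor in turn until a blank
 * block or a TERM descriptor terminates the sequence. `loc' is a sector
 * number, `len' is in bytes.
 */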
980 static int
981 udf_read_vds_extent(struct udf_mount *ump, uint32_t loc, uint32_t len)
982 {
983 union dscrptr *dscr;
984 uint32_t sector_size, dscr_size;
985 int error;
986
987 sector_size = ump->discinfo.sector_size;
988
989 /* loc is sectornr, len is in bytes */
990 error = EIO;
991 while (len) {
992 error = udf_read_phys_dscr(ump, loc, M_UDFVOLD, &dscr);
993 if (error)
994 return error;
995
996 /* blank block is a terminator */
997 if (dscr == NULL)
998 return 0;
999
1000 /* TERM descriptor is a terminator */
1001 if (udf_rw16(dscr->tag.id) == TAGID_TERM) {
1002 free(dscr, M_UDFVOLD);
1003 return 0;
1004 }
1005
1006 /* process all others */
1007 dscr_size = udf_tagsize(dscr, sector_size);
1008 error = udf_process_vds_descriptor(ump, dscr);
1009 if (error) {
1010 free(dscr, M_UDFVOLD);
1011 break;
1012 }
1013 assert((dscr_size % sector_size) == 0);
1014
1015 len -= dscr_size;
1016 loc += dscr_size / sector_size;
1017 }
1018
1019 return error;
1020 }
1021
1022
1023 int
1024 udf_read_vds_space(struct udf_mount *ump)
1025 {
1026 /* struct udf_args *args = &ump->mount_args; */
1027 struct anchor_vdp *anchor, *anchor2;
1028 size_t size;
1029 uint32_t main_loc, main_len;
1030 uint32_t reserve_loc, reserve_len;
1031 int error;
1032
1033 /*
1034 * read in VDS space provided by the anchors; if one descriptor read
1035 * fails, try the mirror sector.
1036 *
1037 * check if 2nd anchor is different from 1st; if so, go for 2nd. This
1038 * avoids the `compatibility features' of DirectCD that may confuse
1039 * stuff completely.
1040 */
1041
1042 anchor = ump->anchors[0];
1043 anchor2 = ump->anchors[1];
1044 assert(anchor);
1045
1046 if (anchor2) {
1047 size = sizeof(struct extent_ad);
1048 if (memcmp(&anchor->main_vds_ex, &anchor2->main_vds_ex, size))
1049 anchor = anchor2;
1050 /* reserve is specified to be a literal copy of main */
1051 }
1052
1053 main_loc = udf_rw32(anchor->main_vds_ex.loc);
1054 main_len = udf_rw32(anchor->main_vds_ex.len);
1055
1056 reserve_loc = udf_rw32(anchor->reserve_vds_ex.loc);
1057 reserve_len = udf_rw32(anchor->reserve_vds_ex.len);
1058
1059 error = udf_read_vds_extent(ump, main_loc, main_len);
1060 if (error) {
1061 printf("UDF mount: reading in reserve VDS extent\n");
1062 error = udf_read_vds_extent(ump, reserve_loc, reserve_len);
1063 }
1064
1065 return error;
1066 }
1067
1068 /* --------------------------------------------------------------------- */
1069
1070 /*
1071 * Read in the logical volume integrity sequence pointed to by our logical
1072  * volume descriptor. It's a sequence that can be extended using fields in
1073  * the integrity descriptor itself. On sequential media only one is found;
1074  * on rewritable media a sequence of descriptors can be found as a form of
1075  * history keeping, and on non-sequential write-once media the chain is vital
1076 * to allow more and more descriptors to be written. The last descriptor
1077 * written in an extent needs to claim space for a new extent.
1078 */
1079
1080 static int
1081 udf_retrieve_lvint(struct udf_mount *ump)
1082 {
1083 union dscrptr *dscr;
1084 struct logvol_int_desc *lvint;
1085 struct udf_lvintq *trace;
1086 uint32_t lb_size, lbnum, len;
1087 int dscr_type, error, trace_len;
1088
1089 lb_size = udf_rw32(ump->logical_vol->lb_size);
1090 len = udf_rw32(ump->logical_vol->integrity_seq_loc.len);
1091 lbnum = udf_rw32(ump->logical_vol->integrity_seq_loc.loc);
1092
1093 /* clean trace */
1094 memset(ump->lvint_trace, 0,
1095 UDF_LVDINT_SEGMENTS * sizeof(struct udf_lvintq));
1096
1097 trace_len = 0;
1098 trace = ump->lvint_trace;
1099 trace->start = lbnum;
1100 trace->end = lbnum + len/lb_size;
1101 trace->pos = 0;
1102 trace->wpos = 0;
1103
1104 lvint = NULL;
1105 dscr = NULL;
1106 error = 0;
1107 while (len) {
1108 trace->pos = lbnum - trace->start;
1109 trace->wpos = trace->pos + 1;
1110
1111 /* read in our integrity descriptor */
1112 error = udf_read_phys_dscr(ump, lbnum, M_UDFVOLD, &dscr);
1113 if (!error) {
1114 if (dscr == NULL) {
1115 trace->wpos = trace->pos;
1116 break; /* empty terminates */
1117 }
1118 dscr_type = udf_rw16(dscr->tag.id);
1119 if (dscr_type == TAGID_TERM) {
1120 trace->wpos = trace->pos;
1121 break; /* clean terminator */
1122 }
1123 if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
1124 /* fatal... corrupt disc */
1125 error = ENOENT;
1126 break;
1127 }
1128 if (lvint)
1129 free(lvint, M_UDFVOLD);
1130 lvint = &dscr->lvid;
1131 dscr = NULL;
1132 } /* else hope for the best... maybe the next is ok */
1133
1134 DPRINTFIF(VOLUMES, lvint, ("logvol integrity read, state %s\n",
1135 udf_rw32(lvint->integrity_type) ? "CLOSED" : "OPEN"));
1136
1137 /* proceed sequential */
1138 lbnum += 1;
1139 len -= lb_size;
1140
1141 /* are we linking to a new piece? */
1142 if (dscr && lvint->next_extent.len) {
1143 len = udf_rw32(lvint->next_extent.len);
1144 lbnum = udf_rw32(lvint->next_extent.loc);
1145
1146 if (trace_len >= UDF_LVDINT_SEGMENTS-1) {
1147 /* IEK! segment link full... */
1148 DPRINTF(VOLUMES, ("lvdint segments full\n"));
1149 error = EINVAL;
1150 } else {
1151 trace++;
1152 trace_len++;
1153
1154 trace->start = lbnum;
1155 trace->end = lbnum + len/lb_size;
1156 trace->pos = 0;
1157 trace->wpos = 0;
1158 }
1159 }
1160 }
1161
1162 /* clean up the mess, esp. when there is an error */
1163 if (dscr)
1164 free(dscr, M_UDFVOLD);
1165
1166 if (error && lvint) {
1167 free(lvint, M_UDFVOLD);
1168 lvint = NULL;
1169 }
1170
1171 if (!lvint)
1172 error = ENOENT;
1173
1174 ump->logvol_integrity = lvint;
1175 return error;
1176 }
1177
1178
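/*
 * Make room in a full integrity sequence by dropping the oldest entries and
 * rewriting the remaining descriptors to the front of the recorded extents,
 * patching the next_extent links while copying.
 */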
1179 static int
1180 udf_loose_lvint_history(struct udf_mount *ump)
1181 {
1182 union dscrptr **bufs, *dscr, *last_dscr;
1183 struct udf_lvintq *trace, *in_trace, *out_trace;
1184 struct logvol_int_desc *lvint;
1185 uint32_t in_ext, in_pos, in_len;
1186 uint32_t out_ext, out_wpos, out_len;
1187 uint32_t lb_size, packet_size, lb_num;
1188 uint32_t len, start;
1189 int ext, minext, extlen, cnt, cpy_len, dscr_type;
1190 int losing;
1191 int error;
1192
1193 DPRINTF(VOLUMES, ("need to lose some lvint history\n"));
1194
1195 lb_size = udf_rw32(ump->logical_vol->lb_size);
1196 packet_size = ump->data_track.packet_size; /* XXX data track */
1197
1198 /* search smallest extent */
1199 trace = &ump->lvint_trace[0];
1200 minext = trace->end - trace->start;
1201 for (ext = 1; ext < UDF_LVDINT_SEGMENTS; ext++) {
1202 trace = &ump->lvint_trace[ext];
1203 extlen = trace->end - trace->start;
1204 if (extlen == 0)
1205 break;
1206 minext = MIN(minext, extlen);
1207 }
1208 losing = MIN(minext, UDF_LVINT_LOSSAGE);
1209 /* no sense wiping all */
1210 if (losing == minext)
1211 losing--;
1212
1213 DPRINTF(VOLUMES, ("\tlosing %d entries\n", losing));
1214
1215 /* get buffer for pieces */
1216 bufs = malloc(UDF_LVDINT_SEGMENTS * sizeof(void *), M_TEMP, M_WAITOK);
1217
1218 in_ext = 0;
1219 in_pos = losing;
1220 in_trace = &ump->lvint_trace[in_ext];
1221 in_len = in_trace->end - in_trace->start;
1222 out_ext = 0;
1223 out_wpos = 0;
1224 out_trace = &ump->lvint_trace[out_ext];
1225 out_len = out_trace->end - out_trace->start;
1226
1227 last_dscr = NULL;
1228 for(;;) {
1229 out_trace->pos = out_wpos;
1230 out_trace->wpos = out_trace->pos;
1231 if (in_pos >= in_len) {
1232 in_ext++;
1233 in_pos = 0;
1234 in_trace = &ump->lvint_trace[in_ext];
1235 in_len = in_trace->end - in_trace->start;
1236 }
1237 if (out_wpos >= out_len) {
1238 out_ext++;
1239 out_wpos = 0;
1240 out_trace = &ump->lvint_trace[out_ext];
1241 out_len = out_trace->end - out_trace->start;
1242 }
1243 /* copy overlap contents */
1244 cpy_len = MIN(in_len - in_pos, out_len - out_wpos);
1245 cpy_len = MIN(cpy_len, in_len - in_trace->pos);
1246 if (cpy_len == 0)
1247 break;
1248
1249 /* copy */
1250 DPRINTF(VOLUMES, ("\treading %d lvid descriptors\n", cpy_len));
1251 for (cnt = 0; cnt < cpy_len; cnt++) {
1252 /* read in our integrity descriptor */
1253 lb_num = in_trace->start + in_pos + cnt;
1254 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD,
1255 &dscr);
1256 if (error) {
1257 /* copy last one */
1258 dscr = last_dscr;
1259 }
1260 bufs[cnt] = dscr;
1261 if (!error) {
1262 if (dscr == NULL) {
1263 out_trace->pos = out_wpos + cnt;
1264 out_trace->wpos = out_trace->pos;
1265 break; /* empty terminates */
1266 }
1267 dscr_type = udf_rw16(dscr->tag.id);
1268 if (dscr_type == TAGID_TERM) {
1269 out_trace->pos = out_wpos + cnt;
1270 out_trace->wpos = out_trace->pos;
1271 break; /* clean terminator */
1272 }
1273 if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
1274 panic( "UDF integrity sequence "
1275 "corrupted while mounted!\n");
1276 }
1277 last_dscr = dscr;
1278 }
1279 }
1280
1281 /* patch up if first entry was on error */
1282 if (bufs[0] == NULL) {
1283 for (cnt = 0; cnt < cpy_len; cnt++)
1284 if (bufs[cnt] != NULL)
1285 break;
1286 last_dscr = bufs[cnt];
1287 for (; cnt > 0; cnt--) {
1288 bufs[cnt] = last_dscr;
1289 }
1290 }
1291
1292 /* glue + write out */
1293 DPRINTF(VOLUMES, ("\twriting %d lvid descriptors\n", cpy_len));
1294 for (cnt = 0; cnt < cpy_len; cnt++) {
1295 lb_num = out_trace->start + out_wpos + cnt;
1296 lvint = &bufs[cnt]->lvid;
1297
1298 /* set continuation */
1299 len = 0;
1300 start = 0;
1301 if (out_wpos + cnt == out_len) {
1302 /* get continuation */
1303 trace = &ump->lvint_trace[out_ext+1];
1304 len = trace->end - trace->start;
1305 start = trace->start;
1306 }
1307 lvint->next_extent.len = udf_rw32(len);
1308 lvint->next_extent.loc = udf_rw32(start);
1309
1310 lb_num = trace->start + trace->wpos;
1311 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1312 bufs[cnt], lb_num, lb_num);
1313 DPRINTFIF(VOLUMES, error,
1314 ("error writing lvint lb_num\n"));
1315 }
1316
1317 /* free non repeating descriptors */
1318 last_dscr = NULL;
1319 for (cnt = 0; cnt < cpy_len; cnt++) {
1320 if (bufs[cnt] != last_dscr)
1321 free(bufs[cnt], M_UDFVOLD);
1322 last_dscr = bufs[cnt];
1323 }
1324
1325 /* advance */
1326 in_pos += cpy_len;
1327 out_wpos += cpy_len;
1328 }
1329
1330 free(bufs, M_TEMP);
1331
1332 return 0;
1333 }
1334
1335
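/*
 * Append the current logvol integrity descriptor at the write position of
 * the integrity sequence, followed by a terminator when space permits. When
 * the sequence is full, history is sacrificed first unless the append-only
 * flag forbids it, in which case EROFS is returned.
 */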
1336 static int
1337 udf_writeout_lvint(struct udf_mount *ump, int lvflag)
1338 {
1339 struct udf_lvintq *trace;
1340 struct timeval now_v;
1341 struct timespec now_s;
1342 uint32_t sector;
1343 int logvol_integrity;
1344 int space, error;
1345
1346 DPRINTF(VOLUMES, ("writing out logvol integrity descriptor\n"));
1347
1348 again:
1349 /* get free space in last chunk */
1350 trace = ump->lvint_trace;
1351 while (trace->wpos > (trace->end - trace->start)) {
1352 DPRINTF(VOLUMES, ("skip : start = %d, end = %d, pos = %d, "
1353 "wpos = %d\n", trace->start, trace->end,
1354 trace->pos, trace->wpos));
1355 trace++;
1356 }
1357
1358 /* check if there is space to append */
1359 space = (trace->end - trace->start) - trace->wpos;
1360 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
1361 "space = %d\n", trace->start, trace->end, trace->pos,
1362 trace->wpos, space));
1363
1364 /* get state */
1365 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
1366 if (logvol_integrity == UDF_INTEGRITY_CLOSED) {
1367 if ((space < 3) && (lvflag & UDF_APPENDONLY_LVINT)) {
1368 /* don't allow this logvol to be opened */
1369 			/* TODO extend LVINT space if possible */
1370 return EROFS;
1371 }
1372 }
1373
1374 if (space < 1) {
1375 if (lvflag & UDF_APPENDONLY_LVINT)
1376 return EROFS;
1377 		/* lose history by rewriting extents */
1378 error = udf_loose_lvint_history(ump);
1379 if (error)
1380 return error;
1381 goto again;
1382 }
1383
1384 /* update our integrity descriptor to identify us and timestamp it */
1385 DPRINTF(VOLUMES, ("updating integrity descriptor\n"));
1386 microtime(&now_v);
1387 TIMEVAL_TO_TIMESPEC(&now_v, &now_s);
1388 udf_timespec_to_timestamp(&now_s, &ump->logvol_integrity->time);
1389 udf_set_regid(&ump->logvol_info->impl_id, IMPL_NAME);
1390 udf_add_impl_regid(ump, &ump->logvol_info->impl_id);
1391
1392 /* writeout integrity descriptor */
1393 sector = trace->start + trace->wpos;
1394 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1395 (union dscrptr *) ump->logvol_integrity,
1396 sector, sector);
1397 DPRINTF(VOLUMES, ("writeout lvint : error = %d\n", error));
1398 if (error)
1399 return error;
1400
1401 /* advance write position */
1402 trace->wpos++; space--;
1403 if (space >= 1) {
1404 /* append terminator */
1405 sector = trace->start + trace->wpos;
1406 error = udf_write_terminator(ump, sector);
1407
1408 DPRINTF(VOLUMES, ("write terminator : error = %d\n", error));
1409 }
1410
1411 space = (trace->end - trace->start) - trace->wpos;
1412 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
1413 "space = %d\n", trace->start, trace->end, trace->pos,
1414 trace->wpos, space));
1415 DPRINTF(VOLUMES, ("finished writing out logvol integrity descriptor "
1416 "successfull\n"));
1417
1418 return error;
1419 }
1420
1421 /* --------------------------------------------------------------------- */
1422
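/*
 * Read the unallocated and freed space bitmaps of all recorded physical
 * partitions into memory. Space *tables* are not supported and cause the
 * mount to be refused with EROFS; unreadable or blank bitmap blocks do too.
 */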
1423 static int
1424 udf_read_physical_partition_spacetables(struct udf_mount *ump)
1425 {
1426 union dscrptr *dscr;
1427 /* struct udf_args *args = &ump->mount_args; */
1428 struct part_desc *partd;
1429 struct part_hdr_desc *parthdr;
1430 struct udf_bitmap *bitmap;
1431 uint32_t phys_part;
1432 uint32_t lb_num, len;
1433 int error, dscr_type;
1434
1435 /* unallocated space map */
1436 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1437 partd = ump->partitions[phys_part];
1438 if (partd == NULL)
1439 continue;
1440 parthdr = &partd->_impl_use.part_hdr;
1441
1442 lb_num = udf_rw32(partd->start_loc);
1443 lb_num += udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
1444 len = udf_rw32(parthdr->unalloc_space_bitmap.len);
1445 if (len == 0)
1446 continue;
1447
1448 DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num));
1449 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
1450 if (!error && dscr) {
1451 /* analyse */
1452 dscr_type = udf_rw16(dscr->tag.id);
1453 if (dscr_type == TAGID_SPACE_BITMAP) {
1454 DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
1455 ump->part_unalloc_dscr[phys_part] = &dscr->sbd;
1456
1457 /* fill in ump->part_unalloc_bits */
1458 bitmap = &ump->part_unalloc_bits[phys_part];
1459 bitmap->blob = (uint8_t *) dscr;
1460 bitmap->bits = dscr->sbd.data;
1461 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1462 bitmap->pages = NULL; /* TODO */
1463 bitmap->data_pos = 0;
1464 bitmap->metadata_pos = 0;
1465 } else {
1466 free(dscr, M_UDFVOLD);
1467
1468 printf( "UDF mount: error reading unallocated "
1469 "space bitmap\n");
1470 return EROFS;
1471 }
1472 } else {
1473 /* blank not allowed */
1474 printf("UDF mount: blank unallocated space bitmap\n");
1475 return EROFS;
1476 }
1477 }
1478
1479 /* unallocated space table (not supported) */
1480 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1481 partd = ump->partitions[phys_part];
1482 if (partd == NULL)
1483 continue;
1484 parthdr = &partd->_impl_use.part_hdr;
1485
1486 len = udf_rw32(parthdr->unalloc_space_table.len);
1487 if (len) {
1488 printf("UDF mount: space tables not supported\n");
1489 return EROFS;
1490 }
1491 }
1492
1493 /* freed space map */
1494 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1495 partd = ump->partitions[phys_part];
1496 if (partd == NULL)
1497 continue;
1498 parthdr = &partd->_impl_use.part_hdr;
1499
1500 /* freed space map */
1501 lb_num = udf_rw32(partd->start_loc);
1502 lb_num += udf_rw32(parthdr->freed_space_bitmap.lb_num);
1503 len = udf_rw32(parthdr->freed_space_bitmap.len);
1504 if (len == 0)
1505 continue;
1506
1507 		DPRINTF(VOLUMES, ("Read freed space bitmap %d\n", lb_num));
1508 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
1509 if (!error && dscr) {
1510 /* analyse */
1511 dscr_type = udf_rw16(dscr->tag.id);
1512 if (dscr_type == TAGID_SPACE_BITMAP) {
1513 DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
1514 ump->part_freed_dscr[phys_part] = &dscr->sbd;
1515
1516 /* fill in ump->part_freed_bits */
1517 				bitmap = &ump->part_freed_bits[phys_part];
1518 bitmap->blob = (uint8_t *) dscr;
1519 bitmap->bits = dscr->sbd.data;
1520 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1521 bitmap->pages = NULL; /* TODO */
1522 bitmap->data_pos = 0;
1523 bitmap->metadata_pos = 0;
1524 } else {
1525 free(dscr, M_UDFVOLD);
1526
1527 printf( "UDF mount: error reading freed "
1528 "space bitmap\n");
1529 return EROFS;
1530 }
1531 } else {
1532 /* blank not allowed */
1533 printf("UDF mount: blank freed space bitmap\n");
1534 return EROFS;
1535 }
1536 }
1537
1538 /* freed space table (not supported) */
1539 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1540 partd = ump->partitions[phys_part];
1541 if (partd == NULL)
1542 continue;
1543 parthdr = &partd->_impl_use.part_hdr;
1544
1545 len = udf_rw32(parthdr->freed_space_table.len);
1546 if (len) {
1547 printf("UDF mount: space tables not supported\n");
1548 return EROFS;
1549 }
1550 }
1551
1552 return 0;
1553 }
1554
1555
1556 /* TODO implement async writeout */
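/*
 * Write the in-core unallocated and freed space bitmaps back to their
 * recorded locations; the last error encountered (if any) is returned.
 */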
1557 int
1558 udf_write_physical_partition_spacetables(struct udf_mount *ump, int waitfor)
1559 {
1560 union dscrptr *dscr;
1561 /* struct udf_args *args = &ump->mount_args; */
1562 struct part_desc *partd;
1563 struct part_hdr_desc *parthdr;
1564 uint32_t phys_part;
1565 uint32_t lb_num, len, ptov;
1566 int error_all, error;
1567
1568 error_all = 0;
1569 /* unallocated space map */
1570 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1571 partd = ump->partitions[phys_part];
1572 if (partd == NULL)
1573 continue;
1574 parthdr = &partd->_impl_use.part_hdr;
1575
1576 ptov = udf_rw32(partd->start_loc);
1577 lb_num = udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
1578 len = udf_rw32(parthdr->unalloc_space_bitmap.len);
1579 if (len == 0)
1580 continue;
1581
1582 DPRINTF(VOLUMES, ("Write unalloc. space bitmap %d\n",
1583 lb_num + ptov));
1584 dscr = (union dscrptr *) ump->part_unalloc_dscr[phys_part];
1585 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1586 (union dscrptr *) dscr,
1587 ptov + lb_num, lb_num);
1588 if (error) {
1589 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
1590 error_all = error;
1591 }
1592 }
1593
1594 /* freed space map */
1595 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1596 partd = ump->partitions[phys_part];
1597 if (partd == NULL)
1598 continue;
1599 parthdr = &partd->_impl_use.part_hdr;
1600
1601 /* freed space map */
1602 ptov = udf_rw32(partd->start_loc);
1603 lb_num = udf_rw32(parthdr->freed_space_bitmap.lb_num);
1604 len = udf_rw32(parthdr->freed_space_bitmap.len);
1605 if (len == 0)
1606 continue;
1607
1608 DPRINTF(VOLUMES, ("Write freed space bitmap %d\n",
1609 lb_num + ptov));
1610 dscr = (union dscrptr *) ump->part_freed_dscr[phys_part];
1611 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1612 (union dscrptr *) dscr,
1613 ptov + lb_num, lb_num);
1614 if (error) {
1615 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
1616 error_all = error;
1617 }
1618 }
1619
1620 return error_all;
1621 }
1622
1623
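/*
 * Read the complete metadata bitmap file into one memory blob through its
 * vnode and hook it up as the unallocated space bitmap of the metadata
 * partition. A missing bitmap node simply means there is nothing to do.
 */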
1624 static int
1625 udf_read_metadata_partition_spacetable(struct udf_mount *ump)
1626 {
1627 struct udf_node *bitmap_node;
1628 union dscrptr *dscr;
1629 struct udf_bitmap *bitmap;
1630 uint64_t inflen;
1631 int error, dscr_type;
1632
1633 bitmap_node = ump->metadatabitmap_node;
1634
1635 /* only read in when metadata bitmap node is read in */
1636 if (bitmap_node == NULL)
1637 return 0;
1638
1639 if (bitmap_node->fe) {
1640 inflen = udf_rw64(bitmap_node->fe->inf_len);
1641 } else {
1642 KASSERT(bitmap_node->efe);
1643 inflen = udf_rw64(bitmap_node->efe->inf_len);
1644 }
1645
1646 DPRINTF(VOLUMES, ("Reading metadata space bitmap for "
1647 "%"PRIu64" bytes\n", inflen));
1648
1649 /* allocate space for bitmap */
1650 dscr = malloc(inflen, M_UDFVOLD, M_CANFAIL | M_WAITOK);
1651 if (!dscr)
1652 return ENOMEM;
1653
1654 /* set vnode type to regular file or we can't read from it! */
1655 bitmap_node->vnode->v_type = VREG;
1656
1657 /* read in complete metadata bitmap file */
1658 error = vn_rdwr(UIO_READ, bitmap_node->vnode,
1659 dscr,
1660 inflen, 0,
1661 UIO_SYSSPACE,
1662 IO_SYNC | IO_NODELOCKED | IO_ALTSEMANTICS, FSCRED,
1663 NULL, NULL);
1664 if (error) {
1665 DPRINTF(VOLUMES, ("Error reading metadata space bitmap\n"));
1666 goto errorout;
1667 }
1668
1669 /* analyse */
1670 dscr_type = udf_rw16(dscr->tag.id);
1671 if (dscr_type == TAGID_SPACE_BITMAP) {
1672 DPRINTF(VOLUMES, ("Accepting metadata space bitmap\n"));
1673 ump->metadata_unalloc_dscr = &dscr->sbd;
1674
1675 /* fill in bitmap bits */
1676 bitmap = &ump->metadata_unalloc_bits;
1677 bitmap->blob = (uint8_t *) dscr;
1678 bitmap->bits = dscr->sbd.data;
1679 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1680 bitmap->pages = NULL; /* TODO */
1681 bitmap->data_pos = 0;
1682 bitmap->metadata_pos = 0;
1683 } else {
1684 DPRINTF(VOLUMES, ("No valid bitmap found!\n"));
1685 goto errorout;
1686 }
1687
1688 return 0;
1689
1690 errorout:
1691 free(dscr, M_UDFVOLD);
1692 printf( "UDF mount: error reading unallocated "
1693 "space bitmap for metadata partition\n");
1694 return EROFS;
1695 }
1696
1697
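/*
 * Shrink the metadata bitmap node to the exact descriptor size and write the
 * in-core bitmap back through its vnode, flushing and fsyncing the result.
 */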
1698 int
1699 udf_write_metadata_partition_spacetable(struct udf_mount *ump, int waitfor)
1700 {
1701 struct udf_node *bitmap_node;
1702 union dscrptr *dscr;
1703 uint64_t inflen, new_inflen;
1704 int dummy, error;
1705
1706 bitmap_node = ump->metadatabitmap_node;
1707
1708 /* only write out when metadata bitmap node is known */
1709 if (bitmap_node == NULL)
1710 return 0;
1711
1712 if (bitmap_node->fe) {
1713 inflen = udf_rw64(bitmap_node->fe->inf_len);
1714 } else {
1715 KASSERT(bitmap_node->efe);
1716 inflen = udf_rw64(bitmap_node->efe->inf_len);
1717 }
1718
1719 /* reduce length to zero */
1720 dscr = (union dscrptr *) ump->metadata_unalloc_dscr;
1721 new_inflen = udf_tagsize(dscr, 1);
1722
1723 DPRINTF(VOLUMES, ("Resize and write out metadata space bitmap from "
1724 "%"PRIu64" to %"PRIu64" bytes\n", inflen, new_inflen));
1725
1726 error = udf_resize_node(bitmap_node, new_inflen, &dummy);
1727 if (error)
1728 printf("Error resizing metadata space bitmap\n");
1729
1730 error = vn_rdwr(UIO_WRITE, bitmap_node->vnode,
1731 dscr,
1732 new_inflen, 0,
1733 UIO_SYSSPACE,
1734 IO_NODELOCKED | IO_ALTSEMANTICS, FSCRED,
1735 NULL, NULL);
1736
1737 bitmap_node->i_flags |= IN_MODIFIED;
1738 vflushbuf(bitmap_node->vnode, 1 /* sync */);
1739
1740 error = VOP_FSYNC(bitmap_node->vnode,
1741 FSCRED, FSYNC_WAIT, 0, 0);
1742
1743 if (error)
1744 printf( "Error writing out metadata partition unalloced "
1745 "space bitmap!\n");
1746
1747 return error;
1748 }
1749
1750
1751 /* --------------------------------------------------------------------- */
1752
1753 /*
1754 * Checks if ump's vds information is correct and complete
1755 */
1756
1757 int
1758 udf_process_vds(struct udf_mount *ump) {
1759 union udf_pmap *mapping;
1760 /* struct udf_args *args = &ump->mount_args; */
1761 struct logvol_int_desc *lvint;
1762 struct udf_logvol_info *lvinfo;
1763 struct part_desc *part;
1764 uint32_t n_pm, mt_l;
1765 uint8_t *pmap_pos;
1766 char *domain_name, *map_name;
1767 const char *check_name;
1768 char bits[128];
1769 int pmap_stype, pmap_size;
1770 int pmap_type, log_part, phys_part, raw_phys_part, maps_on;
1771 int n_phys, n_virt, n_spar, n_meta;
1772 int len, error;
1773
1774 if (ump == NULL)
1775 return ENOENT;
1776
1777 /* we need at least an anchor (trivial, but for safety) */
1778 if (ump->anchors[0] == NULL)
1779 return EINVAL;
1780
1781 /* we need at least one primary and one logical volume descriptor */
1782 	if ((ump->primary_vol == NULL) || (ump->logical_vol == NULL))
1783 return EINVAL;
1784
1785 /* we need at least one partition descriptor */
1786 if (ump->partitions[0] == NULL)
1787 return EINVAL;
1788
1789 	/* check logical volume sector size versus device sector size */
1790 if (udf_rw32(ump->logical_vol->lb_size) != ump->discinfo.sector_size) {
1791 printf("UDF mount: format violation, lb_size != sector size\n");
1792 return EINVAL;
1793 }
1794
1795 /* check domain name */
1796 domain_name = ump->logical_vol->domain_id.id;
1797 if (strncmp(domain_name, "*OSTA UDF Compliant", 20)) {
1798 printf("mount_udf: disc not OSTA UDF Compliant, aborting\n");
1799 return EINVAL;
1800 }
1801
1802 /* retrieve logical volume integrity sequence */
1803 error = udf_retrieve_lvint(ump);
1804
1805 /*
1806 * We need at least one logvol integrity descriptor recorded. Note
1807 	 * that it's OK to have an open logical volume integrity here. The VAT
1808 * will close/update the integrity.
1809 */
1810 if (ump->logvol_integrity == NULL)
1811 return EINVAL;
1812
1813 /* process derived structures */
1814 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
1815 lvint = ump->logvol_integrity;
1816 lvinfo = (struct udf_logvol_info *) (&lvint->tables[2 * n_pm]);
1817 ump->logvol_info = lvinfo;
1818
1819 /* TODO check udf versions? */
1820
1821 /*
1822 * check logvol mappings: effective virt->log partmap translation
1823 * check and recording of the mapping results. Saves expensive
1824 * strncmp() in tight places.
1825 */
1826 DPRINTF(VOLUMES, ("checking logvol mappings\n"));
1827 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
1828 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */
1829 pmap_pos = ump->logical_vol->maps;
1830
1831 if (n_pm > UDF_PMAPS) {
1832 printf("UDF mount: too many mappings\n");
1833 return EINVAL;
1834 }
1835
1836 /* count types and set partition numbers */
1837 ump->data_part = ump->node_part = ump->fids_part = 0;
1838 n_phys = n_virt = n_spar = n_meta = 0;
1839 for (log_part = 0; log_part < n_pm; log_part++) {
1840 mapping = (union udf_pmap *) pmap_pos;
1841 pmap_stype = pmap_pos[0];
1842 pmap_size = pmap_pos[1];
1843 switch (pmap_stype) {
1844 case 1: /* physical mapping */
1845 /* volseq = udf_rw16(mapping->pm1.vol_seq_num); */
1846 raw_phys_part = udf_rw16(mapping->pm1.part_num);
1847 pmap_type = UDF_VTOP_TYPE_PHYS;
1848 n_phys++;
1849 ump->data_part = log_part;
1850 ump->node_part = log_part;
1851 ump->fids_part = log_part;
1852 break;
1853 case 2: /* virtual/sparable/meta mapping */
1854 map_name = mapping->pm2.part_id.id;
1855 /* volseq = udf_rw16(mapping->pm2.vol_seq_num); */
1856 raw_phys_part = udf_rw16(mapping->pm2.part_num);
1857 pmap_type = UDF_VTOP_TYPE_UNKNOWN;
1858 len = UDF_REGID_ID_SIZE;
1859
1860 check_name = "*UDF Virtual Partition";
1861 if (strncmp(map_name, check_name, len) == 0) {
1862 pmap_type = UDF_VTOP_TYPE_VIRT;
1863 n_virt++;
1864 ump->node_part = log_part;
1865 break;
1866 }
1867 check_name = "*UDF Sparable Partition";
1868 if (strncmp(map_name, check_name, len) == 0) {
1869 pmap_type = UDF_VTOP_TYPE_SPARABLE;
1870 n_spar++;
1871 ump->data_part = log_part;
1872 ump->node_part = log_part;
1873 ump->fids_part = log_part;
1874 break;
1875 }
1876 check_name = "*UDF Metadata Partition";
1877 if (strncmp(map_name, check_name, len) == 0) {
1878 pmap_type = UDF_VTOP_TYPE_META;
1879 n_meta++;
1880 ump->node_part = log_part;
1881 ump->fids_part = log_part;
1882 break;
1883 }
1884 break;
1885 default:
1886 return EINVAL;
1887 }
1888
1889 /*
1890 * BUGALERT: some rogue implementations use random physical
1891 		 * partition numbers to break other implementations, so look up
1892 * the number.
1893 */
1894 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1895 part = ump->partitions[phys_part];
1896 if (part == NULL)
1897 continue;
1898 if (udf_rw16(part->part_num) == raw_phys_part)
1899 break;
1900 }
1901
1902 DPRINTF(VOLUMES, ("\t%d -> %d(%d) type %d\n", log_part,
1903 raw_phys_part, phys_part, pmap_type));
1904
1905 if (phys_part == UDF_PARTITIONS)
1906 return EINVAL;
1907 if (pmap_type == UDF_VTOP_TYPE_UNKNOWN)
1908 return EINVAL;
1909
1910 ump->vtop [log_part] = phys_part;
1911 ump->vtop_tp[log_part] = pmap_type;
1912
1913 pmap_pos += pmap_size;
1914 }
1915 /* not winning the beauty contest */
1916 ump->vtop_tp[UDF_VTOP_RAWPART] = UDF_VTOP_TYPE_RAW;
1917
1918 /* test some basic UDF assertions/requirements */
1919 if ((n_virt > 1) || (n_spar > 1) || (n_meta > 1))
1920 return EINVAL;
1921
1922 if (n_virt) {
1923 if ((n_phys == 0) || n_spar || n_meta)
1924 return EINVAL;
1925 }
1926 if (n_spar + n_phys == 0)
1927 return EINVAL;
1928
1929 /* select allocation type for each logical partition */
1930 for (log_part = 0; log_part < n_pm; log_part++) {
1931 maps_on = ump->vtop[log_part];
1932 switch (ump->vtop_tp[log_part]) {
1933 case UDF_VTOP_TYPE_PHYS :
1934 assert(maps_on == log_part);
1935 ump->vtop_alloc[log_part] = UDF_ALLOC_SPACEMAP;
1936 break;
1937 case UDF_VTOP_TYPE_VIRT :
1938 ump->vtop_alloc[log_part] = UDF_ALLOC_VAT;
1939 ump->vtop_alloc[maps_on] = UDF_ALLOC_SEQUENTIAL;
1940 break;
1941 case UDF_VTOP_TYPE_SPARABLE :
1942 assert(maps_on == log_part);
1943 ump->vtop_alloc[log_part] = UDF_ALLOC_SPACEMAP;
1944 break;
1945 case UDF_VTOP_TYPE_META :
1946 ump->vtop_alloc[log_part] = UDF_ALLOC_METABITMAP;
1947 if (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE) {
1948 /* special case for UDF 2.60 */
1949 ump->vtop_alloc[log_part] = UDF_ALLOC_METASEQUENTIAL;
1950 ump->vtop_alloc[maps_on] = UDF_ALLOC_SEQUENTIAL;
1951 }
1952 break;
1953 default:
1954 			panic("bad allocation type in udf's ump->vtop\n");
1955 }
1956 }
1957
1958 /* determine logical volume open/closure actions */
1959 if (n_virt) {
1960 ump->lvopen = 0;
1961 if (ump->discinfo.last_session_state == MMC_STATE_CLOSED)
1962 ump->lvopen |= UDF_OPEN_SESSION ;
1963 ump->lvclose = UDF_WRITE_VAT;
1964 if (ump->mount_args.udfmflags & UDFMNT_CLOSESESSION)
1965 ump->lvclose |= UDF_CLOSE_SESSION;
1966 } else {
1967 /* `normal' rewritable or non sequential media */
1968 ump->lvopen = UDF_WRITE_LVINT;
1969 ump->lvclose = UDF_WRITE_LVINT;
1970 if ((ump->discinfo.mmc_cur & MMC_CAP_REWRITABLE) == 0)
1971 ump->lvopen |= UDF_APPENDONLY_LVINT;
1972 }
1973
1974 	/*
1975 	 * Determine scheduler error behaviour. For virtual partitions, update
1976 	 * the trackinfo; for sparable partitions, remap a whole block using
1977 	 * the sparing table. Always requeue.
1978 	 */
1979 ump->lvreadwrite = 0;
1980 if (n_virt)
1981 ump->lvreadwrite = UDF_UPDATE_TRACKINFO;
1982 if (n_spar)
1983 ump->lvreadwrite = UDF_REMAP_BLOCK;
1984
1985 	/*
1986 	 * Select our scheduler
1987 	 */
1988 ump->strategy = &udf_strat_rmw;
1989 if (n_virt || (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE))
1990 ump->strategy = &udf_strat_sequential;
1991 if ((ump->discinfo.mmc_class == MMC_CLASS_DISC) ||
1992 (ump->discinfo.mmc_class == MMC_CLASS_UNKN))
1993 ump->strategy = &udf_strat_direct;
1994 if (n_spar)
1995 ump->strategy = &udf_strat_rmw;
1996
1997 /* print results */
1998 DPRINTF(VOLUMES, ("\tdata partition %d\n", ump->data_part));
1999 DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->data_part]));
2000 DPRINTF(VOLUMES, ("\tnode partition %d\n", ump->node_part));
2001 DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->node_part]));
2002 DPRINTF(VOLUMES, ("\tfids partition %d\n", ump->fids_part));
2003 DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->fids_part]));
2004
2005 bitmask_snprintf(ump->lvopen, UDFLOGVOL_BITS, bits, sizeof(bits));
2006 DPRINTF(VOLUMES, ("\tactions on logvol open %s\n", bits));
2007 bitmask_snprintf(ump->lvclose, UDFLOGVOL_BITS, bits, sizeof(bits));
2008 DPRINTF(VOLUMES, ("\tactions on logvol close %s\n", bits));
2009 bitmask_snprintf(ump->lvreadwrite, UDFONERROR_BITS, bits, sizeof(bits));
2010 DPRINTF(VOLUMES, ("\tactions on logvol errors %s\n", bits));
2011
2012 	DPRINTF(VOLUMES, ("\tselected scheduler `%s`\n",
2013 (ump->strategy == &udf_strat_direct) ? "Direct" :
2014 (ump->strategy == &udf_strat_sequential) ? "Sequential" :
2015 (ump->strategy == &udf_strat_rmw) ? "RMW" : "UNKNOWN!"));
2016
2017 	/* signal it's OK for now */
2018 return 0;
2019 }
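
/*
 * Illustrative sketch (not compiled): one way the vtop tables recorded
 * above could be consulted later on. The real translation work is done
 * by udf_translate_vtop(); the helper name below is made up.
 */
#if 0
static int
udf_example_vtop_lookup(struct udf_mount *ump, uint16_t vpart,
	uint32_t *phys_partp, int *alloc_schemep)
{
	if (vpart >= UDF_PMAPS)
		return EINVAL;
	if (ump->vtop_tp[vpart] == UDF_VTOP_TYPE_UNKNOWN)
		return EINVAL;

	/* filled in while checking the logvol mappings; no strncmp() needed */
	*phys_partp    = ump->vtop[vpart];		/* physical partition */
	*alloc_schemep = ump->vtop_alloc[vpart];	/* e.g. UDF_ALLOC_VAT */

	return 0;
}
#endif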
2020
2021 /* --------------------------------------------------------------------- */
2022
2023 /*
2024 * Update logical volume name in all structures that keep a record of it. We
2025 * use memmove since each of them might be specified as a source.
2026 *
2027 * Note that it doesn't update the VAT structure!
2028 */
2029
2030 static void
2031 udf_update_logvolname(struct udf_mount *ump, char *logvol_id)
2032 {
2033 struct logvol_desc *lvd = NULL;
2034 struct fileset_desc *fsd = NULL;
2035 struct udf_lv_info *lvi = NULL;
2036
2037 DPRINTF(VOLUMES, ("Updating logical volume name\n"));
2038 lvd = ump->logical_vol;
2039 fsd = ump->fileset_desc;
2040 if (ump->implementation)
2041 lvi = &ump->implementation->_impl_use.lv_info;
2042
2043 	/* logvol's id might be specified as the original, so use memmove here */
2044 memmove(lvd->logvol_id, logvol_id, 128);
2045 if (fsd)
2046 memmove(fsd->logvol_id, logvol_id, 128);
2047 if (lvi)
2048 memmove(lvi->logvol_id, logvol_id, 128);
2049 }
2050
2051 /* --------------------------------------------------------------------- */
2052
2053 void
2054 udf_inittag(struct udf_mount *ump, struct desc_tag *tag, int tagid,
2055 uint32_t sector)
2056 {
2057 assert(ump->logical_vol);
2058
2059 tag->id = udf_rw16(tagid);
2060 tag->descriptor_ver = ump->logical_vol->tag.descriptor_ver;
2061 tag->cksum = 0;
2062 tag->reserved = 0;
2063 tag->serial_num = ump->logical_vol->tag.serial_num;
2064 tag->tag_loc = udf_rw32(sector);
2065 }
2066
2067
2068 uint64_t
2069 udf_advance_uniqueid(struct udf_mount *ump)
2070 {
2071 uint64_t unique_id;
2072
2073 mutex_enter(&ump->logvol_mutex);
2074 unique_id = udf_rw64(ump->logvol_integrity->lvint_next_unique_id);
2075 if (unique_id < 0x10)
2076 unique_id = 0x10;
2077 ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id + 1);
2078 mutex_exit(&ump->logvol_mutex);
2079
2080 return unique_id;
2081 }
2082
2083
2084 static void
2085 udf_adjust_filecount(struct udf_node *udf_node, int sign)
2086 {
2087 struct udf_mount *ump = udf_node->ump;
2088 uint32_t num_dirs, num_files;
2089 int udf_file_type;
2090
2091 /* get file type */
2092 if (udf_node->fe) {
2093 udf_file_type = udf_node->fe->icbtag.file_type;
2094 } else {
2095 udf_file_type = udf_node->efe->icbtag.file_type;
2096 }
2097
2098 /* adjust file count */
2099 mutex_enter(&ump->allocate_mutex);
2100 if (udf_file_type == UDF_ICB_FILETYPE_DIRECTORY) {
2101 num_dirs = udf_rw32(ump->logvol_info->num_directories);
2102 ump->logvol_info->num_directories =
2103 udf_rw32((num_dirs + sign));
2104 } else {
2105 num_files = udf_rw32(ump->logvol_info->num_files);
2106 ump->logvol_info->num_files =
2107 udf_rw32((num_files + sign));
2108 }
2109 mutex_exit(&ump->allocate_mutex);
2110 }
2111
2112
2113 void
2114 udf_osta_charset(struct charspec *charspec)
2115 {
2116 bzero(charspec, sizeof(struct charspec));
2117 charspec->type = 0;
2118 strcpy((char *) charspec->inf, "OSTA Compressed Unicode");
2119 }
2120
2121
2122 /* call udf_set_regid() first and then fill in the suffix with one of the helpers below */
2123 void
2124 udf_set_regid(struct regid *regid, char const *name)
2125 {
2126 bzero(regid, sizeof(struct regid));
2127 regid->flags = 0; /* not dirty and not protected */
2128 strcpy((char *) regid->id, name);
2129 }
2130
2131
2132 void
2133 udf_add_domain_regid(struct udf_mount *ump, struct regid *regid)
2134 {
2135 uint16_t *ver;
2136
2137 ver = (uint16_t *) regid->id_suffix;
2138 *ver = ump->logvol_info->min_udf_readver;
2139 }
2140
2141
2142 void
2143 udf_add_udf_regid(struct udf_mount *ump, struct regid *regid)
2144 {
2145 uint16_t *ver;
2146
2147 ver = (uint16_t *) regid->id_suffix;
2148 *ver = ump->logvol_info->min_udf_readver;
2149
2150 regid->id_suffix[2] = 4; /* unix */
2151 regid->id_suffix[3] = 8; /* NetBSD */
2152 }
2153
2154
2155 void
2156 udf_add_impl_regid(struct udf_mount *ump, struct regid *regid)
2157 {
2158 regid->id_suffix[0] = 4; /* unix */
2159 regid->id_suffix[1] = 8; /* NetBSD */
2160 }
2161
2162
2163 void
2164 udf_add_app_regid(struct udf_mount *ump, struct regid *regid)
2165 {
2166 regid->id_suffix[0] = APP_VERSION_MAIN;
2167 regid->id_suffix[1] = APP_VERSION_SUB;
2168 }
2169
2170 static int
2171 udf_create_parentfid(struct udf_mount *ump, struct fileid_desc *fid,
2172 struct long_ad *parent, uint64_t unique_id)
2173 {
2174 	/* the size of an empty FID is 38 bytes; padded to a multiple of 4 that is 40 */
2175 int fidsize = 40;
2176
2177 udf_inittag(ump, &fid->tag, TAGID_FID, udf_rw32(parent->loc.lb_num));
2178 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
2179 fid->file_char = UDF_FILE_CHAR_DIR | UDF_FILE_CHAR_PAR;
2180 fid->icb = *parent;
2181 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
2182 	fid->tag.desc_crc_len = udf_rw16(fidsize - UDF_DESC_TAG_LENGTH);
2183 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
2184
2185 return fidsize;
2186 }
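
/*
 * The 40 above is just the parent-FID case of the general rule: a FID is
 * 38 bytes plus its file identifier and implementation use areas, padded
 * up to a multiple of four bytes [ECMA 167 4/14.4]. A sketch of that
 * calculation (hypothetical helper, not used by the driver):
 */
#if 0
static int
udf_example_fid_size(int l_fi, int l_iu)
{
	int size = 38 + l_fi + l_iu;	/* fixed part + identifier + impl. use */

	/* an empty parent FID: l_fi == 0, l_iu == 0  ->  38  ->  40 */
	return (size + 3) & ~3;
}
#endif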
2187
2188 /* --------------------------------------------------------------------- */
2189
2190 /*
2191 * Extended attribute support. UDF knows of 3 places for extended attributes:
2192 *
2193 * (a) inside the file's (e)fe in the length of the extended attribute area
2194 * before the allocation descriptors/filedata
2195 *
2196 * (b) in a file referenced by (e)fe->ext_attr_icb and
2197 *
2198 * (c) in the e(fe)'s associated stream directory that can hold various
2199 * sub-files. In the stream directory a few fixed named subfiles are reserved
2200 * for NT/Unix ACL's and OS/2 attributes.
2201 *
2202  * NOTE: Extended attributes are read randomly but always written
2203  * *atomically*. For ACLs this interface is probably different but not known
2204  * to me yet.
2205 *
2206  * Order of extended attributes in an EA space:
2207 * ECMA 167 EAs
2208 * Non block aligned Implementation Use EAs
2209 * Block aligned Implementation Use EAs
2210 * Application Use EAs
2211 */
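
/*
 * Rough picture of such an extended attribute space; all offsets are
 * relative to the extattrhdr_desc that starts it:
 *
 *	0			struct extattrhdr_desc
 *	sizeof(extattrhdr_desc)	Ecma 167 EAs (type < 2048)
 *	impl_attr_loc		implementation use EAs (type 2048)
 *	appl_attr_loc		application use EAs (type 65536)
 *
 * A minimal walk over such a space could look like the sketch below; the
 * real search with all its checks is udf_extattr_search_intern().
 */
#if 0
static void
udf_example_walk_eas(uint8_t *ea_space, int32_t l_ea)
{
	struct extattr_entry *attr;
	uint8_t *pos  = ea_space + sizeof(struct extattrhdr_desc);
	int32_t  left = l_ea - (int32_t) sizeof(struct extattrhdr_desc);
	uint32_t a_l;

	while (left >= (int32_t) sizeof(struct extattr_entry)) {
		attr = (struct extattr_entry *) pos;
		a_l  = udf_rw32(attr->a_l);	/* total length of this entry */
		if (a_l == 0 || a_l > (uint32_t) left)
			break;			/* rogue value, bail out */
		pos  += a_l;
		left -= a_l;
	}
}
#endif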
2212
2213 static int
2214 udf_impl_extattr_check(struct impl_extattr_entry *implext)
2215 {
2216 uint16_t *spos;
2217
2218 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
2219 /* checksum valid? */
2220 DPRINTF(EXTATTR, ("checking UDF impl. attr checksum\n"));
2221 spos = (uint16_t *) implext->data;
2222 if (udf_rw16(*spos) != udf_ea_cksum((uint8_t *) implext))
2223 return EINVAL;
2224 }
2225 return 0;
2226 }
2227
2228 static void
2229 udf_calc_impl_extattr_checksum(struct impl_extattr_entry *implext)
2230 {
2231 uint16_t *spos;
2232
2233 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
2234 /* set checksum */
2235 spos = (uint16_t *) implext->data;
2236 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
2237 }
2238 }
2239
2240
2241 int
2242 udf_extattr_search_intern(struct udf_node *node,
2243 uint32_t sattr, char const *sattrname,
2244 uint32_t *offsetp, uint32_t *lengthp)
2245 {
2246 struct extattrhdr_desc *eahdr;
2247 struct extattr_entry *attrhdr;
2248 struct impl_extattr_entry *implext;
2249 uint32_t offset, a_l, sector_size;
2250 int32_t l_ea;
2251 uint8_t *pos;
2252 int error;
2253
2254 /* get mountpoint */
2255 sector_size = node->ump->discinfo.sector_size;
2256
2257 /* get information from fe/efe */
2258 if (node->fe) {
2259 l_ea = udf_rw32(node->fe->l_ea);
2260 eahdr = (struct extattrhdr_desc *) node->fe->data;
2261 } else {
2262 assert(node->efe);
2263 l_ea = udf_rw32(node->efe->l_ea);
2264 eahdr = (struct extattrhdr_desc *) node->efe->data;
2265 }
2266
2267 /* something recorded here? */
2268 if (l_ea == 0)
2269 return ENOENT;
2270
2271 /* check extended attribute tag; what to do if it fails? */
2272 error = udf_check_tag(eahdr);
2273 if (error)
2274 return EINVAL;
2275 if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
2276 return EINVAL;
2277 error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
2278 if (error)
2279 return EINVAL;
2280
2281 DPRINTF(EXTATTR, ("Found %d bytes of extended attributes\n", l_ea));
2282
2283 /* looking for Ecma-167 attributes? */
2284 offset = sizeof(struct extattrhdr_desc);
2285
2286 	/* looking for either implementation use or application use */
2287 if (sattr == 2048) { /* [4/48.10.8] */
2288 offset = udf_rw32(eahdr->impl_attr_loc);
2289 if (offset == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2290 return ENOENT;
2291 }
2292 if (sattr == 65536) { /* [4/48.10.9] */
2293 offset = udf_rw32(eahdr->appl_attr_loc);
2294 if (offset == UDF_APPL_ATTR_LOC_NOT_PRESENT)
2295 return ENOENT;
2296 }
2297
2298 /* paranoia check offset and l_ea */
2299 if (l_ea + offset >= sector_size - sizeof(struct extattr_entry))
2300 return EINVAL;
2301
2302 DPRINTF(EXTATTR, ("Starting at offset %d\n", offset));
2303
2304 /* find our extended attribute */
2305 l_ea -= offset;
2306 pos = (uint8_t *) eahdr + offset;
2307
2308 while (l_ea >= sizeof(struct extattr_entry)) {
2309 DPRINTF(EXTATTR, ("%d extended attr bytes left\n", l_ea));
2310 attrhdr = (struct extattr_entry *) pos;
2311 implext = (struct impl_extattr_entry *) pos;
2312
2313 		/* get complete attribute length and check for rogue values */
2314 a_l = udf_rw32(attrhdr->a_l);
2315 DPRINTF(EXTATTR, ("attribute %d:%d, len %d/%d\n",
2316 udf_rw32(attrhdr->type),
2317 attrhdr->subtype, a_l, l_ea));
2318 if ((a_l == 0) || (a_l > l_ea))
2319 return EINVAL;
2320
2321 if (attrhdr->type != sattr)
2322 goto next_attribute;
2323
2324 /* we might have found it! */
2325 if (attrhdr->type < 2048) { /* Ecma-167 attribute */
2326 *offsetp = offset;
2327 *lengthp = a_l;
2328 return 0; /* success */
2329 }
2330
2331 		/*
2332 		 * Implementation use and application use extended attributes
2333 		 * have a name to identify them. They share the same structure;
2334 		 * only UDF implementation use extended attributes have a
2335 		 * checksum we need to check.
2336 		 */
2337
2338 DPRINTF(EXTATTR, ("named attribute %s\n", implext->imp_id.id));
2339 if (strcmp(implext->imp_id.id, sattrname) == 0) {
2340 /* we have found our appl/implementation attribute */
2341 *offsetp = offset;
2342 *lengthp = a_l;
2343 return 0; /* success */
2344 }
2345
2346 next_attribute:
2347 /* next attribute */
2348 pos += a_l;
2349 l_ea -= a_l;
2350 offset += a_l;
2351 }
2352 /* not found */
2353 return ENOENT;
2354 }
2355
2356
2357 static void
2358 udf_extattr_insert_internal(struct udf_mount *ump, union dscrptr *dscr,
2359 struct extattr_entry *extattr)
2360 {
2361 struct file_entry *fe;
2362 struct extfile_entry *efe;
2363 struct extattrhdr_desc *extattrhdr;
2364 struct impl_extattr_entry *implext;
2365 uint32_t impl_attr_loc, appl_attr_loc, l_ea, a_l, exthdr_len;
2366 uint32_t *l_eap, l_ad;
2367 uint16_t *spos;
2368 uint8_t *bpos, *data;
2369
2370 if (udf_rw16(dscr->tag.id) == TAGID_FENTRY) {
2371 fe = &dscr->fe;
2372 data = fe->data;
2373 l_eap = &fe->l_ea;
2374 l_ad = udf_rw32(fe->l_ad);
2375 } else if (udf_rw16(dscr->tag.id) == TAGID_EXTFENTRY) {
2376 efe = &dscr->efe;
2377 data = efe->data;
2378 l_eap = &efe->l_ea;
2379 l_ad = udf_rw32(efe->l_ad);
2380 } else {
2381 panic("Bad tag passed to udf_extattr_insert_internal");
2382 }
2383
2384 	/* can't append to already-written file descriptors yet */
2385 assert(l_ad == 0);
2386
2387 /* should have a header! */
2388 extattrhdr = (struct extattrhdr_desc *) data;
2389 l_ea = udf_rw32(*l_eap);
2390 if (l_ea == 0) {
2391 /* create empty extended attribute header */
2392 exthdr_len = sizeof(struct extattrhdr_desc);
2393
2394 udf_inittag(ump, &extattrhdr->tag, TAGID_EXTATTR_HDR,
2395 /* loc */ 0);
2396 extattrhdr->impl_attr_loc = udf_rw32(exthdr_len);
2397 extattrhdr->appl_attr_loc = udf_rw32(exthdr_len);
2398 extattrhdr->tag.desc_crc_len = udf_rw16(8);
2399
2400 /* record extended attribute header length */
2401 l_ea = exthdr_len;
2402 *l_eap = udf_rw32(l_ea);
2403 }
2404
2405 /* extract locations */
2406 impl_attr_loc = udf_rw32(extattrhdr->impl_attr_loc);
2407 appl_attr_loc = udf_rw32(extattrhdr->appl_attr_loc);
2408 if (impl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2409 impl_attr_loc = l_ea;
2410 if (appl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2411 appl_attr_loc = l_ea;
2412
2413 /* Ecma 167 EAs */
2414 if (udf_rw32(extattr->type) < 2048) {
2415 assert(impl_attr_loc == l_ea);
2416 assert(appl_attr_loc == l_ea);
2417 }
2418
2419 /* implementation use extended attributes */
2420 if (udf_rw32(extattr->type) == 2048) {
2421 assert(appl_attr_loc == l_ea);
2422
2423 /* calculate and write extended attribute header checksum */
2424 implext = (struct impl_extattr_entry *) extattr;
2425 assert(udf_rw32(implext->iu_l) == 4); /* [UDF 3.3.4.5] */
2426 spos = (uint16_t *) implext->data;
2427 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
2428 }
2429
2430 /* application use extended attributes */
2431 assert(udf_rw32(extattr->type) != 65536);
2432 assert(appl_attr_loc == l_ea);
2433
2434 /* append the attribute at the end of the current space */
2435 bpos = data + udf_rw32(*l_eap);
2436 a_l = udf_rw32(extattr->a_l);
2437
2438 /* update impl. attribute locations */
2439 if (udf_rw32(extattr->type) < 2048) {
2440 impl_attr_loc = l_ea + a_l;
2441 appl_attr_loc = l_ea + a_l;
2442 }
2443 if (udf_rw32(extattr->type) == 2048) {
2444 appl_attr_loc = l_ea + a_l;
2445 }
2446
2447 /* copy and advance */
2448 memcpy(bpos, extattr, a_l);
2449 l_ea += a_l;
2450 *l_eap = udf_rw32(l_ea);
2451
2452 /* do the `dance` again backwards */
2453 if (udf_rw16(ump->logical_vol->tag.descriptor_ver) != 2) {
2454 if (impl_attr_loc == l_ea)
2455 impl_attr_loc = UDF_IMPL_ATTR_LOC_NOT_PRESENT;
2456 if (appl_attr_loc == l_ea)
2457 appl_attr_loc = UDF_APPL_ATTR_LOC_NOT_PRESENT;
2458 }
2459
2460 /* store offsets */
2461 extattrhdr->impl_attr_loc = udf_rw32(impl_attr_loc);
2462 extattrhdr->appl_attr_loc = udf_rw32(appl_attr_loc);
2463 }
2464
2465
2466 /* --------------------------------------------------------------------- */
2467
2468 static int
2469 udf_update_lvid_from_vat_extattr(struct udf_node *vat_node)
2470 {
2471 struct udf_mount *ump;
2472 struct udf_logvol_info *lvinfo;
2473 struct impl_extattr_entry *implext;
2474 struct vatlvext_extattr_entry lvext;
2475 const char *extstr = "*UDF VAT LVExtension";
2476 uint64_t vat_uniqueid;
2477 uint32_t offset, a_l;
2478 uint8_t *ea_start, *lvextpos;
2479 int error;
2480
2481 /* get mountpoint and lvinfo */
2482 ump = vat_node->ump;
2483 lvinfo = ump->logvol_info;
2484
2485 /* get information from fe/efe */
2486 if (vat_node->fe) {
2487 vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
2488 ea_start = vat_node->fe->data;
2489 } else {
2490 vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
2491 ea_start = vat_node->efe->data;
2492 }
2493
2494 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
2495 if (error)
2496 return error;
2497
2498 implext = (struct impl_extattr_entry *) (ea_start + offset);
2499 error = udf_impl_extattr_check(implext);
2500 if (error)
2501 return error;
2502
2503 /* paranoia */
2504 if (a_l != sizeof(*implext) -1 + udf_rw32(implext->iu_l) + sizeof(lvext)) {
2505 DPRINTF(VOLUMES, ("VAT LVExtension size doesn't compute\n"));
2506 return EINVAL;
2507 }
2508
2509 	/*
2510 	 * We have found our "VAT LVExtension" attribute. BUT due to a
2511 	 * bug in the specification it might not be word aligned, so
2512 	 * copy it first to avoid panics on some machines (!!)
2513 	 */
2514 DPRINTF(VOLUMES, ("Found VAT LVExtension attr\n"));
2515 lvextpos = implext->data + udf_rw32(implext->iu_l);
2516 memcpy(&lvext, lvextpos, sizeof(lvext));
2517
2518 /* check if it was updated the last time */
2519 if (udf_rw64(lvext.unique_id_chk) == vat_uniqueid) {
2520 lvinfo->num_files = lvext.num_files;
2521 lvinfo->num_directories = lvext.num_directories;
2522 udf_update_logvolname(ump, lvext.logvol_id);
2523 } else {
2524 DPRINTF(VOLUMES, ("VAT LVExtension out of date\n"));
2525 /* replace VAT LVExt by free space EA */
2526 memset(implext->imp_id.id, 0, UDF_REGID_ID_SIZE);
2527 strcpy(implext->imp_id.id, "*UDF FreeEASpace");
2528 udf_calc_impl_extattr_checksum(implext);
2529 }
2530
2531 return 0;
2532 }
2533
2534
2535 static int
2536 udf_update_vat_extattr_from_lvid(struct udf_node *vat_node)
2537 {
2538 struct udf_mount *ump;
2539 struct udf_logvol_info *lvinfo;
2540 struct impl_extattr_entry *implext;
2541 struct vatlvext_extattr_entry lvext;
2542 const char *extstr = "*UDF VAT LVExtension";
2543 uint64_t vat_uniqueid;
2544 uint32_t offset, a_l;
2545 uint8_t *ea_start, *lvextpos;
2546 int error;
2547
2548 /* get mountpoint and lvinfo */
2549 ump = vat_node->ump;
2550 lvinfo = ump->logvol_info;
2551
2552 /* get information from fe/efe */
2553 if (vat_node->fe) {
2554 vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
2555 ea_start = vat_node->fe->data;
2556 } else {
2557 vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
2558 ea_start = vat_node->efe->data;
2559 }
2560
2561 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
2562 if (error)
2563 return error;
2564 /* found, it existed */
2565
2566 /* paranoia */
2567 implext = (struct impl_extattr_entry *) (ea_start + offset);
2568 error = udf_impl_extattr_check(implext);
2569 if (error) {
2570 DPRINTF(VOLUMES, ("VAT LVExtension bad on update\n"));
2571 return error;
2572 }
2573 /* it is correct */
2574
2575 	/*
2576 	 * We have found our "VAT LVExtension" attribute. BUT due to a
2577 	 * bug in the specification it might not be word aligned, so
2578 	 * copy it first to avoid panics on some machines (!!)
2579 	 */
2580 DPRINTF(VOLUMES, ("Updating VAT LVExtension attr\n"));
2581 lvextpos = implext->data + udf_rw32(implext->iu_l);
2582
2583 lvext.unique_id_chk = vat_uniqueid;
2584 lvext.num_files = lvinfo->num_files;
2585 lvext.num_directories = lvinfo->num_directories;
2586 memmove(lvext.logvol_id, ump->logical_vol->logvol_id, 128);
2587
2588 memcpy(lvextpos, &lvext, sizeof(lvext));
2589
2590 return 0;
2591 }
2592
2593 /* --------------------------------------------------------------------- */
2594
2595 int
2596 udf_vat_read(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
2597 {
2598 struct udf_mount *ump = vat_node->ump;
2599
2600 if (offset + size > ump->vat_offset + ump->vat_entries * 4)
2601 return EINVAL;
2602
2603 memcpy(blob, ump->vat_table + offset, size);
2604 return 0;
2605 }
2606
2607 int
2608 udf_vat_write(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
2609 {
2610 struct udf_mount *ump = vat_node->ump;
2611 uint32_t offset_high;
2612 uint8_t *new_vat_table;
2613
2614 	/* extend the VAT allocation if needed */
2615 offset_high = offset + size;
2616 if (offset_high >= ump->vat_table_alloc_len) {
2617 /* realloc */
2618 new_vat_table = realloc(ump->vat_table,
2619 ump->vat_table_alloc_len + UDF_VAT_CHUNKSIZE,
2620 M_UDFVOLD, M_WAITOK | M_CANFAIL);
2621 if (!new_vat_table) {
2622 			printf("udf_vat_write: can't extend VAT, out of mem\n");
2623 return ENOMEM;
2624 }
2625 ump->vat_table = new_vat_table;
2626 ump->vat_table_alloc_len += UDF_VAT_CHUNKSIZE;
2627 }
2628 ump->vat_table_len = MAX(ump->vat_table_len, offset_high);
2629
2630 memcpy(ump->vat_table + offset, blob, size);
2631 return 0;
2632 }
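
/*
 * The VAT itself is just an array of 32 bit entries: virtual block n
 * lives at byte offset vat_offset + 4*n and holds the logical block
 * number it is currently recorded on. A minimal translation sketch
 * using the reader above (the real translation is done elsewhere in
 * the driver):
 */
#if 0
static int
udf_example_vat_translate(struct udf_mount *ump, uint32_t vblock,
	uint32_t *lb_nump)
{
	uint32_t raw;
	int error;

	if (vblock >= ump->vat_entries)
		return EINVAL;

	error = udf_vat_read(ump->vat_node, (uint8_t *) &raw, 4,
		ump->vat_offset + vblock * 4);
	if (error)
		return error;

	*lb_nump = udf_rw32(raw);	/* entries are in on-disc byte order */
	return 0;
}
#endif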
2633
2634 /* --------------------------------------------------------------------- */
2635
2636 /* TODO support previous VAT location writeout */
2637 static int
2638 udf_update_vat_descriptor(struct udf_mount *ump)
2639 {
2640 struct udf_node *vat_node = ump->vat_node;
2641 struct udf_logvol_info *lvinfo = ump->logvol_info;
2642 struct icb_tag *icbtag;
2643 struct udf_oldvat_tail *oldvat_tl;
2644 struct udf_vat *vat;
2645 uint64_t unique_id;
2646 uint32_t lb_size;
2647 uint8_t *raw_vat;
2648 int filetype, error;
2649
2650 KASSERT(vat_node);
2651 KASSERT(lvinfo);
2652 lb_size = udf_rw32(ump->logical_vol->lb_size);
2653
2654 /* get our new unique_id */
2655 unique_id = udf_advance_uniqueid(ump);
2656
2657 /* get information from fe/efe */
2658 if (vat_node->fe) {
2659 icbtag = &vat_node->fe->icbtag;
2660 vat_node->fe->unique_id = udf_rw64(unique_id);
2661 } else {
2662 icbtag = &vat_node->efe->icbtag;
2663 vat_node->efe->unique_id = udf_rw64(unique_id);
2664 }
2665
2666 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
2667 filetype = icbtag->file_type;
2668 KASSERT((filetype == 0) || (filetype == UDF_ICB_FILETYPE_VAT));
2669
2670 /* allocate piece to process head or tail of VAT file */
2671 raw_vat = malloc(lb_size, M_TEMP, M_WAITOK);
2672
2673 if (filetype == 0) {
2674 /*
2675 * Update "*UDF VAT LVExtension" extended attribute from the
2676 * lvint if present.
2677 */
2678 udf_update_vat_extattr_from_lvid(vat_node);
2679
2680 /* setup identifying regid */
2681 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2682 memset(oldvat_tl, 0, sizeof(struct udf_oldvat_tail));
2683
2684 udf_set_regid(&oldvat_tl->id, "*UDF Virtual Alloc Tbl");
2685 udf_add_udf_regid(ump, &oldvat_tl->id);
2686 oldvat_tl->prev_vat = udf_rw32(0xffffffff);
2687
2688 /* write out new tail of virtual allocation table file */
2689 error = udf_vat_write(vat_node, raw_vat,
2690 sizeof(struct udf_oldvat_tail), ump->vat_entries * 4);
2691 } else {
2692 /* compose the VAT2 header */
2693 vat = (struct udf_vat *) raw_vat;
2694 memset(vat, 0, sizeof(struct udf_vat));
2695
2696 vat->header_len = udf_rw16(152); /* as per spec */
2697 vat->impl_use_len = udf_rw16(0);
2698 memmove(vat->logvol_id, ump->logical_vol->logvol_id, 128);
2699 vat->prev_vat = udf_rw32(0xffffffff);
2700 vat->num_files = lvinfo->num_files;
2701 vat->num_directories = lvinfo->num_directories;
2702 vat->min_udf_readver = lvinfo->min_udf_readver;
2703 vat->min_udf_writever = lvinfo->min_udf_writever;
2704 vat->max_udf_writever = lvinfo->max_udf_writever;
2705
2706 error = udf_vat_write(vat_node, raw_vat,
2707 sizeof(struct udf_vat), 0);
2708 }
2709 free(raw_vat, M_TEMP);
2710
2711 	return error;	/* error from udf_vat_write, if any */
2712 }
2713
2714
2715 int
2716 udf_writeout_vat(struct udf_mount *ump)
2717 {
2718 struct udf_node *vat_node = ump->vat_node;
2719 uint32_t vat_length;
2720 int error;
2721
2722 KASSERT(vat_node);
2723
2724 DPRINTF(CALL, ("udf_writeout_vat\n"));
2725
2726 mutex_enter(&ump->allocate_mutex);
2727 udf_update_vat_descriptor(ump);
2728
2729 	/* write out the VAT contents; TODO intelligent writing */
2730 vat_length = ump->vat_table_len;
2731 error = vn_rdwr(UIO_WRITE, vat_node->vnode,
2732 ump->vat_table, ump->vat_table_len, 0,
2733 UIO_SYSSPACE, IO_NODELOCKED, FSCRED, NULL, NULL);
2734 	if (error) {
2735 		printf("udf_writeout_vat: failed to write out VAT contents\n");
		mutex_exit(&ump->allocate_mutex);
2736 		goto out;
2737 	}
2738
2739 mutex_exit(&ump->allocate_mutex);
2740
2741 vflushbuf(ump->vat_node->vnode, 1 /* sync */);
2742 error = VOP_FSYNC(ump->vat_node->vnode,
2743 FSCRED, FSYNC_WAIT, 0, 0);
2744 if (error)
2745 printf("udf_writeout_vat: error writing VAT node!\n");
2746 out:
2747
2748 return error;
2749 }
2750
2751 /* --------------------------------------------------------------------- */
2752
2753 /*
2754  * Read in the relevant pieces of the VAT file and check if it is indeed a
2755  * VAT file descriptor. If OK, read in the complete VAT file.
2756  */
2757
2758 static int
2759 udf_check_for_vat(struct udf_node *vat_node)
2760 {
2761 struct udf_mount *ump;
2762 struct icb_tag *icbtag;
2763 struct timestamp *mtime;
2764 struct udf_vat *vat;
2765 struct udf_oldvat_tail *oldvat_tl;
2766 struct udf_logvol_info *lvinfo;
2767 uint64_t unique_id;
2768 uint32_t vat_length;
2769 uint32_t vat_offset, vat_entries, vat_table_alloc_len;
2770 uint32_t sector_size;
2771 uint32_t *raw_vat;
2772 uint8_t *vat_table;
2773 char *regid_name;
2774 int filetype;
2775 int error;
2776
2777 	/* vat_length (inf_len) is really a 64 bit value, though a VAT that large is impossible */
2778
2779 DPRINTF(VOLUMES, ("Checking for VAT\n"));
2780 if (!vat_node)
2781 return ENOENT;
2782
2783 /* get mount info */
2784 ump = vat_node->ump;
2785 sector_size = udf_rw32(ump->logical_vol->lb_size);
2786
2787 /* check assertions */
2788 assert(vat_node->fe || vat_node->efe);
2789 assert(ump->logvol_integrity);
2790
2791 /* set vnode type to regular file or we can't read from it! */
2792 vat_node->vnode->v_type = VREG;
2793
2794 /* get information from fe/efe */
2795 if (vat_node->fe) {
2796 vat_length = udf_rw64(vat_node->fe->inf_len);
2797 icbtag = &vat_node->fe->icbtag;
2798 mtime = &vat_node->fe->mtime;
2799 unique_id = udf_rw64(vat_node->fe->unique_id);
2800 } else {
2801 vat_length = udf_rw64(vat_node->efe->inf_len);
2802 icbtag = &vat_node->efe->icbtag;
2803 mtime = &vat_node->efe->mtime;
2804 unique_id = udf_rw64(vat_node->efe->unique_id);
2805 }
2806
2807 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
2808 filetype = icbtag->file_type;
2809 if ((filetype != 0) && (filetype != UDF_ICB_FILETYPE_VAT))
2810 return ENOENT;
2811
2812 DPRINTF(VOLUMES, ("\tPossible VAT length %d\n", vat_length));
2813
2814 vat_table_alloc_len =
2815 ((vat_length + UDF_VAT_CHUNKSIZE-1) / UDF_VAT_CHUNKSIZE)
2816 * UDF_VAT_CHUNKSIZE;
2817
2818 vat_table = malloc(vat_table_alloc_len, M_UDFVOLD,
2819 M_CANFAIL | M_WAITOK);
2820 if (vat_table == NULL) {
2821 printf("allocation of %d bytes failed for VAT\n",
2822 vat_table_alloc_len);
2823 return ENOMEM;
2824 }
2825
2826 /* allocate piece to read in head or tail of VAT file */
2827 raw_vat = malloc(sector_size, M_TEMP, M_WAITOK);
2828
2829 	/*
2830 	 * Check the contents of the file if it's the old 1.50 VAT table format.
2831 	 * It's notoriously broken and although some implementations support an
2832 	 * extension as defined in the UDF 1.50 errata document, it's doubtful
2833 	 * to be usable since a lot of implementations don't maintain it.
2834 	 */
2835 lvinfo = ump->logvol_info;
2836
2837 if (filetype == 0) {
2838 /* definition */
2839 vat_offset = 0;
2840 vat_entries = (vat_length-36)/4;
2841
2842 /* read in tail of virtual allocation table file */
2843 error = vn_rdwr(UIO_READ, vat_node->vnode,
2844 (uint8_t *) raw_vat,
2845 sizeof(struct udf_oldvat_tail),
2846 vat_entries * 4,
2847 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2848 NULL, NULL);
2849 if (error)
2850 goto out;
2851
2852 /* check 1.50 VAT */
2853 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2854 regid_name = (char *) oldvat_tl->id.id;
2855 error = strncmp(regid_name, "*UDF Virtual Alloc Tbl", 22);
2856 if (error) {
2857 DPRINTF(VOLUMES, ("VAT format 1.50 rejected\n"));
2858 error = ENOENT;
2859 goto out;
2860 }
2861
2862 /*
2863 * update LVID from "*UDF VAT LVExtension" extended attribute
2864 * if present.
2865 */
2866 udf_update_lvid_from_vat_extattr(vat_node);
2867 } else {
2868 /* read in head of virtual allocation table file */
2869 error = vn_rdwr(UIO_READ, vat_node->vnode,
2870 (uint8_t *) raw_vat,
2871 sizeof(struct udf_vat), 0,
2872 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2873 NULL, NULL);
2874 if (error)
2875 goto out;
2876
2877 /* definition */
2878 vat = (struct udf_vat *) raw_vat;
2879 vat_offset = vat->header_len;
2880 vat_entries = (vat_length - vat_offset)/4;
2881
2882 assert(lvinfo);
2883 lvinfo->num_files = vat->num_files;
2884 lvinfo->num_directories = vat->num_directories;
2885 lvinfo->min_udf_readver = vat->min_udf_readver;
2886 lvinfo->min_udf_writever = vat->min_udf_writever;
2887 lvinfo->max_udf_writever = vat->max_udf_writever;
2888
2889 udf_update_logvolname(ump, vat->logvol_id);
2890 }
2891
2892 /* read in complete VAT file */
2893 error = vn_rdwr(UIO_READ, vat_node->vnode,
2894 vat_table,
2895 vat_length, 0,
2896 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2897 NULL, NULL);
2898 if (error)
2899 printf("read in of complete VAT file failed (error %d)\n",
2900 error);
2901 if (error)
2902 goto out;
2903
2904 DPRINTF(VOLUMES, ("VAT format accepted, marking it closed\n"));
2905 	ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id);
2906 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
2907 ump->logvol_integrity->time = *mtime;
2908
2909 ump->vat_table_len = vat_length;
2910 ump->vat_table_alloc_len = vat_table_alloc_len;
2911 ump->vat_table = vat_table;
2912 ump->vat_offset = vat_offset;
2913 ump->vat_entries = vat_entries;
2914 ump->vat_last_free_lb = 0; /* start at beginning */
2915
2916 out:
2917 if (error) {
2918 if (vat_table)
2919 free(vat_table, M_UDFVOLD);
2920 }
2921 free(raw_vat, M_TEMP);
2922
2923 return error;
2924 }
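
/*
 * The two VAT layouts handled above, in summary:
 *
 *	UDF 1.50 :  [ 4*n mapping entries ][ udf_oldvat_tail (36 bytes) ]
 *	UDF 2.xx :  [ udf_vat header (header_len, normally 152) ][ 4*n entries ]
 *
 * so offset and entry count follow directly from the file length; a
 * small sketch of just that arithmetic:
 */
#if 0
static void
udf_example_vat_geometry(int old_format, uint32_t vat_length,
	uint32_t header_len, uint32_t *vat_offsetp, uint32_t *vat_entriesp)
{
	if (old_format) {
		/* entries first, identifying tail last */
		*vat_offsetp  = 0;
		*vat_entriesp = (vat_length - 36) / 4;
	} else {
		/* header first, entries after it */
		*vat_offsetp  = header_len;
		*vat_entriesp = (vat_length - header_len) / 4;
	}
}
#endif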
2925
2926 /* --------------------------------------------------------------------- */
2927
2928 static int
2929 udf_search_vat(struct udf_mount *ump, union udf_pmap *mapping)
2930 {
2931 struct udf_node *vat_node;
2932 struct long_ad icb_loc;
2933 uint32_t early_vat_loc, late_vat_loc, vat_loc;
2934 int error;
2935
2936 /* mapping info not needed */
2937 mapping = mapping;
2938
2939 vat_loc = ump->last_possible_vat_location;
2940 early_vat_loc = vat_loc - 256; /* 8 blocks of 32 sectors */
2941
2942 DPRINTF(VOLUMES, ("1) last possible %d, early_vat_loc %d \n",
2943 vat_loc, early_vat_loc));
2944 early_vat_loc = MAX(early_vat_loc, ump->first_possible_vat_location);
2945 late_vat_loc = vat_loc + 1024;
2946
2947 DPRINTF(VOLUMES, ("2) last possible %d, early_vat_loc %d \n",
2948 vat_loc, early_vat_loc));
2949
2950 /* start looking from the end of the range */
2951 do {
2952 DPRINTF(VOLUMES, ("Checking for VAT at sector %d\n", vat_loc));
2953 icb_loc.loc.part_num = udf_rw16(UDF_VTOP_RAWPART);
2954 icb_loc.loc.lb_num = udf_rw32(vat_loc);
2955
2956 error = udf_get_node(ump, &icb_loc, &vat_node);
2957 if (!error) {
2958 error = udf_check_for_vat(vat_node);
2959 DPRINTFIF(VOLUMES, !error,
2960 ("VAT accepted at %d\n", vat_loc));
2961 if (!error)
2962 break;
2963 }
2964 if (vat_node) {
2965 vput(vat_node->vnode);
2966 vat_node = NULL;
2967 }
2968 vat_loc--; /* walk backwards */
2969 } while (vat_loc >= early_vat_loc);
2970
2971 /* keep our VAT node around */
2972 if (vat_node) {
2973 UDF_SET_SYSTEMFILE(vat_node->vnode);
2974 ump->vat_node = vat_node;
2975 }
2976
2977 return error;
2978 }
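
/*
 * Example of the search window used above, with made-up numbers: if the
 * last possible VAT location is sector 307232 and the first possible one
 * is 272, the loop probes 307232, 307231, ... down to
 * MAX(307232 - 256, 272) = 306976 until udf_check_for_vat() accepts one
 * or the window is exhausted.
 */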
2979
2980 /* --------------------------------------------------------------------- */
2981
2982 static int
2983 udf_read_sparables(struct udf_mount *ump, union udf_pmap *mapping)
2984 {
2985 union dscrptr *dscr;
2986 struct part_map_spare *pms = &mapping->pms;
2987 uint32_t lb_num;
2988 int spar, error;
2989
2990 	/*
2991 	 * The partition mapping passed on to us specifies the information we
2992 	 * need to locate and initialise the sparable partition mapping
2993 	 * information.
2994 	 */
2995
2996 DPRINTF(VOLUMES, ("Read sparable table\n"));
2997 ump->sparable_packet_size = udf_rw16(pms->packet_len);
2998 KASSERT(ump->sparable_packet_size >= ump->packet_size); /* XXX */
2999
3000 for (spar = 0; spar < pms->n_st; spar++) {
3001 		lb_num = udf_rw32(pms->st_loc[spar]);
3002 DPRINTF(VOLUMES, ("Checking for sparing table %d\n", lb_num));
3003 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
3004 if (!error && dscr) {
3005 if (udf_rw16(dscr->tag.id) == TAGID_SPARING_TABLE) {
3006 if (ump->sparing_table)
3007 free(ump->sparing_table, M_UDFVOLD);
3008 ump->sparing_table = &dscr->spt;
3009 dscr = NULL;
3010 DPRINTF(VOLUMES,
3011 ("Sparing table accepted (%d entries)\n",
3012 udf_rw16(ump->sparing_table->rt_l)));
3013 break; /* we're done */
3014 }
3015 }
3016 if (dscr)
3017 free(dscr, M_UDFVOLD);
3018 }
3019
3020 if (ump->sparing_table)
3021 return 0;
3022
3023 return ENOENT;
3024 }
3025
3026 /* --------------------------------------------------------------------- */
3027
3028 static int
3029 udf_read_metadata_nodes(struct udf_mount *ump, union udf_pmap *mapping)
3030 {
3031 struct part_map_meta *pmm = &mapping->pmm;
3032 struct long_ad icb_loc;
3033 struct vnode *vp;
3034 int error;
3035
3036 DPRINTF(VOLUMES, ("Reading in Metadata files\n"));
3037 icb_loc.loc.part_num = pmm->part_num;
3038 icb_loc.loc.lb_num = pmm->meta_file_lbn;
3039 DPRINTF(VOLUMES, ("Metadata file\n"));
3040 error = udf_get_node(ump, &icb_loc, &ump->metadata_node);
3041 if (ump->metadata_node) {
3042 vp = ump->metadata_node->vnode;
3043 UDF_SET_SYSTEMFILE(vp);
3044 }
3045
3046 icb_loc.loc.lb_num = pmm->meta_mirror_file_lbn;
3047 if (icb_loc.loc.lb_num != -1) {
3048 DPRINTF(VOLUMES, ("Metadata copy file\n"));
3049 error = udf_get_node(ump, &icb_loc, &ump->metadatamirror_node);
3050 if (ump->metadatamirror_node) {
3051 vp = ump->metadatamirror_node->vnode;
3052 UDF_SET_SYSTEMFILE(vp);
3053 }
3054 }
3055
3056 icb_loc.loc.lb_num = pmm->meta_bitmap_file_lbn;
3057 if (icb_loc.loc.lb_num != -1) {
3058 DPRINTF(VOLUMES, ("Metadata bitmap file\n"));
3059 error = udf_get_node(ump, &icb_loc, &ump->metadatabitmap_node);
3060 if (ump->metadatabitmap_node) {
3061 vp = ump->metadatabitmap_node->vnode;
3062 UDF_SET_SYSTEMFILE(vp);
3063 }
3064 }
3065
3066 /* if we're mounting read-only we relax the requirements */
3067 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) {
3068 error = EFAULT;
3069 if (ump->metadata_node)
3070 error = 0;
3071 if ((ump->metadata_node == NULL) && (ump->metadatamirror_node)) {
3072 printf( "udf mount: Metadata file not readable, "
3073 "substituting Metadata copy file\n");
3074 ump->metadata_node = ump->metadatamirror_node;
3075 ump->metadatamirror_node = NULL;
3076 error = 0;
3077 }
3078 } else {
3079 /* mounting read/write */
3080 /* XXX DISABLED! metadata writing is not working yet XXX */
3081 if (error)
3082 error = EROFS;
3083 }
3084 DPRINTFIF(VOLUMES, error, ("udf mount: failed to read "
3085 "metadata files\n"));
3086 return error;
3087 }
3088
3089 /* --------------------------------------------------------------------- */
3090
3091 int
3092 udf_read_vds_tables(struct udf_mount *ump)
3093 {
3094 union udf_pmap *mapping;
3095 /* struct udf_args *args = &ump->mount_args; */
3096 uint32_t n_pm, mt_l;
3097 uint32_t log_part;
3098 uint8_t *pmap_pos;
3099 int pmap_size;
3100 int error;
3101
3102 /* Iterate (again) over the part mappings for locations */
3103 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
3104 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */
3105 pmap_pos = ump->logical_vol->maps;
3106
3107 for (log_part = 0; log_part < n_pm; log_part++) {
3108 mapping = (union udf_pmap *) pmap_pos;
3109 switch (ump->vtop_tp[log_part]) {
3110 case UDF_VTOP_TYPE_PHYS :
3111 /* nothing */
3112 break;
3113 case UDF_VTOP_TYPE_VIRT :
3114 /* search and load VAT */
3115 error = udf_search_vat(ump, mapping);
3116 if (error)
3117 return ENOENT;
3118 break;
3119 case UDF_VTOP_TYPE_SPARABLE :
3120 /* load one of the sparable tables */
3121 error = udf_read_sparables(ump, mapping);
3122 if (error)
3123 return ENOENT;
3124 break;
3125 case UDF_VTOP_TYPE_META :
3126 /* load the associated file descriptors */
3127 error = udf_read_metadata_nodes(ump, mapping);
3128 if (error)
3129 return ENOENT;
3130 break;
3131 default:
3132 break;
3133 }
3134 pmap_size = pmap_pos[1];
3135 pmap_pos += pmap_size;
3136 }
3137
3138 /* read in and check unallocated and free space info if writing */
3139 if ((ump->vfs_mountp->mnt_flag & MNT_RDONLY) == 0) {
3140 error = udf_read_physical_partition_spacetables(ump);
3141 if (error)
3142 return error;
3143
3144 		/* also read in the metadata partition space bitmap if defined */
3145 error = udf_read_metadata_partition_spacetable(ump);
3146 return error;
3147 }
3148
3149 return 0;
3150 }
3151
3152 /* --------------------------------------------------------------------- */
3153
3154 int
3155 udf_read_rootdirs(struct udf_mount *ump)
3156 {
3157 union dscrptr *dscr;
3158 /* struct udf_args *args = &ump->mount_args; */
3159 struct udf_node *rootdir_node, *streamdir_node;
3160 struct long_ad fsd_loc, *dir_loc;
3161 uint32_t lb_num, dummy;
3162 uint32_t fsd_len;
3163 int dscr_type;
3164 int error;
3165
3166 /* TODO implement FSD reading in separate function like integrity? */
3167 /* get fileset descriptor sequence */
3168 fsd_loc = ump->logical_vol->lv_fsd_loc;
3169 fsd_len = udf_rw32(fsd_loc.len);
3170
3171 dscr = NULL;
3172 error = 0;
3173 while (fsd_len || error) {
3174 DPRINTF(VOLUMES, ("fsd_len = %d\n", fsd_len));
3175 /* translate fsd_loc to lb_num */
3176 error = udf_translate_vtop(ump, &fsd_loc, &lb_num, &dummy);
3177 if (error)
3178 break;
3179 DPRINTF(VOLUMES, ("Reading FSD at lb %d\n", lb_num));
3180 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
3181 /* end markers */
3182 if (error || (dscr == NULL))
3183 break;
3184
3185 /* analyse */
3186 dscr_type = udf_rw16(dscr->tag.id);
3187 if (dscr_type == TAGID_TERM)
3188 break;
3189 if (dscr_type != TAGID_FSD) {
3190 free(dscr, M_UDFVOLD);
3191 return ENOENT;
3192 }
3193
3194 		/*
3195 		 * TODO check for multiple fileset descriptors; it's only
3196 		 * picking the last one now. Also check for FSD
3197 		 * correctness/interpretability.
3198 		 */
3199
3200 /* update */
3201 if (ump->fileset_desc) {
3202 free(ump->fileset_desc, M_UDFVOLD);
3203 }
3204 ump->fileset_desc = &dscr->fsd;
3205 dscr = NULL;
3206
3207 /* continue to the next fsd */
3208 fsd_len -= ump->discinfo.sector_size;
3209 fsd_loc.loc.lb_num = udf_rw32(udf_rw32(fsd_loc.loc.lb_num)+1);
3210
3211 		/* follow up to fsd->next_ex (long_ad) if it's not null */
3212 if (udf_rw32(ump->fileset_desc->next_ex.len)) {
3213 DPRINTF(VOLUMES, ("follow up FSD extent\n"));
3214 fsd_loc = ump->fileset_desc->next_ex;
3215 fsd_len = udf_rw32(ump->fileset_desc->next_ex.len);
3216 }
3217 }
3218 if (dscr)
3219 free(dscr, M_UDFVOLD);
3220
3221 /* there has to be one */
3222 if (ump->fileset_desc == NULL)
3223 return ENOENT;
3224
3225 DPRINTF(VOLUMES, ("FSD read in fine\n"));
3226 DPRINTF(VOLUMES, ("Updating fsd logical volume id\n"));
3227 udf_update_logvolname(ump, ump->logical_vol->logvol_id);
3228
3229 /*
3230  * Now the FSD is known, read in the root directory and, if one exists,
3231 * the system stream dir. Some files in the system streamdir are not
3232 * wanted in this implementation since they are not maintained. If
3233 * writing is enabled we'll delete these files if they exist.
3234 */
3235
3236 rootdir_node = streamdir_node = NULL;
3237 dir_loc = NULL;
3238
3239 /* try to read in the rootdir */
3240 dir_loc = &ump->fileset_desc->rootdir_icb;
3241 error = udf_get_node(ump, dir_loc, &rootdir_node);
3242 if (error)
3243 return ENOENT;
3244
3245 	/* apparently it read in fine */
3246
3247 /*
3248 * Try the system stream directory; not very likely in the ones we
3249 * test, but for completeness.
3250 */
3251 dir_loc = &ump->fileset_desc->streamdir_icb;
3252 if (udf_rw32(dir_loc->len)) {
3253 printf("udf_read_rootdirs: streamdir defined ");
3254 error = udf_get_node(ump, dir_loc, &streamdir_node);
3255 if (error) {
3256 printf("but error in streamdir reading\n");
3257 } else {
3258 printf("but ignored\n");
3259 /*
3260 			 * TODO process streamdir `baddies' i.e. files we don't
3261 * want if R/W
3262 */
3263 }
3264 }
3265
3266 DPRINTF(VOLUMES, ("Rootdir(s) read in fine\n"));
3267
3268 /* release the vnodes again; they'll be auto-recycled later */
3269 if (streamdir_node) {
3270 vput(streamdir_node->vnode);
3271 }
3272 if (rootdir_node) {
3273 vput(rootdir_node->vnode);
3274 }
3275
3276 return 0;
3277 }
3278
3279 /* --------------------------------------------------------------------- */
3280
3281 /* To make absolutely sure we are NOT returning zero, add one :) */
3282
3283 long
3284 udf_calchash(struct long_ad *icbptr)
3285 {
3286 /* ought to be enough since each mountpoint has its own chain */
3287 return udf_rw32(icbptr->loc.lb_num) + 1;
3288 }
3289
3290
3291 static struct udf_node *
3292 udf_hash_lookup(struct udf_mount *ump, struct long_ad *icbptr)
3293 {
3294 struct udf_node *node;
3295 struct vnode *vp;
3296 uint32_t hashline;
3297
3298 loop:
3299 mutex_enter(&ump->ihash_lock);
3300
3301 hashline = udf_calchash(icbptr) & UDF_INODE_HASHMASK;
3302 LIST_FOREACH(node, &ump->udf_nodes[hashline], hashchain) {
3303 assert(node);
3304 if (node->loc.loc.lb_num == icbptr->loc.lb_num &&
3305 node->loc.loc.part_num == icbptr->loc.part_num) {
3306 vp = node->vnode;
3307 assert(vp);
3308 mutex_enter(&vp->v_interlock);
3309 mutex_exit(&ump->ihash_lock);
3310 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
3311 goto loop;
3312 return node;
3313 }
3314 }
3315 mutex_exit(&ump->ihash_lock);
3316
3317 return NULL;
3318 }
3319
3320
3321 static void
3322 udf_sorted_list_insert(struct udf_node *node)
3323 {
3324 struct udf_mount *ump;
3325 struct udf_node *s_node, *last_node;
3326 uint32_t loc, s_loc;
3327
3328 ump = node->ump;
3329 last_node = NULL; /* XXX gcc */
3330
3331 if (LIST_EMPTY(&ump->sorted_udf_nodes)) {
3332 LIST_INSERT_HEAD(&ump->sorted_udf_nodes, node, sortchain);
3333 return;
3334 }
3335
3336 	/*
3337 	 * We sort on logical block number here, not on physical block
3338 	 * number. Ideally we should go for the physical block number to get
3339 	 * better sync performance, though this sort will ensure that packets
3340 	 * won't get split up unnecessarily.
3341 	 */
3342
3343 loc = udf_rw32(node->loc.loc.lb_num);
3344 LIST_FOREACH(s_node, &ump->sorted_udf_nodes, sortchain) {
3345 s_loc = udf_rw32(s_node->loc.loc.lb_num);
3346 if (s_loc > loc) {
3347 LIST_INSERT_BEFORE(s_node, node, sortchain);
3348 return;
3349 }
3350 last_node = s_node;
3351 }
3352 LIST_INSERT_AFTER(last_node, node, sortchain);
3353 }
3354
3355
3356 static void
3357 udf_register_node(struct udf_node *node)
3358 {
3359 struct udf_mount *ump;
3360 struct udf_node *chk;
3361 uint32_t hashline;
3362
3363 ump = node->ump;
3364 mutex_enter(&ump->ihash_lock);
3365
3366 /* add to our hash table */
3367 hashline = udf_calchash(&node->loc) & UDF_INODE_HASHMASK;
3368 #ifdef DEBUG
3369 LIST_FOREACH(chk, &ump->udf_nodes[hashline], hashchain) {
3370 assert(chk);
3371 if (chk->loc.loc.lb_num == node->loc.loc.lb_num &&
3372 chk->loc.loc.part_num == node->loc.loc.part_num)
3373 panic("Double node entered\n");
3374 }
3375 #else
3376 chk = NULL;
3377 #endif
3378 LIST_INSERT_HEAD(&ump->udf_nodes[hashline], node, hashchain);
3379
3380 /* add to our sorted list */
3381 udf_sorted_list_insert(node);
3382
3383 mutex_exit(&ump->ihash_lock);
3384 }
3385
3386
3387 static void
3388 udf_deregister_node(struct udf_node *node)
3389 {
3390 struct udf_mount *ump;
3391
3392 ump = node->ump;
3393 mutex_enter(&ump->ihash_lock);
3394
3395 /* from hash and sorted list */
3396 LIST_REMOVE(node, hashchain);
3397 LIST_REMOVE(node, sortchain);
3398
3399 mutex_exit(&ump->ihash_lock);
3400 }
3401
3402 /* --------------------------------------------------------------------- */
3403
3404 int
3405 udf_open_logvol(struct udf_mount *ump)
3406 {
3407 int logvol_integrity;
3408 int error;
3409
3410 /* already/still open? */
3411 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
3412 if (logvol_integrity == UDF_INTEGRITY_OPEN)
3413 return 0;
3414
3415 /* can we open it ? */
3416 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
3417 return EROFS;
3418
3419 /* setup write parameters */
3420 DPRINTF(VOLUMES, ("Setting up write parameters\n"));
3421 if ((error = udf_setup_writeparams(ump)) != 0)
3422 return error;
3423
3424 /* determine data and metadata tracks (most likely same) */
3425 error = udf_search_writing_tracks(ump);
3426 if (error) {
3427 /* most likely lack of space */
3428 printf("udf_open_logvol: error searching writing tracks\n");
3429 return EROFS;
3430 }
3431
3432 /* writeout/update lvint on disc or only in memory */
3433 DPRINTF(VOLUMES, ("Opening logical volume\n"));
3434 if (ump->lvopen & UDF_OPEN_SESSION) {
3435 /* TODO implement writeout of VRS + VDS */
3436 		printf( "udf_open_logvol: Opening a closed session not yet "
3437 "implemented\n");
3438 return EROFS;
3439
3440 /* determine data and metadata tracks again */
3441 error = udf_search_writing_tracks(ump);
3442 }
3443
3444 /* mark it open */
3445 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_OPEN);
3446
3447 /* do we need to write it out? */
3448 if (ump->lvopen & UDF_WRITE_LVINT) {
3449 error = udf_writeout_lvint(ump, ump->lvopen);
3450 /* if we couldn't write it mark it closed again */
3451 if (error) {
3452 ump->logvol_integrity->integrity_type =
3453 udf_rw32(UDF_INTEGRITY_CLOSED);
3454 return error;
3455 }
3456 }
3457
3458 return 0;
3459 }
3460
3461
3462 int
3463 udf_close_logvol(struct udf_mount *ump, int mntflags)
3464 {
3465 int logvol_integrity;
3466 int error = 0, error1 = 0, error2 = 0;
3467 int n;
3468
3469 /* already/still closed? */
3470 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
3471 if (logvol_integrity == UDF_INTEGRITY_CLOSED)
3472 return 0;
3473
3474 /* writeout/update lvint or write out VAT */
3475 DPRINTF(VOLUMES, ("Closing logical volume\n"));
3476 if (ump->lvclose & UDF_WRITE_VAT) {
3477 DPRINTF(VOLUMES, ("lvclose & UDF_WRITE_VAT\n"));
3478
3479 /* write out the VAT node */
3480 DPRINTF(VOLUMES, ("writeout vat_node\n"));
3481 udf_writeout_vat(ump);
3482
3483 vflushbuf(ump->vat_node->vnode, 1 /* sync */);
3484 for (n = 0; n < 16; n++) {
3485 ump->vat_node->i_flags |= IN_MODIFIED;
3486 error = VOP_FSYNC(ump->vat_node->vnode,
3487 FSCRED, FSYNC_WAIT, 0, 0);
3488 }
3489 if (error) {
3490 printf("udf_close_logvol: writeout of VAT failed\n");
3491 return error;
3492 }
3493 }
3494
3495 if (ump->lvclose & UDF_WRITE_PART_BITMAPS) {
3496 /* sync writeout metadata spacetable if existing */
3497 error1 = udf_write_metadata_partition_spacetable(ump, true);
3498 if (error1)
3499 printf( "udf_close_logvol: writeout of metadata space "
3500 "bitmap failed\n");
3501
3502 /* sync writeout partition spacetables */
3503 error2 = udf_write_physical_partition_spacetables(ump, true);
3504 if (error2)
3505 printf( "udf_close_logvol: writeout of space tables "
3506 "failed\n");
3507
3508 if (error1 || error2)
3509 return (error1 | error2);
3510
3511 ump->lvclose &= ~UDF_WRITE_PART_BITMAPS;
3512 }
3513
3514 if (ump->lvclose & UDF_CLOSE_SESSION) {
3515 printf("TODO: Closing a session is not yet implemented\n");
3516 return EROFS;
3517 ump->lvopen |= UDF_OPEN_SESSION;
3518 }
3519
3520 /* mark it closed */
3521 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
3522
3523 /* do we need to write out the logical volume integrity */
3524 if (ump->lvclose & UDF_WRITE_LVINT)
3525 error = udf_writeout_lvint(ump, ump->lvopen);
3526 if (error) {
3527 /* HELP now what? mark it open again for now */
3528 ump->logvol_integrity->integrity_type =
3529 udf_rw32(UDF_INTEGRITY_OPEN);
3530 return error;
3531 }
3532
3533 (void) udf_synchronise_caches(ump);
3534
3535 return 0;
3536 }
3537
3538 /* --------------------------------------------------------------------- */
3539
3540 /*
3541 * Genfs interfacing
3542 *
3543 * static const struct genfs_ops udf_genfsops = {
3544 * .gop_size = genfs_size,
3545 * size of transfers
3546 * .gop_alloc = udf_gop_alloc,
3547 * allocate len bytes at offset
3548 * .gop_write = genfs_gop_write,
3549 * putpages interface code
3550 * .gop_markupdate = udf_gop_markupdate,
3551 * set update/modify flags etc.
3552 * }
3553 */
3554
3555 /*
3556 * Genfs interface. These four functions are the only ones defined though not
3557 * documented... great....
3558 */
3559
3560 /*
3561 * Callback from genfs to allocate len bytes at offset off; only called when
3562 * filling up gaps in the allocation.
3563 */
3564 /* XXX should we check if there is space enough in udf_gop_alloc? */
3565 static int
3566 udf_gop_alloc(struct vnode *vp, off_t off,
3567 off_t len, int flags, kauth_cred_t cred)
3568 {
3569 #if 0
3570 struct udf_node *udf_node = VTOI(vp);
3571 struct udf_mount *ump = udf_node->ump;
3572 uint32_t lb_size, num_lb;
3573 #endif
3574
3575 DPRINTF(NOTIMPL, ("udf_gop_alloc not implemented\n"));
3576 DPRINTF(ALLOC, ("udf_gop_alloc called for %"PRIu64" bytes\n", len));
3577
3578 return 0;
3579 }
3580
3581
3582 /*
3583 * callback from genfs to update our flags
3584 */
3585 static void
3586 udf_gop_markupdate(struct vnode *vp, int flags)
3587 {
3588 struct udf_node *udf_node = VTOI(vp);
3589 u_long mask = 0;
3590
3591 if ((flags & GOP_UPDATE_ACCESSED) != 0) {
3592 mask = IN_ACCESS;
3593 }
3594 if ((flags & GOP_UPDATE_MODIFIED) != 0) {
3595 if (vp->v_type == VREG) {
3596 mask |= IN_CHANGE | IN_UPDATE;
3597 } else {
3598 mask |= IN_MODIFY;
3599 }
3600 }
3601 if (mask) {
3602 udf_node->i_flags |= mask;
3603 }
3604 }
3605
3606
3607 static const struct genfs_ops udf_genfsops = {
3608 .gop_size = genfs_size,
3609 .gop_alloc = udf_gop_alloc,
3610 .gop_write = genfs_gop_write_rwmap,
3611 .gop_markupdate = udf_gop_markupdate,
3612 };
3613
3614
3615 /* --------------------------------------------------------------------- */
3616
3617 int
3618 udf_write_terminator(struct udf_mount *ump, uint32_t sector)
3619 {
3620 union dscrptr *dscr;
3621 int error;
3622
3623 dscr = malloc(ump->discinfo.sector_size, M_TEMP, M_WAITOK);
3624 bzero(dscr, ump->discinfo.sector_size);
3625 udf_inittag(ump, &dscr->tag, TAGID_TERM, sector);
3626
3627 /* CRC length for an anchor is 512 - tag length; defined in Ecma 167 */
3628 dscr->tag.desc_crc_len = udf_rw16(512-UDF_DESC_TAG_LENGTH);
3629 (void) udf_validate_tag_and_crc_sums(dscr);
3630
3631 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
3632 dscr, sector, sector);
3633
3634 free(dscr, M_TEMP);
3635
3636 return error;
3637 }
3638
3639
3640 /* --------------------------------------------------------------------- */
3641
3642 /* UDF<->unix converters */
3643
3644 /* --------------------------------------------------------------------- */
3645
3646 static mode_t
3647 udf_perm_to_unix_mode(uint32_t perm)
3648 {
3649 mode_t mode;
3650
3651 mode = ((perm & UDF_FENTRY_PERM_USER_MASK) );
3652 mode |= ((perm & UDF_FENTRY_PERM_GRP_MASK ) >> 2);
3653 mode |= ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4);
3654
3655 return mode;
3656 }
3657
3658 /* --------------------------------------------------------------------- */
3659
3660 static uint32_t
3661 unix_mode_to_udf_perm(mode_t mode)
3662 {
3663 uint32_t perm;
3664
3665 perm = ((mode & S_IRWXO) );
3666 perm |= ((mode & S_IRWXG) << 2);
3667 perm |= ((mode & S_IRWXU) << 4);
3668 perm |= ((mode & S_IWOTH) << 3);
3669 perm |= ((mode & S_IWGRP) << 5);
3670 perm |= ((mode & S_IWUSR) << 7);
3671
3672 return perm;
3673 }
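
/*
 * Worked example of the two conversions above (bit layout as in ECMA 167
 * 4/14.9.5; a unix write bit additionally grants the UDF delete bit):
 *
 *	unix_mode_to_udf_perm(0754)  == 0x5ca4
 *		owner rwx -> bits 10-12, owner delete -> bit 14
 *		group r-x -> bits 7 and 5
 *		other r-- -> bit 2
 *	udf_perm_to_unix_mode(0x5ca4) == 0754 again
 */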
3674
3675 /* --------------------------------------------------------------------- */
3676
3677 static uint32_t
3678 udf_icb_to_unix_filetype(uint32_t icbftype)
3679 {
3680 switch (icbftype) {
3681 case UDF_ICB_FILETYPE_DIRECTORY :
3682 case UDF_ICB_FILETYPE_STREAMDIR :
3683 return S_IFDIR;
3684 case UDF_ICB_FILETYPE_FIFO :
3685 return S_IFIFO;
3686 case UDF_ICB_FILETYPE_CHARDEVICE :
3687 return S_IFCHR;
3688 case UDF_ICB_FILETYPE_BLOCKDEVICE :
3689 return S_IFBLK;
3690 case UDF_ICB_FILETYPE_RANDOMACCESS :
3691 case UDF_ICB_FILETYPE_REALTIME :
3692 return S_IFREG;
3693 case UDF_ICB_FILETYPE_SYMLINK :
3694 return S_IFLNK;
3695 case UDF_ICB_FILETYPE_SOCKET :
3696 return S_IFSOCK;
3697 }
3698 /* no idea what this is */
3699 return 0;
3700 }
3701
3702 /* --------------------------------------------------------------------- */
3703
3704 void
3705 udf_to_unix_name(char *result, int result_len, char *id, int len,
3706 struct charspec *chsp)
3707 {
3708 uint16_t *raw_name, *unix_name;
3709 uint16_t *inchp, ch;
3710 uint8_t *outchp;
3711 const char *osta_id = "OSTA Compressed Unicode";
3712 int ucode_chars, nice_uchars, is_osta_typ0, nout;
3713
3714 raw_name = malloc(2048 * sizeof(uint16_t), M_UDFTEMP, M_WAITOK);
3715 unix_name = raw_name + 1024; /* split space in half */
3716 assert(sizeof(char) == sizeof(uint8_t));
3717 outchp = (uint8_t *) result;
3718
3719 is_osta_typ0 = (chsp->type == 0);
3720 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
3721 if (is_osta_typ0) {
3722 /* TODO clean up */
3723 *raw_name = *unix_name = 0;
3724 ucode_chars = udf_UncompressUnicode(len, (uint8_t *) id, raw_name);
3725 ucode_chars = MIN(ucode_chars, UnicodeLength((unicode_t *) raw_name));
3726 nice_uchars = UDFTransName(unix_name, raw_name, ucode_chars);
3727 /* output UTF8 */
3728 for (inchp = unix_name; nice_uchars>0; inchp++, nice_uchars--) {
3729 ch = *inchp;
3730 nout = wput_utf8(outchp, result_len, ch);
3731 outchp += nout; result_len -= nout;
3732 if (!ch) break;
3733 }
3734 *outchp++ = 0;
3735 } else {
3736 /* assume 8bit char length byte latin-1 */
3737 assert(*id == 8);
3738 assert(strlen((char *) (id+1)) <= MAXNAMLEN);
3739 strncpy((char *) result, (char *) (id+1), strlen((char *) (id+1)));
3740 }
3741 free(raw_name, M_UDFTEMP);
3742 }
3743
3744 /* --------------------------------------------------------------------- */
3745
3746 void
3747 unix_to_udf_name(char *result, uint8_t *result_len, char const *name, int name_len,
3748 struct charspec *chsp)
3749 {
3750 uint16_t *raw_name;
3751 uint16_t *outchp;
3752 const char *inchp;
3753 const char *osta_id = "OSTA Compressed Unicode";
3754 int udf_chars, is_osta_typ0, bits;
3755 size_t cnt;
3756
3757 /* allocate temporary unicode-16 buffer */
3758 raw_name = malloc(1024, M_UDFTEMP, M_WAITOK);
3759
3760 /* convert utf8 to unicode-16 */
3761 *raw_name = 0;
3762 inchp = name;
3763 outchp = raw_name;
3764 bits = 8;
3765 for (cnt = name_len, udf_chars = 0; cnt;) {
3767 *outchp = wget_utf8(&inchp, &cnt);
3768 if (*outchp > 0xff)
3769 bits=16;
3770 outchp++;
3771 udf_chars++;
3772 }
3773 /* null terminate just in case */
3774 *outchp++ = 0;
3775
3776 is_osta_typ0 = (chsp->type == 0);
3777 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
3778 if (is_osta_typ0) {
3779 udf_chars = udf_CompressUnicode(udf_chars, bits,
3780 (unicode_t *) raw_name,
3781 (byte *) result);
3782 } else {
3783 printf("unix to udf name: no CHSP0 ?\n");
3784 /* XXX assume 8bit char length byte latin-1 */
3785 *result++ = 8; udf_chars = 1;
3786 		strncpy(result, name, name_len);
3787 udf_chars += name_len;
3788 }
3789 *result_len = udf_chars;
3790 free(raw_name, M_UDFTEMP);
3791 }
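
/*
 * Example of the CS0 d-string produced above (per the OSTA CS0 reference
 * routines): the name "abc" only contains characters below 0x100, so
 * bits stays 8 and the result is { 0x08, 'a', 'b', 'c' } with
 * *result_len = 4; a name with any character above 0xff would be stored
 * as { 0x10, <16 bit characters, most significant byte first> }.
 */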
3792
3793 /* --------------------------------------------------------------------- */
3794
3795 void
3796 udf_timestamp_to_timespec(struct udf_mount *ump,
3797 struct timestamp *timestamp,
3798 struct timespec *timespec)
3799 {
3800 struct clock_ymdhms ymdhms;
3801 uint32_t usecs, secs, nsecs;
3802 uint16_t tz;
3803
3804 /* fill in ymdhms structure from timestamp */
3805 memset(&ymdhms, 0, sizeof(ymdhms));
3806 ymdhms.dt_year = udf_rw16(timestamp->year);
3807 ymdhms.dt_mon = timestamp->month;
3808 ymdhms.dt_day = timestamp->day;
3809 ymdhms.dt_wday = 0; /* ? */
3810 ymdhms.dt_hour = timestamp->hour;
3811 ymdhms.dt_min = timestamp->minute;
3812 ymdhms.dt_sec = timestamp->second;
3813
3814 secs = clock_ymdhms_to_secs(&ymdhms);
3815 usecs = timestamp->usec +
3816 100*timestamp->hund_usec + 10000*timestamp->centisec;
3817 nsecs = usecs * 1000;
3818
3819 /*
3820 	 * Calculate the time zone. The time zone is a 12 bit signed two's
3821 	 * complement number, so sign-extend it before using it.
3822 */
3823 tz = udf_rw16(timestamp->type_tz);
3824 tz &= 0x0fff; /* only lower 12 bits are significant */
3825 	if (tz & 0x0800)		/* sign extension */
3826 tz |= 0xf000;
3827
3828 /* TODO check timezone conversion */
3829 	/* check whether a time zone to convert from was specified */
3830 if (udf_rw16(timestamp->type_tz) & 0x1000) {
3831 if ((int16_t) tz != -2047)
3832 secs -= (int16_t) tz * 60;
3833 } else {
3834 secs -= ump->mount_args.gmtoff;
3835 }
3836
3837 timespec->tv_sec = secs;
3838 timespec->tv_nsec = nsecs;
3839 }
3840
3841
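/*
 * Inverse of udf_timestamp_to_timespec(); timestamps are recorded in
 * CUT/GMT, i.e. time zone type 1 with a zero offset.
 */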
3842 void
3843 udf_timespec_to_timestamp(struct timespec *timespec, struct timestamp *timestamp)
3844 {
3845 struct clock_ymdhms ymdhms;
3846 uint32_t husec, usec, csec;
3847
3848 (void) clock_secs_to_ymdhms(timespec->tv_sec, &ymdhms);
3849
3850 usec = timespec->tv_nsec / 1000;
3851 husec = usec / 100;
3852 usec -= husec * 100; /* only 0-99 in usec */
3853 csec = husec / 100; /* only 0-99 in csec */
3854 husec -= csec * 100; /* only 0-99 in husec */
3855
3856 /* set method 1 for CUT/GMT */
3857 timestamp->type_tz = udf_rw16((1<<12) + 0);
3858 timestamp->year = udf_rw16(ymdhms.dt_year);
3859 timestamp->month = ymdhms.dt_mon;
3860 timestamp->day = ymdhms.dt_day;
3861 timestamp->hour = ymdhms.dt_hour;
3862 timestamp->minute = ymdhms.dt_min;
3863 timestamp->second = ymdhms.dt_sec;
3864 timestamp->centisec = csec;
3865 timestamp->hund_usec = husec;
3866 timestamp->usec = usec;
3867 }
3868
3869 /* --------------------------------------------------------------------- */
3870
3871 /*
3872 * Attribute and filetypes converters with get/set pairs
3873 */
3874
3875 uint32_t
3876 udf_getaccessmode(struct udf_node *udf_node)
3877 {
3878 	struct file_entry *fe = udf_node->fe;
3879 struct extfile_entry *efe = udf_node->efe;
3880 uint32_t udf_perm, icbftype;
3881 uint32_t mode, ftype;
3882 uint16_t icbflags;
3883
3884 UDF_LOCK_NODE(udf_node, 0);
3885 if (fe) {
3886 udf_perm = udf_rw32(fe->perm);
3887 icbftype = fe->icbtag.file_type;
3888 icbflags = udf_rw16(fe->icbtag.flags);
3889 } else {
3890 assert(udf_node->efe);
3891 udf_perm = udf_rw32(efe->perm);
3892 icbftype = efe->icbtag.file_type;
3893 icbflags = udf_rw16(efe->icbtag.flags);
3894 }
3895
3896 mode = udf_perm_to_unix_mode(udf_perm);
3897 ftype = udf_icb_to_unix_filetype(icbftype);
3898
3899 /* set suid, sgid, sticky from flags in fe/efe */
3900 if (icbflags & UDF_ICB_TAG_FLAGS_SETUID)
3901 mode |= S_ISUID;
3902 if (icbflags & UDF_ICB_TAG_FLAGS_SETGID)
3903 mode |= S_ISGID;
3904 if (icbflags & UDF_ICB_TAG_FLAGS_STICKY)
3905 mode |= S_ISVTX;
3906
3907 UDF_UNLOCK_NODE(udf_node, 0);
3908
3909 return mode | ftype;
3910 }
3911
3912
3913 void
3914 udf_setaccessmode(struct udf_node *udf_node, mode_t mode)
3915 {
3916 struct file_entry *fe = udf_node->fe;
3917 struct extfile_entry *efe = udf_node->efe;
3918 uint32_t udf_perm;
3919 uint16_t icbflags;
3920
3921 UDF_LOCK_NODE(udf_node, 0);
3922 udf_perm = unix_mode_to_udf_perm(mode & ALLPERMS);
3923 if (fe) {
3924 icbflags = udf_rw16(fe->icbtag.flags);
3925 } else {
3926 icbflags = udf_rw16(efe->icbtag.flags);
3927 }
3928
3929 icbflags &= ~UDF_ICB_TAG_FLAGS_SETUID;
3930 icbflags &= ~UDF_ICB_TAG_FLAGS_SETGID;
3931 icbflags &= ~UDF_ICB_TAG_FLAGS_STICKY;
3932 if (mode & S_ISUID)
3933 icbflags |= UDF_ICB_TAG_FLAGS_SETUID;
3934 if (mode & S_ISGID)
3935 icbflags |= UDF_ICB_TAG_FLAGS_SETGID;
3936 if (mode & S_ISVTX)
3937 icbflags |= UDF_ICB_TAG_FLAGS_STICKY;
3938
3939 if (fe) {
3940 fe->perm = udf_rw32(udf_perm);
3941 fe->icbtag.flags = udf_rw16(icbflags);
3942 } else {
3943 efe->perm = udf_rw32(udf_perm);
3944 efe->icbtag.flags = udf_rw16(icbflags);
3945 }
3946
3947 UDF_UNLOCK_NODE(udf_node, 0);
3948 }
3949
3950
3951 void
3952 udf_getownership(struct udf_node *udf_node, uid_t *uidp, gid_t *gidp)
3953 {
3954 struct udf_mount *ump = udf_node->ump;
3955 struct file_entry *fe = udf_node->fe;
3956 struct extfile_entry *efe = udf_node->efe;
3957 uid_t uid;
3958 gid_t gid;
3959
3960 UDF_LOCK_NODE(udf_node, 0);
3961 if (fe) {
3962 uid = (uid_t)udf_rw32(fe->uid);
3963 gid = (gid_t)udf_rw32(fe->gid);
3964 } else {
3965 assert(udf_node->efe);
3966 uid = (uid_t)udf_rw32(efe->uid);
3967 gid = (gid_t)udf_rw32(efe->gid);
3968 }
3969
3970 /* do the uid/gid translation game */
3971 if ((uid == (uid_t) -1) && (gid == (gid_t) -1)) {
3972 uid = ump->mount_args.anon_uid;
3973 gid = ump->mount_args.anon_gid;
3974 }
3975 *uidp = uid;
3976 *gidp = gid;
3977
3978 UDF_UNLOCK_NODE(udf_node, 0);
3979 }
3980
3981
3982 void
3983 udf_setownership(struct udf_node *udf_node, uid_t uid, gid_t gid)
3984 {
3985 struct udf_mount *ump = udf_node->ump;
3986 struct file_entry *fe = udf_node->fe;
3987 struct extfile_entry *efe = udf_node->efe;
3988 uid_t nobody_uid;
3989 gid_t nobody_gid;
3990
3991 UDF_LOCK_NODE(udf_node, 0);
3992
3993 /* do the uid/gid translation game */
3994 nobody_uid = ump->mount_args.nobody_uid;
3995 nobody_gid = ump->mount_args.nobody_gid;
3996 if ((uid == nobody_uid) && (gid == nobody_gid)) {
3997 uid = (uid_t) -1;
3998 gid = (gid_t) -1;
3999 }
4000
4001 if (fe) {
4002 fe->uid = udf_rw32((uint32_t) uid);
4003 fe->gid = udf_rw32((uint32_t) gid);
4004 } else {
4005 efe->uid = udf_rw32((uint32_t) uid);
4006 efe->gid = udf_rw32((uint32_t) gid);
4007 }
4008
4009 UDF_UNLOCK_NODE(udf_node, 0);
4010 }
4011
4012
4013 /* --------------------------------------------------------------------- */
4014
4015 /*
4016 * UDF dirhash implementation
4017 */
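/*
 * Typical calling sequence, cf. udf_lookup_name_in_dir() below:
 *
 *	udf_dirhash_get(&dir_node->dir_hash);
 *	error = udf_dirhash_fill(dir_node);
 *	...
 *	while (udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep))
 *		(check the candidate entry)
 *	...
 *	udf_dirhash_put(dir_node->dir_hash);
 */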
4018
4019 static uint32_t
4020 udf_dirhash_hash(const char *str, int namelen)
4021 {
4022 uint32_t hash = 5381;
4023 int i, c;
4024
4025 for (i = 0; i < namelen; i++) {
4026 c = *str++;
4027 hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
4028 }
4029 return hash;
4030 }
4031
4032
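/*
 * Release all hashed and free-space entries of a dirhash and subtract the
 * freed memory from the global udf_dirhashsize accounting.
 */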
4033 static void
4034 udf_dirhash_purge(struct udf_dirhash *dirh)
4035 {
4036 struct udf_dirhash_entry *dirh_e;
4037 uint32_t hashline;
4038
4039 if (dirh == NULL)
4040 return;
4041
4042 if (dirh->size == 0)
4043 return;
4044
4045 for (hashline = 0; hashline < UDF_DIRHASH_HASHSIZE; hashline++) {
4046 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
4047 while (dirh_e) {
4048 LIST_REMOVE(dirh_e, next);
4049 pool_put(&udf_dirhash_entry_pool, dirh_e);
4050 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
4051 }
4052 }
4053 dirh_e = LIST_FIRST(&dirh->free_entries);
4054
4055 while (dirh_e) {
4056 LIST_REMOVE(dirh_e, next);
4057 pool_put(&udf_dirhash_entry_pool, dirh_e);
4058 		dirh_e = LIST_FIRST(&dirh->free_entries);
4059 }
4060
4061 dirh->flags &= ~UDF_DIRH_COMPLETE;
4062 dirh->flags |= UDF_DIRH_PURGED;
4063
4064 udf_dirhashsize -= dirh->size;
4065 dirh->size = 0;
4066 }
4067
4068
4069 static void
4070 udf_dirhash_destroy(struct udf_dirhash **dirhp)
4071 {
4072 struct udf_dirhash *dirh = *dirhp;
4073
4074 if (dirh == NULL)
4075 return;
4076
4077 mutex_enter(&udf_dirhashmutex);
4078
4079 udf_dirhash_purge(dirh);
4080 TAILQ_REMOVE(&udf_dirhash_queue, dirh, next);
4081 pool_put(&udf_dirhash_pool, dirh);
4082
4083 *dirhp = NULL;
4084
4085 mutex_exit(&udf_dirhashmutex);
4086 }
4087
4088
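/*
 * Get a reference to the dirhash of a directory, allocating and
 * initialising one when not present, and move it to the front of the
 * global LRU queue.
 */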
4089 static void
4090 udf_dirhash_get(struct udf_dirhash **dirhp)
4091 {
4092 struct udf_dirhash *dirh;
4093 uint32_t hashline;
4094
4095 mutex_enter(&udf_dirhashmutex);
4096
4097 dirh = *dirhp;
4098 if (*dirhp == NULL) {
4099 dirh = pool_get(&udf_dirhash_pool, PR_WAITOK);
4100 *dirhp = dirh;
4101 memset(dirh, 0, sizeof(struct udf_dirhash));
4102 for (hashline = 0; hashline < UDF_DIRHASH_HASHSIZE; hashline++)
4103 LIST_INIT(&dirh->entries[hashline]);
4104 dirh->size = 0;
4105 dirh->refcnt = 0;
4106 dirh->flags = 0;
4107 } else {
4108 TAILQ_REMOVE(&udf_dirhash_queue, dirh, next);
4109 }
4110
4111 dirh->refcnt++;
4112 TAILQ_INSERT_HEAD(&udf_dirhash_queue, dirh, next);
4113
4114 mutex_exit(&udf_dirhashmutex);
4115 }
4116
4117
4118 static void
4119 udf_dirhash_put(struct udf_dirhash *dirh)
4120 {
4121 mutex_enter(&udf_dirhashmutex);
4122 dirh->refcnt--;
4123 mutex_exit(&udf_dirhashmutex);
4124 }
4125
4126
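/*
 * Enter the fid/dirent found at `offset' into the dirhash. When the global
 * dirhash memory limit would be exceeded, least recently used dirhashes of
 * other directories are purged first.
 */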
4127 static void
4128 udf_dirhash_enter(struct udf_node *dir_node, struct fileid_desc *fid,
4129 struct dirent *dirent, uint64_t offset, uint32_t fid_size, int new)
4130 {
4131 struct udf_dirhash *dirh, *del_dirh, *prev_dirh;
4132 struct udf_dirhash_entry *dirh_e;
4133 uint32_t hashvalue, hashline;
4134 int entrysize;
4135
4136 /* make sure we have a dirhash to work on */
4137 dirh = dir_node->dir_hash;
4138 KASSERT(dirh);
4139 KASSERT(dirh->refcnt > 0);
4140
4141 /* are we trying to re-enter an entry? */
4142 if (!new && (dirh->flags & UDF_DIRH_COMPLETE))
4143 return;
4144
4145 /* calculate our hash */
4146 hashvalue = udf_dirhash_hash(dirent->d_name, dirent->d_namlen);
4147 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
4148
4149 /* lookup and insert entry if not there yet */
4150 LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) {
4151 /* check for hash collision */
4152 if (dirh_e->hashvalue != hashvalue)
4153 continue;
4154 if (dirh_e->offset != offset)
4155 continue;
4156 /* got it already */
4157 KASSERT(dirh_e->d_namlen == dirent->d_namlen);
4158 KASSERT(dirh_e->fid_size == fid_size);
4159 return;
4160 }
4161
4162 DPRINTF(DIRHASH, ("dirhash enter %"PRIu64", %d, %d for `%*.*s`\n",
4163 offset, fid_size, dirent->d_namlen,
4164 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4165
4166 /* check if entry is in free space list */
4167 LIST_FOREACH(dirh_e, &dirh->free_entries, next) {
4168 if (dirh_e->offset == offset) {
4169 DPRINTF(DIRHASH, ("\tremoving free entry\n"));
4170 LIST_REMOVE(dirh_e, next);
4171 break;
4172 }
4173 }
4174
4175 /* ensure we are not passing the dirhash limit */
4176 entrysize = sizeof(struct udf_dirhash_entry);
4177 if (udf_dirhashsize + entrysize > udf_maxdirhashsize) {
4178 del_dirh = TAILQ_LAST(&udf_dirhash_queue, _udf_dirhash);
4179 KASSERT(del_dirh);
4180 while (udf_dirhashsize + entrysize > udf_maxdirhashsize) {
4181 /* no use trying to delete myself */
4182 if (del_dirh == dirh)
4183 break;
4184 prev_dirh = TAILQ_PREV(del_dirh, _udf_dirhash, next);
4185 if (del_dirh->refcnt == 0)
4186 udf_dirhash_purge(del_dirh);
4187 del_dirh = prev_dirh;
4188 }
4189 }
4190
4191 /* add to the hashline */
4192 dirh_e = pool_get(&udf_dirhash_entry_pool, PR_WAITOK);
4193 memset(dirh_e, 0, sizeof(struct udf_dirhash_entry));
4194
4195 dirh_e->hashvalue = hashvalue;
4196 dirh_e->offset = offset;
4197 dirh_e->d_namlen = dirent->d_namlen;
4198 dirh_e->fid_size = fid_size;
4199
4200 dirh->size += sizeof(struct udf_dirhash_entry);
4201 udf_dirhashsize += sizeof(struct udf_dirhash_entry);
4202 LIST_INSERT_HEAD(&dirh->entries[hashline], dirh_e, next);
4203 }
4204
4205
4206 static void
4207 udf_dirhash_enter_freed(struct udf_node *dir_node, uint64_t offset,
4208 uint32_t fid_size)
4209 {
4210 struct udf_dirhash *dirh;
4211 struct udf_dirhash_entry *dirh_e;
4212
4213 /* make sure we have a dirhash to work on */
4214 dirh = dir_node->dir_hash;
4215 KASSERT(dirh);
4216 KASSERT(dirh->refcnt > 0);
4217
4218 #ifdef DEBUG
4219 /* check for double entry of free space */
4220 LIST_FOREACH(dirh_e, &dirh->free_entries, next)
4221 KASSERT(dirh_e->offset != offset);
4222 #endif
4223
4224 DPRINTF(DIRHASH, ("dirhash enter FREED %"PRIu64", %d\n",
4225 offset, fid_size));
4226 dirh_e = pool_get(&udf_dirhash_entry_pool, PR_WAITOK);
4227 memset(dirh_e, 0, sizeof(struct udf_dirhash_entry));
4228
4229 dirh_e->hashvalue = 0; /* not relevant */
4230 dirh_e->offset = offset;
4231 dirh_e->d_namlen = 0; /* not relevant */
4232 dirh_e->fid_size = fid_size;
4233
4234 /* XXX it might be preferable to append them at the tail */
4235 LIST_INSERT_HEAD(&dirh->free_entries, dirh_e, next);
4236 dirh->size += sizeof(struct udf_dirhash_entry);
4237 udf_dirhashsize += sizeof(struct udf_dirhash_entry);
4238 }
4239
4240
4241 static void
4242 udf_dirhash_remove(struct udf_node *dir_node, struct dirent *dirent,
4243 uint64_t offset, uint32_t fid_size)
4244 {
4245 struct udf_dirhash *dirh;
4246 struct udf_dirhash_entry *dirh_e;
4247 uint32_t hashvalue, hashline;
4248
4249 DPRINTF(DIRHASH, ("dirhash remove %"PRIu64", %d for `%*.*s`\n",
4250 offset, fid_size,
4251 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4252
4253 /* make sure we have a dirhash to work on */
4254 dirh = dir_node->dir_hash;
4255 KASSERT(dirh);
4256 KASSERT(dirh->refcnt > 0);
4257
4258 /* calculate our hash */
4259 hashvalue = udf_dirhash_hash(dirent->d_name, dirent->d_namlen);
4260 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
4261
4262 /* lookup entry */
4263 LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) {
4264 /* check for hash collision */
4265 if (dirh_e->hashvalue != hashvalue)
4266 continue;
4267 if (dirh_e->offset != offset)
4268 continue;
4269
4270 /* got it! */
4271 KASSERT(dirh_e->d_namlen == dirent->d_namlen);
4272 KASSERT(dirh_e->fid_size == fid_size);
4273 LIST_REMOVE(dirh_e, next);
4274 dirh->size -= sizeof(struct udf_dirhash_entry);
4275 udf_dirhashsize -= sizeof(struct udf_dirhash_entry);
4276
4277 udf_dirhash_enter_freed(dir_node, offset, fid_size);
4278 return;
4279 }
4280
4281 /* not found! */
4282 panic("dirhash_remove couldn't find entry in hash table\n");
4283 }
4284
4285
4286 /* BUGALERT: don't use the result longer than needed, never past the node lock */
4287 /* Call with *result == NULL first; returns nonzero per candidate, call again to continue */
4288 static int
4289 udf_dirhash_lookup(struct udf_node *dir_node, const char *d_name, int d_namlen,
4290 struct udf_dirhash_entry **result)
4291 {
4292 struct udf_dirhash *dirh;
4293 struct udf_dirhash_entry *dirh_e;
4294 uint32_t hashvalue, hashline;
4295
4296 KASSERT(VOP_ISLOCKED(dir_node->vnode));
4297
4298 /* make sure we have a dirhash to work on */
4299 dirh = dir_node->dir_hash;
4300 KASSERT(dirh);
4301 KASSERT(dirh->refcnt > 0);
4302
4303 /* start where we were */
4304 if (*result) {
4305 KASSERT(dir_node->dir_hash);
4306 dirh_e = *result;
4307
4308 /* retrieve information to avoid recalculation and advance */
4309 hashvalue = dirh_e->hashvalue;
4310 dirh_e = LIST_NEXT(*result, next);
4311 } else {
4312 /* calculate our hash and lookup all entries in hashline */
4313 hashvalue = udf_dirhash_hash(d_name, d_namlen);
4314 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
4315 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
4316 }
4317
4318 for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) {
4319 /* check for hash collision */
4320 if (dirh_e->hashvalue != hashvalue)
4321 continue;
4322 if (dirh_e->d_namlen != d_namlen)
4323 continue;
4324 /* might have an entry in the cache */
4325 *result = dirh_e;
4326 return 1;
4327 }
4328
4329 *result = NULL;
4330 return 0;
4331 }
4332
4333
4334 /* BUGALERT: don't use the result longer than needed, never past the node lock */
4335 /* Call with *result == NULL first; returns nonzero per candidate, call again to continue */
4336 static int
4337 udf_dirhash_lookup_freed(struct udf_node *dir_node, uint32_t min_fidsize,
4338 struct udf_dirhash_entry **result)
4339 {
4340 struct udf_dirhash *dirh;
4341 struct udf_dirhash_entry *dirh_e;
4342
4343 KASSERT(VOP_ISLOCKED(dir_node->vnode));
4344
4345 /* make sure we have a dirhash to work on */
4346 dirh = dir_node->dir_hash;
4347 KASSERT(dirh);
4348 KASSERT(dirh->refcnt > 0);
4349
4350 /* start where we were */
4351 if (*result) {
4352 KASSERT(dir_node->dir_hash);
4353 dirh_e = LIST_NEXT(*result, next);
4354 } else {
4355 /* lookup all entries that match */
4356 dirh_e = LIST_FIRST(&dirh->free_entries);
4357 }
4358
4359 for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) {
4360 /* check for minimum size */
4361 if (dirh_e->fid_size < min_fidsize)
4362 continue;
4363 /* might be a candidate */
4364 *result = dirh_e;
4365 return 1;
4366 }
4367
4368 *result = NULL;
4369 return 0;
4370 }
4371
4372
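/*
 * Fill the dirhash by reading the complete fid stream of the directory,
 * registering deleted entries as reusable free space.
 */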
4373 static int
4374 udf_dirhash_fill(struct udf_node *dir_node)
4375 {
4376 struct vnode *dvp = dir_node->vnode;
4377 struct udf_dirhash *dirh;
4378 struct file_entry *fe = dir_node->fe;
4379 struct extfile_entry *efe = dir_node->efe;
4380 struct fileid_desc *fid;
4381 struct dirent *dirent;
4382 uint64_t file_size, pre_diroffset, diroffset;
4383 uint32_t lb_size;
4384 int error;
4385
4386 /* make sure we have a dirhash to work on */
4387 dirh = dir_node->dir_hash;
4388 KASSERT(dirh);
4389 KASSERT(dirh->refcnt > 0);
4390
4391 if (dirh->flags & UDF_DIRH_BROKEN)
4392 return EIO;
4393 if (dirh->flags & UDF_DIRH_COMPLETE)
4394 return 0;
4395
4396 /* make sure we have a clean dirhash to add to */
4397 udf_dirhash_purge(dirh);
4398
4399 /* get directory filesize */
4400 if (fe) {
4401 file_size = udf_rw64(fe->inf_len);
4402 } else {
4403 assert(efe);
4404 file_size = udf_rw64(efe->inf_len);
4405 }
4406
4407 /* allocate temporary space for fid */
4408 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4409 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4410
4411 /* allocate temporary space for dirent */
4412 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4413
4414 error = 0;
4415 diroffset = 0;
4416 while (diroffset < file_size) {
4417 /* transfer a new fid/dirent */
4418 pre_diroffset = diroffset;
4419 error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
4420 if (error) {
4421 /* TODO what to do? continue but not add? */
4422 dirh->flags |= UDF_DIRH_BROKEN;
4423 udf_dirhash_purge(dirh);
4424 break;
4425 }
4426
4427 if ((fid->file_char & UDF_FILE_CHAR_DEL)) {
4428 /* register deleted extent for reuse */
4429 udf_dirhash_enter_freed(dir_node, pre_diroffset,
4430 udf_fidsize(fid));
4431 } else {
4432 /* append to the dirhash */
4433 udf_dirhash_enter(dir_node, fid, dirent, pre_diroffset,
4434 udf_fidsize(fid), 0);
4435 }
4436 }
4437 	if (!error)
		dirh->flags |= UDF_DIRH_COMPLETE;
4438
4439 free(fid, M_UDFTEMP);
4440 free(dirent, M_UDFTEMP);
4441
4442 return error;
4443 }
4444
4445
4446 /* --------------------------------------------------------------------- */
4447
4448 /*
4449 * Directory read and manipulation functions.
4450 *
4451 * Note that if the file is found, the cached diroffset position *before* the
4452  * advance is remembered. Thus if the same filename is looked up again right
4453  * after this lookup, it is found immediately.
4454 */
4455
4456 int
4457 udf_lookup_name_in_dir(struct vnode *vp, const char *name, int namelen,
4458 struct long_ad *icb_loc, int *found)
4459 {
4460 struct udf_node *dir_node = VTOI(vp);
4461 struct udf_dirhash_entry *dirh_ep;
4462 struct fileid_desc *fid;
4463 struct dirent *dirent;
4464 uint64_t diroffset;
4465 uint32_t lb_size;
4466 int hit, error;
4467
4468 /* set default return */
4469 *found = 0;
4470
4471 /* get our dirhash and make sure its read in */
4472 udf_dirhash_get(&dir_node->dir_hash);
4473 error = udf_dirhash_fill(dir_node);
4474 if (error) {
4475 udf_dirhash_put(dir_node->dir_hash);
4476 return error;
4477 }
4478
4479 /* allocate temporary space for fid */
4480 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4481 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4482 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4483
4484 DPRINTF(DIRHASH, ("dirhash_lookup looking for `%*.*s`\n",
4485 namelen, namelen, name));
4486
4487 /* search our dirhash hits */
4488 memset(icb_loc, 0, sizeof(*icb_loc));
4489 dirh_ep = NULL;
4490 for (;;) {
4491 hit = udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep);
4492 /* if no hit, abort the search */
4493 if (!hit)
4494 break;
4495
4496 /* check this hit */
4497 diroffset = dirh_ep->offset;
4498
4499 /* transfer a new fid/dirent */
4500 error = udf_read_fid_stream(vp, &diroffset, fid, dirent);
4501 if (error)
4502 break;
4503
4504 DPRINTF(DIRHASH, ("dirhash_lookup\tchecking `%*.*s`\n",
4505 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4506
4507 /* see if its our entry */
4508 KASSERT(dirent->d_namlen == namelen);
4509 if (strncmp(dirent->d_name, name, namelen) == 0) {
4510 *found = 1;
4511 *icb_loc = fid->icb;
4512 break;
4513 }
4514 }
4515 free(fid, M_UDFTEMP);
4516 free(dirent, M_UDFTEMP);
4517
4518 udf_dirhash_put(dir_node->dir_hash);
4519
4520 return error;
4521 }
4522
4523 /* --------------------------------------------------------------------- */
4524
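/*
 * Initialise a new file entry descriptor of the given file type located at
 * `node_icb'. For directories a '..' fid referring to `parent_icb' is
 * created in the internal data space; the size of that fid (zero for non
 * directories) is returned.
 */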
4525 static int
4526 udf_create_new_fe(struct udf_mount *ump, struct file_entry *fe, int file_type,
4527 struct long_ad *node_icb, struct long_ad *parent_icb,
4528 uint64_t parent_unique_id)
4529 {
4530 struct timespec now;
4531 struct icb_tag *icb;
4532 struct filetimes_extattr_entry *ft_extattr;
4533 uint64_t unique_id;
4534 uint32_t fidsize, lb_num;
4535 uint8_t *bpos;
4536 int crclen, attrlen;
4537
4538 lb_num = udf_rw32(node_icb->loc.lb_num);
4539 udf_inittag(ump, &fe->tag, TAGID_FENTRY, lb_num);
4540 icb = &fe->icbtag;
4541
4542 /*
4543 	 * Always use strategy type 4 unless on WORM which we don't support
4544 * (yet). Fill in defaults and set for internal allocation of data.
4545 */
4546 icb->strat_type = udf_rw16(4);
4547 icb->max_num_entries = udf_rw16(1);
4548 icb->file_type = file_type; /* 8 bit */
4549 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
4550
4551 fe->perm = udf_rw32(0x7fff); /* all is allowed */
4552 fe->link_cnt = udf_rw16(0); /* explicit setting */
4553
4554 fe->ckpoint = udf_rw32(1); /* user supplied file version */
4555
4556 vfs_timestamp(&now);
4557 udf_timespec_to_timestamp(&now, &fe->atime);
4558 udf_timespec_to_timestamp(&now, &fe->attrtime);
4559 udf_timespec_to_timestamp(&now, &fe->mtime);
4560
4561 udf_set_regid(&fe->imp_id, IMPL_NAME);
4562 udf_add_impl_regid(ump, &fe->imp_id);
4563
4564 unique_id = udf_advance_uniqueid(ump);
4565 fe->unique_id = udf_rw64(unique_id);
4566 fe->l_ea = udf_rw32(0);
4567
4568 /* create extended attribute to record our creation time */
4569 attrlen = UDF_FILETIMES_ATTR_SIZE(1);
4570 ft_extattr = malloc(attrlen, M_UDFTEMP, M_WAITOK);
4571 memset(ft_extattr, 0, attrlen);
4572 ft_extattr->hdr.type = udf_rw32(UDF_FILETIMES_ATTR_NO);
4573 ft_extattr->hdr.subtype = 1; /* [4/48.10.5] */
4574 ft_extattr->hdr.a_l = udf_rw32(UDF_FILETIMES_ATTR_SIZE(1));
4575 ft_extattr->d_l = udf_rw32(UDF_TIMESTAMP_SIZE); /* one item */
4576 ft_extattr->existence = UDF_FILETIMES_FILE_CREATION;
4577 udf_timespec_to_timestamp(&now, &ft_extattr->times[0]);
4578
4579 udf_extattr_insert_internal(ump, (union dscrptr *) fe,
4580 (struct extattr_entry *) ft_extattr);
4581 free(ft_extattr, M_UDFTEMP);
4582
4583 	/* if it's a directory, create '..' */
4584 bpos = (uint8_t *) fe->data + udf_rw32(fe->l_ea);
4585 fidsize = 0;
4586 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
4587 fidsize = udf_create_parentfid(ump,
4588 (struct fileid_desc *) bpos, parent_icb,
4589 parent_unique_id);
4590 }
4591
4592 /* record fidlength information */
4593 fe->inf_len = udf_rw64(fidsize);
4594 fe->l_ad = udf_rw32(fidsize);
4595 fe->logblks_rec = udf_rw64(0); /* intern */
4596
4597 crclen = sizeof(struct file_entry) - 1 - UDF_DESC_TAG_LENGTH;
4598 crclen += udf_rw32(fe->l_ea) + fidsize;
4599 fe->tag.desc_crc_len = udf_rw16(crclen);
4600
4601 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fe);
4602
4603 return fidsize;
4604 }
4605
4606 /* --------------------------------------------------------------------- */
4607
4608 static int
4609 udf_create_new_efe(struct udf_mount *ump, struct extfile_entry *efe,
4610 int file_type, struct long_ad *node_icb, struct long_ad *parent_icb,
4611 uint64_t parent_unique_id)
4612 {
4613 struct timespec now;
4614 struct icb_tag *icb;
4615 uint64_t unique_id;
4616 uint32_t fidsize, lb_num;
4617 uint8_t *bpos;
4618 int crclen;
4619
4620 lb_num = udf_rw32(node_icb->loc.lb_num);
4621 udf_inittag(ump, &efe->tag, TAGID_EXTFENTRY, lb_num);
4622 icb = &efe->icbtag;
4623
4624 /*
4625 	 * Always use strategy type 4 unless on WORM which we don't support
4626 * (yet). Fill in defaults and set for internal allocation of data.
4627 */
4628 icb->strat_type = udf_rw16(4);
4629 icb->max_num_entries = udf_rw16(1);
4630 icb->file_type = file_type; /* 8 bit */
4631 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
4632
4633 efe->perm = udf_rw32(0x7fff); /* all is allowed */
4634 efe->link_cnt = udf_rw16(0); /* explicit setting */
4635
4636 efe->ckpoint = udf_rw32(1); /* user supplied file version */
4637
4638 vfs_timestamp(&now);
4639 udf_timespec_to_timestamp(&now, &efe->ctime);
4640 udf_timespec_to_timestamp(&now, &efe->atime);
4641 udf_timespec_to_timestamp(&now, &efe->attrtime);
4642 udf_timespec_to_timestamp(&now, &efe->mtime);
4643
4644 udf_set_regid(&efe->imp_id, IMPL_NAME);
4645 udf_add_impl_regid(ump, &efe->imp_id);
4646
4647 unique_id = udf_advance_uniqueid(ump);
4648 efe->unique_id = udf_rw64(unique_id);
4649 efe->l_ea = udf_rw32(0);
4650
4651 	/* if it's a directory, create '..' */
4652 bpos = (uint8_t *) efe->data + udf_rw32(efe->l_ea);
4653 fidsize = 0;
4654 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
4655 fidsize = udf_create_parentfid(ump,
4656 (struct fileid_desc *) bpos, parent_icb,
4657 parent_unique_id);
4658 }
4659
4660 /* record fidlength information */
4661 efe->obj_size = udf_rw64(fidsize);
4662 efe->inf_len = udf_rw64(fidsize);
4663 efe->l_ad = udf_rw32(fidsize);
4664 efe->logblks_rec = udf_rw64(0); /* intern */
4665
4666 crclen = sizeof(struct extfile_entry) - 1 - UDF_DESC_TAG_LENGTH;
4667 crclen += udf_rw32(efe->l_ea) + fidsize;
4668 efe->tag.desc_crc_len = udf_rw16(crclen);
4669
4670 (void) udf_validate_tag_and_crc_sums((union dscrptr *) efe);
4671
4672 return fidsize;
4673 }
4674
4675 /* --------------------------------------------------------------------- */
4676
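/*
 * Remove the directory entry referring to `udf_node' from `dir_node': the
 * fid is marked deleted and written back, the link counts are lowered and
 * the freed fid space is registered in the dirhash for reuse.
 */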
4677 int
4678 udf_dir_detach(struct udf_mount *ump, struct udf_node *dir_node,
4679 struct udf_node *udf_node, struct componentname *cnp)
4680 {
4681 struct vnode *dvp = dir_node->vnode;
4682 struct udf_dirhash_entry *dirh_ep;
4683 struct file_entry *fe = dir_node->fe;
4684 struct extfile_entry *efe = dir_node->efe;
4685 struct fileid_desc *fid;
4686 struct dirent *dirent;
4687 uint64_t file_size, diroffset;
4688 uint32_t lb_size, fidsize;
4689 int found, error;
4690 char const *name = cnp->cn_nameptr;
4691 int namelen = cnp->cn_namelen;
4692 int hit, refcnt;
4693
4694 /* get our dirhash and make sure its read in */
4695 udf_dirhash_get(&dir_node->dir_hash);
4696 error = udf_dirhash_fill(dir_node);
4697 if (error) {
4698 udf_dirhash_put(dir_node->dir_hash);
4699 return error;
4700 }
4701
4702 /* get directory filesize */
4703 if (fe) {
4704 file_size = udf_rw64(fe->inf_len);
4705 } else {
4706 assert(efe);
4707 file_size = udf_rw64(efe->inf_len);
4708 }
4709
4710 /* allocate temporary space for fid */
4711 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4712 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4713 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4714
4715 /* search our dirhash hits */
4716 found = 0;
4717 dirh_ep = NULL;
4718 for (;;) {
4719 hit = udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep);
4720 /* if no hit, abort the search */
4721 if (!hit)
4722 break;
4723
4724 /* check this hit */
4725 diroffset = dirh_ep->offset;
4726
4727 /* transfer a new fid/dirent */
4728 error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
4729 if (error)
4730 break;
4731
4732 /* see if its our entry */
4733 KASSERT(dirent->d_namlen == namelen);
4734 if (strncmp(dirent->d_name, name, namelen) == 0) {
4735 found = 1;
4736 break;
4737 }
4738 }
4739
4740 if (!found)
4741 error = ENOENT;
4742 if (error)
4743 goto error_out;
4744
4745 /* mark deleted */
4746 fid->file_char |= UDF_FILE_CHAR_DEL;
4747 #ifdef UDF_COMPLETE_DELETE
4748 memset(&fid->icb, 0, sizeof(fid->icb));
4749 #endif
4750 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
4751
4752 /* get size of fid and compensate for the read_fid_stream advance */
4753 fidsize = udf_fidsize(fid);
4754 diroffset -= fidsize;
4755
4756 /* write out */
4757 error = vn_rdwr(UIO_WRITE, dir_node->vnode,
4758 fid, fidsize, diroffset,
4759 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
4760 FSCRED, NULL, NULL);
4761 if (error)
4762 goto error_out;
4763
4764 /* get reference count of attached node */
4765 if (udf_node->fe) {
4766 refcnt = udf_rw16(udf_node->fe->link_cnt);
4767 } else {
4768 KASSERT(udf_node->efe);
4769 refcnt = udf_rw16(udf_node->efe->link_cnt);
4770 }
4771 #ifdef UDF_COMPLETE_DELETE
4772 	/* subtract the reference count of the attached node */
4773 refcnt -= 1;
4774 if (udf_node->fe) {
4775 udf_node->fe->link_cnt = udf_rw16(refcnt);
4776 } else {
4777 udf_node->efe->link_cnt = udf_rw16(refcnt);
4778 }
4779
4780 /* prevent writeout when refcnt == 0 */
4781 if (refcnt == 0)
4782 udf_node->i_flags |= IN_DELETED;
4783
4784 if (fid->file_char & UDF_FILE_CHAR_DIR) {
4785 int drefcnt;
4786
4787 		/* subtract reference count in the directory node */
4788 		/* note: arguably subtract 2, since it was also back-referenced by '..' */
4789 if (dir_node->fe) {
4790 drefcnt = udf_rw16(dir_node->fe->link_cnt);
4791 drefcnt -= 1;
4792 dir_node->fe->link_cnt = udf_rw16(drefcnt);
4793 } else {
4794 KASSERT(dir_node->efe);
4795 drefcnt = udf_rw16(dir_node->efe->link_cnt);
4796 drefcnt -= 1;
4797 dir_node->efe->link_cnt = udf_rw16(drefcnt);
4798 }
4799 }
4800
4801 udf_node->i_flags |= IN_MODIFIED;
4802 dir_node->i_flags |= IN_MODIFIED;
4803 #endif
4804 /* if it is/was a hardlink adjust the file count */
4805 if (refcnt > 0)
4806 udf_adjust_filecount(udf_node, -1);
4807
4808 /* remove from the dirhash */
4809 udf_dirhash_remove(dir_node, dirent, diroffset,
4810 udf_fidsize(fid));
4811
4812 error_out:
4813 free(fid, M_UDFTEMP);
4814 free(dirent, M_UDFTEMP);
4815
4816 udf_dirhash_put(dir_node->dir_hash);
4817
4818 return error;
4819 }
4820
4821 /* --------------------------------------------------------------------- */
4822
4823 /*
4824  * We are not allowed to split the fid tag itself over a logical block so
4825 * check the space remaining in the logical block.
4826 *
4827 * We try to select the smallest candidate for recycling or when none is
4828 * found, append a new one at the end of the directory.
4829 */
4830
4831 int
4832 udf_dir_attach(struct udf_mount *ump, struct udf_node *dir_node,
4833 struct udf_node *udf_node, struct vattr *vap, struct componentname *cnp)
4834 {
4835 struct vnode *dvp = dir_node->vnode;
4836 struct udf_dirhash_entry *dirh_ep;
4837 struct fileid_desc *fid;
4838 struct icb_tag *icbtag;
4839 struct charspec osta_charspec;
4840 struct dirent dirent;
4841 uint64_t unique_id, dir_size, diroffset;
4842 uint64_t fid_pos, end_fid_pos, chosen_fid_pos;
4843 uint32_t chosen_size, chosen_size_diff;
4844 int lb_size, lb_rest, fidsize, this_fidsize, size_diff;
4845 int file_char, refcnt, icbflags, addr_type, hit, error;
4846
4847 /* get our dirhash and make sure its read in */
4848 udf_dirhash_get(&dir_node->dir_hash);
4849 error = udf_dirhash_fill(dir_node);
4850 if (error) {
4851 udf_dirhash_put(dir_node->dir_hash);
4852 return error;
4853 }
4854
4855 /* get info */
4856 lb_size = udf_rw32(ump->logical_vol->lb_size);
4857 udf_osta_charset(&osta_charspec);
4858
4859 if (dir_node->fe) {
4860 dir_size = udf_rw64(dir_node->fe->inf_len);
4861 icbtag = &dir_node->fe->icbtag;
4862 } else {
4863 dir_size = udf_rw64(dir_node->efe->inf_len);
4864 icbtag = &dir_node->efe->icbtag;
4865 }
4866
4867 icbflags = udf_rw16(icbtag->flags);
4868 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
4869
4870 if (udf_node->fe) {
4871 unique_id = udf_rw64(udf_node->fe->unique_id);
4872 refcnt = udf_rw16(udf_node->fe->link_cnt);
4873 } else {
4874 unique_id = udf_rw64(udf_node->efe->unique_id);
4875 refcnt = udf_rw16(udf_node->efe->link_cnt);
4876 }
4877
4878 if (refcnt > 0) {
4879 unique_id = udf_advance_uniqueid(ump);
4880 udf_adjust_filecount(udf_node, 1);
4881 }
4882
4883 /* determine file characteristics */
4884 file_char = 0; /* visible non deleted file and not stream metadata */
4885 if (vap->va_type == VDIR)
4886 file_char = UDF_FILE_CHAR_DIR;
4887
4888 /* malloc scrap buffer */
4889 fid = malloc(lb_size, M_TEMP, M_WAITOK);
4890 bzero(fid, lb_size);
4891
4892 /* calculate _minimum_ fid size */
4893 unix_to_udf_name((char *) fid->data, &fid->l_fi,
4894 cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
4895 fidsize = UDF_FID_SIZE + fid->l_fi;
4896 fidsize = (fidsize + 3) & ~3; /* multiple of 4 */
4897
4898 /* find position that will fit the FID */
4899 chosen_fid_pos = dir_size;
4900 chosen_size = 0;
4901 chosen_size_diff = UINT_MAX;
4902
4903 /* shut up gcc */
4904 dirent.d_namlen = 0;
4905
4906 /* search our dirhash hits */
4907 error = 0;
4908 dirh_ep = NULL;
4909 for (;;) {
4910 hit = udf_dirhash_lookup_freed(dir_node, fidsize, &dirh_ep);
4911 /* if no hit, abort the search */
4912 if (!hit)
4913 break;
4914
4915 /* check this hit for size */
4916 this_fidsize = dirh_ep->fid_size;
4917
4918 /* check this hit */
4919 fid_pos = dirh_ep->offset;
4920 end_fid_pos = fid_pos + this_fidsize;
4921 size_diff = this_fidsize - fidsize;
4922 lb_rest = lb_size - (end_fid_pos % lb_size);
4923
4924 #ifndef UDF_COMPLETE_DELETE
4925 /* transfer a new fid/dirent */
4926 		error = udf_read_fid_stream(dvp, &fid_pos, fid, &dirent);
4927 if (error)
4928 goto error_out;
4929
4930 /* only reuse entries that are wiped */
4931 /* check if the len + loc are marked zero */
4932 		if (udf_rw32(fid->icb.len) != 0)
4933 continue;
4934 if (udf_rw32(fid->icb.loc.lb_num) != 0)
4935 continue;
4936 		if (udf_rw16(fid->icb.loc.part_num) != 0)
4937 continue;
4938 #endif /* UDF_COMPLETE_DELETE */
4939
4940 /* select if not splitting the tag and its smaller */
4941 if ((size_diff >= 0) &&
4942 (size_diff < chosen_size_diff) &&
4943 (lb_rest >= sizeof(struct desc_tag)))
4944 {
4945 /* UDF 2.3.4.2+3 specifies rules for iu size */
4946 if ((size_diff == 0) || (size_diff >= 32)) {
4947 chosen_fid_pos = fid_pos;
4948 chosen_size = this_fidsize;
4949 chosen_size_diff = size_diff;
4950 }
4951 }
4952 }
4953
4954
4955 /* extend directory if no other candidate found */
4956 if (chosen_size == 0) {
4957 chosen_fid_pos = dir_size;
4958 chosen_size = fidsize;
4959 chosen_size_diff = 0;
4960
4961 /* special case UDF 2.00+ 2.3.4.4, no splitting up fid tag */
4962 if (addr_type == UDF_ICB_INTERN_ALLOC) {
4963 /* pre-grow directory to see if we're to switch */
4964 udf_grow_node(dir_node, dir_size + chosen_size);
4965
4966 icbflags = udf_rw16(icbtag->flags);
4967 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
4968 }
4969
4970 		/* make sure the next fid desc_tag won't be split */
4971 if (addr_type != UDF_ICB_INTERN_ALLOC) {
4972 end_fid_pos = chosen_fid_pos + chosen_size;
4973 lb_rest = lb_size - (end_fid_pos % lb_size);
4974
4975 /* pad with implementation use regid if needed */
4976 if (lb_rest < sizeof(struct desc_tag))
4977 chosen_size += 32;
4978 }
4979 }
4980 chosen_size_diff = chosen_size - fidsize;
4981 diroffset = chosen_fid_pos + chosen_size;
4982
4983 /* populate the FID */
4984 memset(fid, 0, lb_size);
4985 udf_inittag(ump, &fid->tag, TAGID_FID, 0);
4986 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
4987 fid->file_char = file_char;
4988 fid->icb = udf_node->loc;
4989 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
4990 fid->l_iu = udf_rw16(0);
4991
4992 if (chosen_size > fidsize) {
4993 /* insert implementation-use regid to space it correctly */
4994 fid->l_iu = udf_rw16(chosen_size_diff);
4995
4996 /* set implementation use */
4997 udf_set_regid((struct regid *) fid->data, IMPL_NAME);
4998 udf_add_impl_regid(ump, (struct regid *) fid->data);
4999 }
5000
5001 /* fill in name */
5002 unix_to_udf_name((char *) fid->data + udf_rw16(fid->l_iu),
5003 &fid->l_fi, cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
5004
5005 	fid->tag.desc_crc_len = udf_rw16(chosen_size - UDF_DESC_TAG_LENGTH);
5006 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
5007
5008 /* writeout FID/update parent directory */
5009 error = vn_rdwr(UIO_WRITE, dvp,
5010 fid, chosen_size, chosen_fid_pos,
5011 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
5012 FSCRED, NULL, NULL);
5013
5014 if (error)
5015 goto error_out;
5016
5017 /* add reference counter in attached node */
5018 if (udf_node->fe) {
5019 refcnt = udf_rw16(udf_node->fe->link_cnt);
5020 udf_node->fe->link_cnt = udf_rw16(refcnt+1);
5021 } else {
5022 KASSERT(udf_node->efe);
5023 refcnt = udf_rw16(udf_node->efe->link_cnt);
5024 udf_node->efe->link_cnt = udf_rw16(refcnt+1);
5025 }
5026
5027 /* mark not deleted if it was... just in case, but do warn */
5028 if (udf_node->i_flags & IN_DELETED) {
5029 printf("udf: warning, marking a file undeleted\n");
5030 udf_node->i_flags &= ~IN_DELETED;
5031 }
5032
5033 if (file_char & UDF_FILE_CHAR_DIR) {
5034 /* add reference counter in directory node for '..' */
5035 if (dir_node->fe) {
5036 refcnt = udf_rw16(dir_node->fe->link_cnt);
5037 refcnt++;
5038 dir_node->fe->link_cnt = udf_rw16(refcnt);
5039 } else {
5040 KASSERT(dir_node->efe);
5041 refcnt = udf_rw16(dir_node->efe->link_cnt);
5042 refcnt++;
5043 dir_node->efe->link_cnt = udf_rw16(refcnt);
5044 }
5045 }
5046
5047 /* append to the dirhash */
5048 dirent.d_namlen = cnp->cn_namelen;
5049 memcpy(dirent.d_name, cnp->cn_nameptr, cnp->cn_namelen);
5050 udf_dirhash_enter(dir_node, fid, &dirent, chosen_fid_pos,
5051 udf_fidsize(fid), 1);
5052
5053 /* note updates */
5054 udf_node->i_flags |= IN_CHANGE | IN_MODIFY; /* | IN_CREATE? */
5055 /* VN_KNOTE(udf_node, ...) */
5056 udf_update(udf_node->vnode, NULL, NULL, NULL, 0);
5057
5058 error_out:
5059 free(fid, M_TEMP);
5060
5061 udf_dirhash_put(dir_node->dir_hash);
5062
5063 return error;
5064 }
5065
5066 /* --------------------------------------------------------------------- */
5067
5068 /*
5069 * Each node can have an attached streamdir node though not recursively. These
5070 * are otherwise known as named substreams/named extended attributes that have
5071 * no size limitations.
5072 *
5073 * `Normal' extended attributes are indicated with a number and are recorded
5074 * in either the fe/efe descriptor itself for small descriptors or recorded in
5075 * the attached extended attribute file. Since these spaces can get
5076 * fragmented, care ought to be taken.
5077 *
5078 * Since the size of the space reserved for allocation descriptors is limited,
5079  * there is a mechanism provided for extending this space; this is done by a
5080  * special extent to allow shrinking of the allocations without breaking the
5081 * linkage to the allocation extent descriptor.
5082 */
5083
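/*
 * Look up or read in the udf_node addressed by `node_icb_loc'. The node is
 * returned in `*udf_noderes' with its vnode locked.
 */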
5084 int
5085 udf_get_node(struct udf_mount *ump, struct long_ad *node_icb_loc,
5086 struct udf_node **udf_noderes)
5087 {
5088 union dscrptr *dscr;
5089 struct udf_node *udf_node;
5090 struct vnode *nvp;
5091 	struct long_ad icb_loc, next_icb, last_fe_icb_loc;
5092 uint64_t file_size;
5093 uint32_t lb_size, sector, dummy;
5094 uint8_t *file_data;
5095 int udf_file_type, dscr_type, strat, strat4096, needs_indirect;
5096 int slot, eof, error;
5097
5098 DPRINTF(NODE, ("udf_get_node called\n"));
5099 *udf_noderes = udf_node = NULL;
5100
5101 	/* lock to disallow simultaneous creation of same udf_node */
5102 mutex_enter(&ump->get_node_lock);
5103
5104 DPRINTF(NODE, ("\tlookup in hash table\n"));
5105 /* lookup in hash table */
5106 assert(ump);
5107 assert(node_icb_loc);
5108 udf_node = udf_hash_lookup(ump, node_icb_loc);
5109 if (udf_node) {
5110 DPRINTF(NODE, ("\tgot it from the hash!\n"));
5111 /* vnode is returned locked */
5112 *udf_noderes = udf_node;
5113 mutex_exit(&ump->get_node_lock);
5114 return 0;
5115 }
5116
5117 /* garbage check: translate udf_node_icb_loc to sectornr */
5118 	error = udf_translate_vtop(ump, node_icb_loc, &sector, &dummy);
5119 if (error) {
5120 /* no use, this will fail anyway */
5121 mutex_exit(&ump->get_node_lock);
5122 return EINVAL;
5123 }
5124
5125 /* build udf_node (do initialise!) */
5126 udf_node = pool_get(&udf_node_pool, PR_WAITOK);
5127 memset(udf_node, 0, sizeof(struct udf_node));
5128
5129 DPRINTF(NODE, ("\tget new vnode\n"));
5130 /* give it a vnode */
5131 error = getnewvnode(VT_UDF, ump->vfs_mountp, udf_vnodeop_p, &nvp);
5132 if (error) {
5133 pool_put(&udf_node_pool, udf_node);
5134 mutex_exit(&ump->get_node_lock);
5135 return error;
5136 }
5137
5138 /* always return locked vnode */
5139 if ((error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY))) {
5140 		/* recycle vnode and unlock; simultaneous gets will fail too */
5141 ungetnewvnode(nvp);
5142 mutex_exit(&ump->get_node_lock);
5143 return error;
5144 }
5145
5146 /* initialise crosslinks, note location of fe/efe for hashing */
5147 udf_node->ump = ump;
5148 udf_node->vnode = nvp;
5149 nvp->v_data = udf_node;
5150 udf_node->loc = *node_icb_loc;
5151 udf_node->lockf = 0;
5152 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
5153 cv_init(&udf_node->node_lock, "udf_nlk");
5154 	genfs_node_init(nvp, &udf_genfsops);	/* initialise genfs */
5155 udf_node->outstanding_bufs = 0;
5156 udf_node->outstanding_nodedscr = 0;
5157
5158 /* insert into the hash lookup */
5159 udf_register_node(udf_node);
5160
5161 /* safe to unlock, the entry is in the hash table, vnode is locked */
5162 mutex_exit(&ump->get_node_lock);
5163
5164 icb_loc = *node_icb_loc;
5165 needs_indirect = 0;
5166 strat4096 = 0;
5167 udf_file_type = UDF_ICB_FILETYPE_UNKNOWN;
5168 file_size = 0;
5169 file_data = NULL;
5170 lb_size = udf_rw32(ump->logical_vol->lb_size);
5171
5172 DPRINTF(NODE, ("\tstart reading descriptors\n"));
5173 do {
5174 /* try to read in fe/efe */
5175 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
5176
5177 /* blank sector marks end of sequence, check this */
5178 if ((dscr == NULL) && (!strat4096))
5179 error = ENOENT;
5180
5181 /* break if read error or blank sector */
5182 if (error || (dscr == NULL))
5183 break;
5184
5185 /* process descriptor based on the descriptor type */
5186 dscr_type = udf_rw16(dscr->tag.id);
5187 DPRINTF(NODE, ("\tread descriptor %d\n", dscr_type));
5188
5189 /* if dealing with an indirect entry, follow the link */
5190 if (dscr_type == TAGID_INDIRECTENTRY) {
5191 			needs_indirect = 0;
5192 			next_icb = dscr->inde.indirect_icb; /* save before freeing */
5193 			udf_free_logvol_dscr(ump, &icb_loc, dscr);
			icb_loc = next_icb;
5194 continue;
5195 }
5196
5197 /* only file entries and extended file entries allowed here */
5198 if ((dscr_type != TAGID_FENTRY) &&
5199 (dscr_type != TAGID_EXTFENTRY)) {
5200 udf_free_logvol_dscr(ump, &icb_loc, dscr);
5201 error = ENOENT;
5202 break;
5203 }
5204
5205 KASSERT(udf_tagsize(dscr, lb_size) == lb_size);
5206
5207 /* choose this one */
5208 last_fe_icb_loc = icb_loc;
5209
5210 /* record and process/update (ext)fentry */
5211 file_data = NULL;
5212 if (dscr_type == TAGID_FENTRY) {
5213 if (udf_node->fe)
5214 udf_free_logvol_dscr(ump, &last_fe_icb_loc,
5215 udf_node->fe);
5216 udf_node->fe = &dscr->fe;
5217 strat = udf_rw16(udf_node->fe->icbtag.strat_type);
5218 udf_file_type = udf_node->fe->icbtag.file_type;
5219 file_size = udf_rw64(udf_node->fe->inf_len);
5220 file_data = udf_node->fe->data;
5221 } else {
5222 if (udf_node->efe)
5223 udf_free_logvol_dscr(ump, &last_fe_icb_loc,
5224 udf_node->efe);
5225 udf_node->efe = &dscr->efe;
5226 strat = udf_rw16(udf_node->efe->icbtag.strat_type);
5227 udf_file_type = udf_node->efe->icbtag.file_type;
5228 file_size = udf_rw64(udf_node->efe->inf_len);
5229 file_data = udf_node->efe->data;
5230 }
5231
5232 /* check recording strategy (structure) */
5233
5234 /*
5235 * Strategy 4096 is a daisy linked chain terminating with an
5236 * unrecorded sector or a TERM descriptor. The next
5237 * descriptor is to be found in the sector that follows the
5238 * current sector.
5239 */
5240 if (strat == 4096) {
5241 strat4096 = 1;
5242 needs_indirect = 1;
5243
5244 			icb_loc.loc.lb_num = udf_rw32(udf_rw32(icb_loc.loc.lb_num) + 1);
5245 }
5246
5247 /*
5248 * Strategy 4 is the normal strategy and terminates, but if
5249 * we're in strategy 4096, we can't have strategy 4 mixed in
5250 */
5251
5252 if (strat == 4) {
5253 if (strat4096) {
5254 error = EINVAL;
5255 break;
5256 }
5257 break; /* done */
5258 }
5259 } while (!error);
5260
5261 /* first round of cleanup code */
5262 if (error) {
5263 DPRINTF(NODE, ("\tnode fe/efe failed!\n"));
5264 /* recycle udf_node */
5265 udf_dispose_node(udf_node);
5266
5267 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5268 nvp->v_data = NULL;
5269 ungetnewvnode(nvp);
5270
5271 return EINVAL; /* error code ok? */
5272 }
5273 DPRINTF(NODE, ("\tnode fe/efe read in fine\n"));
5274
5275 	/* assert no references to dscr anymore beyond this point */
5276 assert((udf_node->fe) || (udf_node->efe));
5277 dscr = NULL;
5278
5279 /*
5280 * Remember where to record an updated version of the descriptor. If
5281 * there is a sequence of indirect entries, icb_loc will have been
5282 	 * updated. It is the write discipline's job to allocate new space and
5283 	 * to make sure the chain is maintained.
5284 	 *
5285 	 * `needs_indirect' flags whether the next location is to be filled
5286 	 * with an indirect entry.
5287 */
5288 udf_node->write_loc = icb_loc;
5289 udf_node->needs_indirect = needs_indirect;
5290
5291 /*
5292 	 * Go through all allocation extents of this descriptor and, when
5293 	 * encountering a redirect, read in the allocation extension. These are
5294 * daisy-chained.
5295 */
5296 UDF_LOCK_NODE(udf_node, 0);
5297 udf_node->num_extensions = 0;
5298
5299 error = 0;
5300 slot = 0;
5301 for (;;) {
5302 udf_get_adslot(udf_node, slot, &icb_loc, &eof);
5303 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
5304 "lb_num = %d, part = %d\n", slot, eof,
5305 UDF_EXT_FLAGS(udf_rw32(icb_loc.len)),
5306 UDF_EXT_LEN(udf_rw32(icb_loc.len)),
5307 udf_rw32(icb_loc.loc.lb_num),
5308 udf_rw16(icb_loc.loc.part_num)));
5309 if (eof)
5310 break;
5311 slot++;
5312
5313 if (UDF_EXT_FLAGS(udf_rw32(icb_loc.len)) != UDF_EXT_REDIRECT)
5314 continue;
5315
5316 DPRINTF(NODE, ("\tgot redirect extent\n"));
5317 if (udf_node->num_extensions >= UDF_MAX_ALLOC_EXTENTS) {
5318 DPRINTF(ALLOC, ("udf_get_node: implementation limit, "
5319 "too many allocation extensions on "
5320 "udf_node\n"));
5321 error = EINVAL;
5322 break;
5323 }
5324
5325 /* length can only be *one* lb : UDF 2.50/2.3.7.1 */
5326 if (UDF_EXT_LEN(udf_rw32(icb_loc.len)) != lb_size) {
5327 DPRINTF(ALLOC, ("udf_get_node: bad allocation "
5328 "extension size in udf_node\n"));
5329 error = EINVAL;
5330 break;
5331 }
5332
5333 DPRINTF(NODE, ("read allocation extent at lb_num %d\n",
5334 		    udf_rw32(icb_loc.loc.lb_num)));
5335 /* load in allocation extent */
5336 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
5337 if (error || (dscr == NULL))
5338 break;
5339
5340 /* process read-in descriptor */
5341 dscr_type = udf_rw16(dscr->tag.id);
5342
5343 if (dscr_type != TAGID_ALLOCEXTENT) {
5344 udf_free_logvol_dscr(ump, &icb_loc, dscr);
5345 error = ENOENT;
5346 break;
5347 }
5348
5349 DPRINTF(NODE, ("\trecording redirect extent\n"));
5350 udf_node->ext[udf_node->num_extensions] = &dscr->aee;
5351 udf_node->ext_loc[udf_node->num_extensions] = icb_loc;
5352
5353 udf_node->num_extensions++;
5354
5355 } /* while */
5356 UDF_UNLOCK_NODE(udf_node, 0);
5357
5358 /* second round of cleanup code */
5359 if (error) {
5360 /* recycle udf_node */
5361 udf_dispose_node(udf_node);
5362
5363 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5364 nvp->v_data = NULL;
5365 ungetnewvnode(nvp);
5366
5367 return EINVAL; /* error code ok? */
5368 }
5369
5370 DPRINTF(NODE, ("\tnode read in fine\n"));
5371
5372 /*
5373 * Translate UDF filetypes into vnode types.
5374 *
5375 * Systemfiles like the meta main and mirror files are not treated as
5376 * normal files, so we type them as having no type. UDF dictates that
5377 * they are not allowed to be visible.
5378 */
5379
5380 switch (udf_file_type) {
5381 case UDF_ICB_FILETYPE_DIRECTORY :
5382 case UDF_ICB_FILETYPE_STREAMDIR :
5383 nvp->v_type = VDIR;
5384 break;
5385 case UDF_ICB_FILETYPE_BLOCKDEVICE :
5386 nvp->v_type = VBLK;
5387 break;
5388 case UDF_ICB_FILETYPE_CHARDEVICE :
5389 nvp->v_type = VCHR;
5390 break;
5391 case UDF_ICB_FILETYPE_SOCKET :
5392 nvp->v_type = VSOCK;
5393 break;
5394 case UDF_ICB_FILETYPE_FIFO :
5395 nvp->v_type = VFIFO;
5396 break;
5397 case UDF_ICB_FILETYPE_SYMLINK :
5398 nvp->v_type = VLNK;
5399 break;
5400 case UDF_ICB_FILETYPE_VAT :
5401 case UDF_ICB_FILETYPE_META_MAIN :
5402 case UDF_ICB_FILETYPE_META_MIRROR :
5403 nvp->v_type = VNON;
5404 break;
5405 case UDF_ICB_FILETYPE_RANDOMACCESS :
5406 case UDF_ICB_FILETYPE_REALTIME :
5407 nvp->v_type = VREG;
5408 break;
5409 default:
5410 /* YIKES, something else */
5411 nvp->v_type = VNON;
5412 }
5413
5414 /* TODO specfs, fifofs etc etc. vnops setting */
5415
5416 /* don't forget to set vnode's v_size */
5417 uvm_vnp_setsize(nvp, file_size);
5418
5419 /* TODO ext attr and streamdir udf_nodes */
5420
5421 *udf_noderes = udf_node;
5422
5423 return 0;
5424 }
5425
5426 /* --------------------------------------------------------------------- */
5427
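/*
 * Write out the node descriptor of `udf_node' and, when the node was
 * rebuilt, its allocation extent descriptors as well.
 */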
5428 int
5429 udf_writeout_node(struct udf_node *udf_node, int waitfor)
5430 {
5431 union dscrptr *dscr;
5432 struct long_ad *loc;
5433 int extnr, flags, error;
5434
5435 DPRINTF(NODE, ("udf_writeout_node called\n"));
5436
5437 KASSERT(udf_node->outstanding_bufs == 0);
5438 KASSERT(udf_node->outstanding_nodedscr == 0);
5439
5440 KASSERT(LIST_EMPTY(&udf_node->vnode->v_dirtyblkhd));
5441
5442 if (udf_node->i_flags & IN_DELETED) {
5443 DPRINTF(NODE, ("\tnode deleted; not writing out\n"));
5444 return 0;
5445 }
5446
5447 /* lock node */
5448 flags = waitfor ? 0 : IN_CALLBACK_ULK;
5449 UDF_LOCK_NODE(udf_node, flags);
5450
5451 /* at least one descriptor writeout */
5452 udf_node->outstanding_nodedscr = 1;
5453
5454 /* we're going to write out the descriptor so clear the flags */
5455 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED);
5456
5457 	/* if the node was rebuilt, write out the allocation extents */
5458 if (udf_node->i_flags & IN_NODE_REBUILD) {
5459 /* mark outstanding node descriptors and issue them */
5460 udf_node->outstanding_nodedscr += udf_node->num_extensions;
5461 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
5462 loc = &udf_node->ext_loc[extnr];
5463 dscr = (union dscrptr *) udf_node->ext[extnr];
5464 error = udf_write_logvol_dscr(udf_node, dscr, loc, 0);
5465 if (error)
5466 return error;
5467 }
5468 /* mark allocation extents written out */
5469 udf_node->i_flags &= ~(IN_NODE_REBUILD);
5470 }
5471
5472 if (udf_node->fe) {
5473 KASSERT(udf_node->efe == NULL);
5474 dscr = (union dscrptr *) udf_node->fe;
5475 } else {
5476 KASSERT(udf_node->efe);
5477 KASSERT(udf_node->fe == NULL);
5478 dscr = (union dscrptr *) udf_node->efe;
5479 }
5480 KASSERT(dscr);
5481
5482 loc = &udf_node->write_loc;
5483 error = udf_write_logvol_dscr(udf_node, dscr, loc, waitfor);
5484 return error;
5485 }
5486
5487 /* --------------------------------------------------------------------- */
5488
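/*
 * Tear down an udf_node: wait for a pending sync to finish, destroy its
 * dirhash, remove it from the hash lookup table and free its descriptors
 * and the node itself.
 */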
5489 int
5490 udf_dispose_node(struct udf_node *udf_node)
5491 {
5492 struct vnode *vp;
5493 int extnr;
5494
5495 DPRINTF(NODE, ("udf_dispose_node called on node %p\n", udf_node));
5496 if (!udf_node) {
5497 DPRINTF(NODE, ("UDF: Dispose node on node NULL, ignoring\n"));
5498 return 0;
5499 }
5500
5501 vp = udf_node->vnode;
5502 #ifdef DIAGNOSTIC
5503 if (vp->v_numoutput)
5504 panic("disposing UDF node with pending I/O's, udf_node = %p, "
5505 "v_numoutput = %d", udf_node, vp->v_numoutput);
5506 #endif
5507
5508 	/* wait until no longer being synced (just in case we stumble over one) */
5509 KASSERT(!mutex_owned(&mntvnode_lock));
5510 mutex_enter(&mntvnode_lock);
5511 while (udf_node->i_flags & IN_SYNCED) {
5512 cv_timedwait(&udf_node->ump->dirtynodes_cv, &mntvnode_lock,
5513 hz/16);
5514 }
5515 mutex_exit(&mntvnode_lock);
5516
5517 /* TODO extended attributes and streamdir */
5518
5519 /* remove dirhash if present */
5520 udf_dirhash_destroy(&udf_node->dir_hash);
5521
5522 /* remove from our hash lookup table */
5523 udf_deregister_node(udf_node);
5524
5525 /* destroy our lock */
5526 mutex_destroy(&udf_node->node_mutex);
5527 cv_destroy(&udf_node->node_lock);
5528
5529 /* dissociate our udf_node from the vnode */
5530 genfs_node_destroy(udf_node->vnode);
5531 vp->v_data = NULL;
5532
5533 /* free associated memory and the node itself */
5534 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
5535 udf_free_logvol_dscr(udf_node->ump, &udf_node->ext_loc[extnr],
5536 udf_node->ext[extnr]);
5537 udf_node->ext[extnr] = (void *) 0xdeadcccc;
5538 }
5539
5540 if (udf_node->fe)
5541 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
5542 udf_node->fe);
5543 if (udf_node->efe)
5544 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
5545 udf_node->efe);
5546
5547 udf_node->fe = (void *) 0xdeadaaaa;
5548 udf_node->efe = (void *) 0xdeadbbbb;
5549 udf_node->ump = (void *) 0xdeadbeef;
5550 pool_put(&udf_node_pool, udf_node);
5551
5552 return 0;
5553 }
5554
5555
5556
5557 /*
5558 * create a new node using the specified vnodeops, vap and cnp but with the
5559 * udf_file_type. This allows special files to be created. Use with care.
5560 */
5561
5562 static int
5563 udf_create_node_raw(struct vnode *dvp, struct vnode **vpp, int udf_file_type,
5564 int (**vnodeops)(void *), struct vattr *vap, struct componentname *cnp)
5565 {
5566 union dscrptr *dscr;
5567 	struct udf_node *dir_node = VTOI(dvp);
5568 struct udf_node *udf_node;
5569 struct udf_mount *ump = dir_node->ump;
5570 struct vnode *nvp;
5571 struct long_ad node_icb_loc;
5572 uint64_t parent_unique_id;
5573 uint64_t lmapping;
5574 uint32_t lb_size, lb_num;
5575 uint16_t vpart_num;
5576 uid_t uid;
5577 gid_t gid, parent_gid;
5578 int fid_size, error;
5579
5580 lb_size = udf_rw32(ump->logical_vol->lb_size);
5581 *vpp = NULL;
5582
5583 /* allocate vnode */
5584 error = getnewvnode(VT_UDF, ump->vfs_mountp, vnodeops, &nvp);
5585 if (error)
5586 return error;
5587
5588 /* lock node */
5589 error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY);
5590 if (error) {
5591 nvp->v_data = NULL;
5592 ungetnewvnode(nvp);
5593 return error;
5594 }
5595
5596 /* get disc allocation for one logical block */
5597 vpart_num = ump->node_part;
5598 error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
5599 vpart_num, &lmapping);
5600 lb_num = lmapping;
5601 if (error) {
5602 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5603 ungetnewvnode(nvp);
5604 return error;
5605 }
5606
5607 /* initialise pointer to location */
5608 memset(&node_icb_loc, 0, sizeof(struct long_ad));
5609 	node_icb_loc.len = udf_rw32(lb_size);
5610 node_icb_loc.loc.lb_num = udf_rw32(lb_num);
5611 node_icb_loc.loc.part_num = udf_rw16(vpart_num);
5612
5613 /* build udf_node (do initialise!) */
5614 udf_node = pool_get(&udf_node_pool, PR_WAITOK);
5615 memset(udf_node, 0, sizeof(struct udf_node));
5616
5617 /* initialise crosslinks, note location of fe/efe for hashing */
5618 /* bugalert: synchronise with udf_get_node() */
5619 udf_node->ump = ump;
5620 udf_node->vnode = nvp;
5621 nvp->v_data = udf_node;
5622 udf_node->loc = node_icb_loc;
5623 udf_node->write_loc = node_icb_loc;
5624 udf_node->lockf = 0;
5625 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
5626 cv_init(&udf_node->node_lock, "udf_nlk");
5627 udf_node->outstanding_bufs = 0;
5628 udf_node->outstanding_nodedscr = 0;
5629
5630 /* initialise genfs */
5631 genfs_node_init(nvp, &udf_genfsops);
5632
5633 /* insert into the hash lookup */
5634 udf_register_node(udf_node);
5635
5636 	/* get parent's unique ID for referring to '..' if it's a directory */
5637 if (dir_node->fe) {
5638 parent_unique_id = udf_rw64(dir_node->fe->unique_id);
5639 parent_gid = (gid_t) udf_rw32(dir_node->fe->gid);
5640 } else {
5641 parent_unique_id = udf_rw64(dir_node->efe->unique_id);
5642 parent_gid = (gid_t) udf_rw32(dir_node->efe->gid);
5643 }
5644
5645 /* get descriptor */
5646 udf_create_logvol_dscr(ump, udf_node, &node_icb_loc, &dscr);
5647
5648 /* choose a fe or an efe for it */
5649 if (ump->logical_vol->tag.descriptor_ver == 2) {
5650 udf_node->fe = &dscr->fe;
5651 fid_size = udf_create_new_fe(ump, udf_node->fe,
5652 udf_file_type, &udf_node->loc,
5653 &dir_node->loc, parent_unique_id);
5654 /* TODO add extended attribute for creation time */
5655 } else {
5656 udf_node->efe = &dscr->efe;
5657 fid_size = udf_create_new_efe(ump, udf_node->efe,
5658 udf_file_type, &udf_node->loc,
5659 &dir_node->loc, parent_unique_id);
5660 }
5661 KASSERT(dscr->tag.tag_loc == udf_node->loc.loc.lb_num);
5662
5663 /* update vnode's size and type */
5664 nvp->v_type = vap->va_type;
5665 uvm_vnp_setsize(nvp, fid_size);
5666
5667 /* set access mode */
5668 udf_setaccessmode(udf_node, vap->va_mode);
5669
5670 /* set ownership */
5671 uid = kauth_cred_geteuid(cnp->cn_cred);
5672 gid = parent_gid;
5673 udf_setownership(udf_node, uid, gid);
5674
5675 error = udf_dir_attach(ump, dir_node, udf_node, vap, cnp);
5676 if (error) {
5677 /* free disc allocation for node */
5678 udf_free_allocated_space(ump, lb_num, vpart_num, 1);
5679
5680 /* recycle udf_node */
5681 udf_dispose_node(udf_node);
5682 vput(nvp);
5683
5684 *vpp = NULL;
5685 return error;
5686 }
5687
5688 /* adjust file count */
5689 udf_adjust_filecount(udf_node, 1);
5690
5691 /* return result */
5692 *vpp = nvp;
5693
5694 return 0;
5695 }
5696
5697
5698 int
5699 udf_create_node(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
5700 struct componentname *cnp)
5701 {
5702 int (**vnodeops)(void *);
5703 int udf_file_type;
5704
5705 DPRINTF(NODE, ("udf_create_node called\n"));
5706
5707 /* what type are we creating ? */
5708 vnodeops = udf_vnodeop_p;
5709 /* start with a default */
5710 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
5711
5712 *vpp = NULL;
5713
5714 switch (vap->va_type) {
5715 case VREG :
5716 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
5717 break;
5718 case VDIR :
5719 udf_file_type = UDF_ICB_FILETYPE_DIRECTORY;
5720 break;
5721 case VLNK :
5722 udf_file_type = UDF_ICB_FILETYPE_SYMLINK;
5723 break;
5724 case VBLK :
5725 udf_file_type = UDF_ICB_FILETYPE_BLOCKDEVICE;
5726 /* specfs */
5727 return ENOTSUP;
5728 break;
5729 case VCHR :
5730 udf_file_type = UDF_ICB_FILETYPE_CHARDEVICE;
5731 /* specfs */
5732 return ENOTSUP;
5733 break;
5734 case VFIFO :
5735 udf_file_type = UDF_ICB_FILETYPE_FIFO;
5736 /* specfs */
5737 return ENOTSUP;
5738 break;
5739 case VSOCK :
5740 udf_file_type = UDF_ICB_FILETYPE_SOCKET;
5741 /* specfs */
5742 return ENOTSUP;
5743 break;
5744 case VNON :
5745 case VBAD :
5746 default :
5747 /* nothing; can we even create these? */
5748 return EINVAL;
5749 }
5750
5751 return udf_create_node_raw(dvp, vpp, udf_file_type, vnodeops, vap, cnp);
5752 }
5753
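/*
 * Purely illustrative sketch, not used by the driver: the vnode type to
 * UDF ICB file type mapping handled by the switch above could also be
 * written as a small lookup table.  The table name is made up; the
 * special-file types would still return ENOTSUP until specfs support is
 * added.
 */
#if 0
static const int udf_vtype_to_icbftype[] = {
	[VREG]  = UDF_ICB_FILETYPE_RANDOMACCESS,
	[VDIR]  = UDF_ICB_FILETYPE_DIRECTORY,
	[VLNK]  = UDF_ICB_FILETYPE_SYMLINK,
	[VBLK]  = UDF_ICB_FILETYPE_BLOCKDEVICE,
	[VCHR]  = UDF_ICB_FILETYPE_CHARDEVICE,
	[VFIFO] = UDF_ICB_FILETYPE_FIFO,
	[VSOCK] = UDF_ICB_FILETYPE_SOCKET,
};
#endif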
5754 /* --------------------------------------------------------------------- */
5755
5756 static void
5757 udf_free_descriptor_space(struct udf_node *udf_node, struct long_ad *loc, void *mem)
5758 {
5759 struct udf_mount *ump = udf_node->ump;
5760 uint32_t lb_size, lb_num, len, num_lb;
5761 uint16_t vpart_num;
5762
5763 /* is there really one? */
5764 if (mem == NULL)
5765 return;
5766
5767 /* got a descriptor here */
5768 len = UDF_EXT_LEN(udf_rw32(loc->len));
5769 lb_num = udf_rw32(loc->loc.lb_num);
5770 vpart_num = udf_rw16(loc->loc.part_num);
5771
5772 lb_size = udf_rw32(ump->logical_vol->lb_size);
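	/* round up: e.g. a 2049 byte descriptor in 2048 byte blocks takes 2 blocks */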
5773 num_lb = (len + lb_size -1) / lb_size;
5774
5775 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
5776 }
5777
5778 void
5779 udf_delete_node(struct udf_node *udf_node)
5780 {
5781 void *dscr;
5782 struct udf_mount *ump;
5783 struct long_ad *loc;
5784 int extnr, lvint, dummy;
5785
5786 ump = udf_node->ump;
5787
5788 	/* paranoia check on integrity; it should be open! we could panic */
5789 lvint = udf_rw32(udf_node->ump->logvol_integrity->integrity_type);
5790 if (lvint == UDF_INTEGRITY_CLOSED)
5791 printf("\tIntegrity was CLOSED!\n");
5792
5793 /* whatever the node type, change its size to zero */
5794 (void) udf_resize_node(udf_node, 0, &dummy);
5795
5796 /* force it to be `clean'; no use writing it out */
5797 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED | IN_ACCESS |
5798 IN_CHANGE | IN_UPDATE | IN_MODIFY);
5799
5800 /* adjust file count */
5801 udf_adjust_filecount(udf_node, -1);
5802
5803 /*
5804 * Free its allocated descriptors; memory will be released when
5805 * vop_reclaim() is called.
5806 */
5807 loc = &udf_node->loc;
5808
5809 dscr = udf_node->fe;
5810 udf_free_descriptor_space(udf_node, loc, dscr);
5811 dscr = udf_node->efe;
5812 udf_free_descriptor_space(udf_node, loc, dscr);
5813
5814 for (extnr = 0; extnr < UDF_MAX_ALLOC_EXTENTS; extnr++) {
5815 dscr = udf_node->ext[extnr];
5816 loc = &udf_node->ext_loc[extnr];
5817 udf_free_descriptor_space(udf_node, loc, dscr);
5818 }
5819 }
5820
5821 /* --------------------------------------------------------------------- */
5822
5823 /* set new filesize; node must be LOCKED on entry and is locked on exit */
5824 int
5825 udf_resize_node(struct udf_node *udf_node, uint64_t new_size, int *extended)
5826 {
5827 struct file_entry *fe = udf_node->fe;
5828 struct extfile_entry *efe = udf_node->efe;
5829 uint64_t file_size;
5830 int error;
5831
5832 if (fe) {
5833 file_size = udf_rw64(fe->inf_len);
5834 } else {
5835 assert(udf_node->efe);
5836 file_size = udf_rw64(efe->inf_len);
5837 }
5838
5839 DPRINTF(ATTR, ("\tchanging file length from %"PRIu64" to %"PRIu64"\n",
5840 file_size, new_size));
5841
5842 /* if not changing, we're done */
5843 if (file_size == new_size)
5844 return 0;
5845
5846 *extended = (new_size > file_size);
5847 if (*extended) {
5848 error = udf_grow_node(udf_node, new_size);
5849 } else {
5850 error = udf_shrink_node(udf_node, new_size);
5851 }
5852
5853 return error;
5854 }
5855
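/*
 * Minimal usage sketch, not part of the driver: a size-changing caller
 * (e.g. a setattr style path) holds the node locked, resizes, propagates
 * the new size to UVM and marks the node for update.  The function name
 * is made up for the example.
 */
#if 0
static int
udf_example_setsize(struct udf_node *udf_node, uint64_t new_size)
{
	int extended, error;

	error = udf_resize_node(udf_node, new_size, &extended);
	if (error)
		return error;
	uvm_vnp_setsize(udf_node->vnode, new_size);
	udf_node->i_flags |= IN_CHANGE | IN_UPDATE;
	return 0;
}
#endif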
5856
5857 /* --------------------------------------------------------------------- */
5858
5859 void
5860 udf_itimes(struct udf_node *udf_node, struct timespec *acc,
5861 struct timespec *mod, struct timespec *birth)
5862 {
5863 struct timespec now;
5864 struct file_entry *fe;
5865 struct extfile_entry *efe;
5866 struct filetimes_extattr_entry *ft_extattr;
5867 struct timestamp *atime, *mtime, *attrtime, *ctime;
5868 struct timestamp fe_ctime;
5869 struct timespec cur_birth;
5870 uint32_t offset, a_l;
5871 uint8_t *filedata;
5872 int error;
5873
5874 /* protect against rogue values */
5875 if (!udf_node)
5876 return;
5877
5878 fe = udf_node->fe;
5879 efe = udf_node->efe;
5880
5881 if (!(udf_node->i_flags & (IN_ACCESS|IN_CHANGE|IN_UPDATE|IN_MODIFY)))
5882 return;
5883
5884 /* get descriptor information */
5885 if (fe) {
5886 atime = &fe->atime;
5887 mtime = &fe->mtime;
5888 attrtime = &fe->attrtime;
5889 filedata = fe->data;
5890
5891 		/* start with a safe dummy setting */
5892 ctime = &fe_ctime;
5893
5894 /* check our extended attribute if present */
5895 error = udf_extattr_search_intern(udf_node,
5896 UDF_FILETIMES_ATTR_NO, "", &offset, &a_l);
5897 if (!error) {
5898 ft_extattr = (struct filetimes_extattr_entry *)
5899 (filedata + offset);
5900 if (ft_extattr->existence & UDF_FILETIMES_FILE_CREATION)
5901 ctime = &ft_extattr->times[0];
5902 }
5903 /* TODO create the extended attribute if not found ? */
5904 } else {
5905 assert(udf_node->efe);
5906 atime = &efe->atime;
5907 mtime = &efe->mtime;
5908 attrtime = &efe->attrtime;
5909 ctime = &efe->ctime;
5910 }
5911
5912 vfs_timestamp(&now);
5913
5914 /* set access time */
5915 if (udf_node->i_flags & IN_ACCESS) {
5916 if (acc == NULL)
5917 acc = &now;
5918 udf_timespec_to_timestamp(acc, atime);
5919 }
5920
5921 /* set modification time */
5922 if (udf_node->i_flags & (IN_UPDATE | IN_MODIFY)) {
5923 if (mod == NULL)
5924 mod = &now;
5925 udf_timespec_to_timestamp(mod, mtime);
5926
5927 		/* ensure birthtime is not newer than the new modification time! */
5928 udf_timestamp_to_timespec(udf_node->ump, ctime, &cur_birth);
5929 if ((cur_birth.tv_sec > mod->tv_sec) ||
5930 ((cur_birth.tv_sec == mod->tv_sec) &&
5931 (cur_birth.tv_nsec > mod->tv_nsec))) {
5932 udf_timespec_to_timestamp(mod, ctime);
5933 }
5934 }
5935
5936 /* update birthtime if specified */
5937 	/* XXX we assume here that the given birthtime is older than mod */
5938 if (birth && (birth->tv_sec != VNOVAL)) {
5939 udf_timespec_to_timestamp(birth, ctime);
5940 }
5941
5942 /* set change time */
5943 if (udf_node->i_flags & (IN_CHANGE | IN_MODIFY))
5944 udf_timespec_to_timestamp(&now, attrtime);
5945
5946 /* notify updates to the node itself */
5947 if (udf_node->i_flags & (IN_ACCESS | IN_MODIFY))
5948 udf_node->i_flags |= IN_ACCESSED;
5949 if (udf_node->i_flags & (IN_UPDATE | IN_CHANGE))
5950 udf_node->i_flags |= IN_MODIFIED;
5951
5952 /* clear modification flags */
5953 udf_node->i_flags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY);
5954 }
5955
5956 /* --------------------------------------------------------------------- */
5957
5958 int
5959 udf_update(struct vnode *vp, struct timespec *acc,
5960 struct timespec *mod, struct timespec *birth, int updflags)
5961 {
5962 union dscrptr *dscrptr;
5963 struct udf_node *udf_node = VTOI(vp);
5964 struct udf_mount *ump = udf_node->ump;
5965 struct regid *impl_id;
5966 int mnt_async = (vp->v_mount->mnt_flag & MNT_ASYNC);
5967 int waitfor, flags;
5968
5969 #ifdef DEBUG
5970 char bits[128];
5971 DPRINTF(CALL, ("udf_update(node, %p, %p, %p, %d)\n", acc, mod, birth,
5972 updflags));
5973 bitmask_snprintf(udf_node->i_flags, IN_FLAGBITS, bits, sizeof(bits));
5974 DPRINTF(CALL, ("\tnode flags %s\n", bits));
5975 DPRINTF(CALL, ("\t\tmnt_async = %d\n", mnt_async));
5976 #endif
5977
5978 /* set our times */
5979 udf_itimes(udf_node, acc, mod, birth);
5980
5981 /* set our implementation id */
5982 if (udf_node->fe) {
5983 dscrptr = (union dscrptr *) udf_node->fe;
5984 impl_id = &udf_node->fe->imp_id;
5985 } else {
5986 dscrptr = (union dscrptr *) udf_node->efe;
5987 impl_id = &udf_node->efe->imp_id;
5988 }
5989
5990 /* set our ID */
5991 udf_set_regid(impl_id, IMPL_NAME);
5992 udf_add_impl_regid(ump, impl_id);
5993
5994 /* update our crc! on RMW we are not allowed to change a thing */
5995 udf_validate_tag_and_crc_sums(dscrptr);
5996
5997 /* if called when mounted readonly, never write back */
5998 if (vp->v_mount->mnt_flag & MNT_RDONLY)
5999 return 0;
6000
6001 	/* check if the node is dirty 'enough' */
6002 if (updflags & UPDATE_CLOSE) {
6003 flags = udf_node->i_flags & (IN_MODIFIED | IN_ACCESSED);
6004 } else {
6005 flags = udf_node->i_flags & IN_MODIFIED;
6006 }
6007 if (flags == 0)
6008 return 0;
6009
6010 /* determine if we need to write sync or async */
6011 waitfor = 0;
6012 if ((flags & IN_MODIFIED) && (mnt_async == 0)) {
6013 /* sync mounted */
6014 waitfor = updflags & UPDATE_WAIT;
6015 if (updflags & UPDATE_DIROP)
6016 waitfor |= UPDATE_WAIT;
6017 }
6018 if (waitfor)
6019 return VOP_FSYNC(vp, FSCRED, FSYNC_WAIT, 0,0);
6020
6021 return 0;
6022 }
6023
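/*
 * Minimal usage sketch, illustration only: a caller that changed node
 * metadata sets the relevant IN_* flag and lets udf_update() stamp the
 * times; NULL time pointers mean `now' and a NULL birthtime is ignored.
 * The function name is made up for the example.
 */
#if 0
static void
udf_example_mark_changed(struct vnode *vp)
{
	struct udf_node *udf_node = VTOI(vp);

	udf_node->i_flags |= IN_CHANGE;
	(void) udf_update(vp, NULL, NULL, NULL, 0);
}
#endif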
6024
6025 /* --------------------------------------------------------------------- */
6026
6027
6028 /*
6029  * Read one fid and process it into a dirent, then advance to the next;
6030  * (*fid) has to be allocated a logical block in size, (*dirent) a struct dirent in length.
6031 */
6032
6033 int
6034 udf_read_fid_stream(struct vnode *vp, uint64_t *offset,
6035 struct fileid_desc *fid, struct dirent *dirent)
6036 {
6037 struct udf_node *dir_node = VTOI(vp);
6038 struct udf_mount *ump = dir_node->ump;
6039 struct file_entry *fe = dir_node->fe;
6040 struct extfile_entry *efe = dir_node->efe;
6041 uint32_t fid_size, lb_size;
6042 uint64_t file_size;
6043 char *fid_name;
6044 int enough, error;
6045
6046 assert(fid);
6047 assert(dirent);
6048 assert(dir_node);
6049 assert(offset);
6050 assert(*offset != 1);
6051
6052 DPRINTF(FIDS, ("read_fid_stream called at offset %"PRIu64"\n", *offset));
6053 /* check if we're past the end of the directory */
6054 if (fe) {
6055 file_size = udf_rw64(fe->inf_len);
6056 } else {
6057 assert(dir_node->efe);
6058 file_size = udf_rw64(efe->inf_len);
6059 }
6060 if (*offset >= file_size)
6061 return EINVAL;
6062
6063 /* get maximum length of FID descriptor */
6064 lb_size = udf_rw32(ump->logical_vol->lb_size);
6065
6066 /* initialise return values */
6067 fid_size = 0;
6068 memset(dirent, 0, sizeof(struct dirent));
6069 memset(fid, 0, lb_size);
6070
6071 enough = (file_size - (*offset) >= UDF_FID_SIZE);
6072 if (!enough) {
6073 /* short dir ... */
6074 return EIO;
6075 }
6076
6077 error = vn_rdwr(UIO_READ, vp,
6078 fid, MIN(file_size - (*offset), lb_size), *offset,
6079 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, FSCRED,
6080 NULL, NULL);
6081 if (error)
6082 return error;
6083
6084 DPRINTF(FIDS, ("\tfid piece read in fine\n"));
6085 /*
6086 * Check if we got a whole descriptor.
6087 * TODO Try to `resync' directory stream when something is very wrong.
6088 */
6089
6090 /* check if our FID header is OK */
6091 error = udf_check_tag(fid);
6092 if (error) {
6093 goto brokendir;
6094 }
6095 DPRINTF(FIDS, ("\ttag check ok\n"));
6096
6097 if (udf_rw16(fid->tag.id) != TAGID_FID) {
6098 error = EIO;
6099 goto brokendir;
6100 }
6101 DPRINTF(FIDS, ("\ttag checked ok: got TAGID_FID\n"));
6102
6103 /* check for length */
6104 fid_size = udf_fidsize(fid);
6105 enough = (file_size - (*offset) >= fid_size);
6106 if (!enough) {
6107 error = EIO;
6108 goto brokendir;
6109 }
6110 DPRINTF(FIDS, ("\tthe complete fid is read in\n"));
6111
6112 /* check FID contents */
6113 error = udf_check_tag_payload((union dscrptr *) fid, lb_size);
6114 brokendir:
6115 if (error) {
6116 		/* note that this is sometimes a bit quick to report */
6117 printf("BROKEN DIRECTORY ENTRY\n");
6118 /* RESYNC? */
6119 /* TODO: use udf_resync_fid_stream */
6120 return EIO;
6121 }
6122 DPRINTF(FIDS, ("\tpayload checked ok\n"));
6123
6124 /* we got a whole and valid descriptor! */
6125 DPRINTF(FIDS, ("\tinterpret FID\n"));
6126
6127 /* create resulting dirent structure */
6128 fid_name = (char *) fid->data + udf_rw16(fid->l_iu);
6129 udf_to_unix_name(dirent->d_name, MAXNAMLEN,
6130 fid_name, fid->l_fi, &ump->logical_vol->desc_charset);
6131
6132 /* '..' has no name, so provide one */
6133 if (fid->file_char & UDF_FILE_CHAR_PAR)
6134 strcpy(dirent->d_name, "..");
6135
6136 dirent->d_fileno = udf_calchash(&fid->icb); /* inode hash XXX */
6137 dirent->d_namlen = strlen(dirent->d_name);
6138 dirent->d_reclen = _DIRENT_SIZE(dirent);
6139
6140 /*
6141  * Note that it's not worth trying to determine the file types now; it is
6142  * simply too expensive.
6143 */
6144 dirent->d_type = DT_UNKNOWN;
6145
6146 /* initial guess for filetype we can make */
6147 if (fid->file_char & UDF_FILE_CHAR_DIR)
6148 dirent->d_type = DT_DIR;
6149
6150 /* advance */
6151 *offset += fid_size;
6152
6153 return error;
6154 }
6155
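/*
 * Minimal usage sketch, illustration only: walking a directory with
 * udf_read_fid_stream().  The fid buffer must be one logical block in
 * size; the function name is made up and it assumes the directory vnode
 * is locked by the caller and that the fid stream starts at offset 0.
 */
#if 0
static int
udf_example_walk_dir(struct vnode *dvp)
{
	struct udf_node *dir_node = VTOI(dvp);
	struct fileid_desc *fid;
	struct dirent *dirent;
	uint64_t file_size, offset;
	uint32_t lb_size;
	int error;

	lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
	if (dir_node->fe)
		file_size = udf_rw64(dir_node->fe->inf_len);
	else
		file_size = udf_rw64(dir_node->efe->inf_len);

	fid = malloc(lb_size, M_TEMP, M_WAITOK);
	dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK);

	error = 0;
	offset = 0;
	while (offset < file_size) {
		error = udf_read_fid_stream(dvp, &offset, fid, dirent);
		if (error)
			break;
		/* (*dirent) now describes one entry; offset was advanced */
	}

	free(dirent, M_TEMP);
	free(fid, M_TEMP);
	return error;
}
#endif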
6156
6157 /* --------------------------------------------------------------------- */
6158
6159 static void
6160 udf_sync_pass(struct udf_mount *ump, kauth_cred_t cred, int waitfor,
6161 int pass, int *ndirty)
6162 {
6163 struct udf_node *udf_node, *n_udf_node;
6164 struct vnode *vp;
6165 int vdirty, error;
6166 int on_type, on_flags, on_vnode;
6167
6168 derailed:
6169 KASSERT(mutex_owned(&mntvnode_lock));
6170
6171 DPRINTF(SYNC, ("sync_pass %d\n", pass));
6172 udf_node = LIST_FIRST(&ump->sorted_udf_nodes);
6173 for (;udf_node; udf_node = n_udf_node) {
6174 DPRINTF(SYNC, ("."));
6175
6176 udf_node->i_flags &= ~IN_SYNCED;
6177 vp = udf_node->vnode;
6178
6179 mutex_enter(&vp->v_interlock);
6180 n_udf_node = LIST_NEXT(udf_node, sortchain);
6181 if (n_udf_node)
6182 n_udf_node->i_flags |= IN_SYNCED;
6183
6184 /* system nodes are not synced this way */
6185 if (vp->v_vflag & VV_SYSTEM) {
6186 mutex_exit(&vp->v_interlock);
6187 continue;
6188 }
6189
6190 		/* check if it's dirty enough to even try */
6191 on_type = (waitfor == MNT_LAZY || vp->v_type == VNON);
6192 on_flags = ((udf_node->i_flags &
6193 (IN_ACCESSED | IN_UPDATE | IN_MODIFIED)) == 0);
6194 on_vnode = LIST_EMPTY(&vp->v_dirtyblkhd)
6195 && UVM_OBJ_IS_CLEAN(&vp->v_uobj);
6196 if (on_type || (on_flags || on_vnode)) { /* XXX */
6197 /* not dirty (enough?) */
6198 mutex_exit(&vp->v_interlock);
6199 continue;
6200 }
6201
6202 mutex_exit(&mntvnode_lock);
6203 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
6204 if (error) {
6205 mutex_enter(&mntvnode_lock);
6206 if (error == ENOENT)
6207 goto derailed;
6208 *ndirty += 1;
6209 continue;
6210 }
6211
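		/*
		 * pass 1 pushes out file data only, pass 2 does a full fsync
		 * on nodes that have no I/O outstanding anymore, pass 3 just
		 * counts the still outstanding buffers.
		 */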
6212 switch (pass) {
6213 case 1:
6214 VOP_FSYNC(vp, cred, 0 | FSYNC_DATAONLY,0,0);
6215 break;
6216 case 2:
6217 vdirty = vp->v_numoutput;
6218 if (vp->v_tag == VT_UDF)
6219 vdirty += udf_node->outstanding_bufs +
6220 udf_node->outstanding_nodedscr;
6221 if (vdirty == 0)
6222 VOP_FSYNC(vp, cred, 0,0,0);
6223 *ndirty += vdirty;
6224 break;
6225 case 3:
6226 vdirty = vp->v_numoutput;
6227 if (vp->v_tag == VT_UDF)
6228 vdirty += udf_node->outstanding_bufs +
6229 udf_node->outstanding_nodedscr;
6230 *ndirty += vdirty;
6231 break;
6232 }
6233
6234 vput(vp);
6235 mutex_enter(&mntvnode_lock);
6236 }
6237 DPRINTF(SYNC, ("END sync_pass %d\n", pass));
6238 }
6239
6240
6241 void
6242 udf_do_sync(struct udf_mount *ump, kauth_cred_t cred, int waitfor)
6243 {
6244 int dummy, ndirty;
6245
6246 mutex_enter(&mntvnode_lock);
6247 recount:
6248 dummy = 0;
6249 DPRINTF(CALL, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
6250 DPRINTF(SYNC, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
6251 udf_sync_pass(ump, cred, waitfor, 1, &dummy);
6252
6253 DPRINTF(CALL, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
6254 DPRINTF(SYNC, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
6255 udf_sync_pass(ump, cred, waitfor, 2, &dummy);
6256
6257 if (waitfor == MNT_WAIT) {
6258 ndirty = ump->devvp->v_numoutput;
6259 DPRINTF(NODE, ("counting pending blocks: on devvp %d\n",
6260 ndirty));
6261 udf_sync_pass(ump, cred, waitfor, 3, &ndirty);
6262 DPRINTF(NODE, ("counted num dirty pending blocks %d\n",
6263 ndirty));
6264
6265 if (ndirty) {
6266 /* 1/4 second wait */
6267 cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
6268 hz/4);
6269 goto recount;
6270 }
6271 }
6272
6273 mutex_exit(&mntvnode_lock);
6274 }
6275
6276 /* --------------------------------------------------------------------- */
6277
6278 /*
6279 * Read and write file extent in/from the buffer.
6280 *
6281  * The split-up of the extent into separate request buffers is to minimise
6282  * copying around as much as possible.
6283 *
6284 * block based file reading and writing
6285 */
6286
6287 static int
6288 udf_read_internal(struct udf_node *node, uint8_t *blob)
6289 {
6290 struct udf_mount *ump;
6291 struct file_entry *fe = node->fe;
6292 struct extfile_entry *efe = node->efe;
6293 uint64_t inflen;
6294 uint32_t sector_size;
6295 uint8_t *pos;
6296 int icbflags, addr_type;
6297
6298 /* get extent and do some paranoia checks */
6299 ump = node->ump;
6300 sector_size = ump->discinfo.sector_size;
6301
6302 if (fe) {
6303 inflen = udf_rw64(fe->inf_len);
6304 pos = &fe->data[0] + udf_rw32(fe->l_ea);
6305 icbflags = udf_rw16(fe->icbtag.flags);
6306 } else {
6307 assert(node->efe);
6308 inflen = udf_rw64(efe->inf_len);
6309 pos = &efe->data[0] + udf_rw32(efe->l_ea);
6310 icbflags = udf_rw16(efe->icbtag.flags);
6311 }
6312 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
6313
6314 assert(addr_type == UDF_ICB_INTERN_ALLOC);
6315 assert(inflen < sector_size);
6316
6317 /* copy out info */
6318 memset(blob, 0, sector_size);
6319 memcpy(blob, pos, inflen);
6320
6321 return 0;
6322 }
6323
6324
6325 static int
6326 udf_write_internal(struct udf_node *node, uint8_t *blob)
6327 {
6328 struct udf_mount *ump;
6329 struct file_entry *fe = node->fe;
6330 struct extfile_entry *efe = node->efe;
6331 uint64_t inflen;
6332 uint32_t sector_size;
6333 uint8_t *pos;
6334 int icbflags, addr_type;
6335
6336 /* get extent and do some paranoia checks */
6337 ump = node->ump;
6338 sector_size = ump->discinfo.sector_size;
6339
6340 if (fe) {
6341 inflen = udf_rw64(fe->inf_len);
6342 pos = &fe->data[0] + udf_rw32(fe->l_ea);
6343 icbflags = udf_rw16(fe->icbtag.flags);
6344 } else {
6345 assert(node->efe);
6346 inflen = udf_rw64(efe->inf_len);
6347 pos = &efe->data[0] + udf_rw32(efe->l_ea);
6348 icbflags = udf_rw16(efe->icbtag.flags);
6349 }
6350 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
6351
6352 assert(addr_type == UDF_ICB_INTERN_ALLOC);
6353 assert(inflen < sector_size);
6354
6355 /* copy in blob */
6356 /* memset(pos, 0, inflen); */
6357 memcpy(pos, blob, inflen);
6358
6359 return 0;
6360 }
6361
6362
6363 void
6364 udf_read_filebuf(struct udf_node *udf_node, struct buf *buf)
6365 {
6366 struct buf *nestbuf;
6367 struct udf_mount *ump = udf_node->ump;
6368 uint64_t *mapping;
6369 uint64_t run_start;
6370 uint32_t sector_size;
6371 uint32_t buf_offset, sector, rbuflen, rblk;
6372 uint32_t from, lblkno;
6373 uint32_t sectors;
6374 uint8_t *buf_pos;
6375 int error, run_length, isdir, what;
6376
6377 sector_size = udf_node->ump->discinfo.sector_size;
6378
6379 from = buf->b_blkno;
6380 sectors = buf->b_bcount / sector_size;
6381
6382 isdir = (udf_node->vnode->v_type == VDIR);
6383 what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
6384
6385 	/* ensure we have enough translation slots */
6386 KASSERT(buf->b_bcount / sector_size <= UDF_MAX_MAPPINGS);
6387 KASSERT(MAXPHYS / sector_size <= UDF_MAX_MAPPINGS);
6388
6389 if (sectors > UDF_MAX_MAPPINGS) {
6390 printf("udf_read_filebuf: implementation limit on bufsize\n");
6391 buf->b_error = EIO;
6392 biodone(buf);
6393 return;
6394 }
6395
6396 mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
6397
6398 error = 0;
6399 DPRINTF(READ, ("\ttranslate %d-%d\n", from, sectors));
6400 error = udf_translate_file_extent(udf_node, from, sectors, mapping);
6401 if (error) {
6402 buf->b_error = error;
6403 biodone(buf);
6404 goto out;
6405 }
6406 DPRINTF(READ, ("\ttranslate extent went OK\n"));
6407
6408 	/* pre-check if it's an internal allocation */
6409 if (*mapping == UDF_TRANS_INTERN) {
6410 error = udf_read_internal(udf_node, (uint8_t *) buf->b_data);
6411 if (error)
6412 buf->b_error = error;
6413 biodone(buf);
6414 goto out;
6415 }
6416 DPRINTF(READ, ("\tnot intern\n"));
6417
6418 #ifdef DEBUG
6419 if (udf_verbose & UDF_DEBUG_TRANSLATE) {
6420 printf("Returned translation table:\n");
6421 for (sector = 0; sector < sectors; sector++) {
6422 printf("%d : %"PRIu64"\n", sector, mapping[sector]);
6423 }
6424 }
6425 #endif
6426
6427 	/* request read-in of data from the disc scheduler */
6428 buf->b_resid = buf->b_bcount;
6429 for (sector = 0; sector < sectors; sector++) {
6430 buf_offset = sector * sector_size;
6431 buf_pos = (uint8_t *) buf->b_data + buf_offset;
6432 DPRINTF(READ, ("\tprocessing rel sector %d\n", sector));
6433
6434 		/* check if it's zero or unmapped to stop reading */
6435 switch (mapping[sector]) {
6436 case UDF_TRANS_UNMAPPED:
6437 case UDF_TRANS_ZERO:
6438 /* copy zero sector TODO runlength like below */
6439 memset(buf_pos, 0, sector_size);
6440 DPRINTF(READ, ("\treturning zero sector\n"));
6441 nestiobuf_done(buf, sector_size, 0);
6442 break;
6443 default :
6444 DPRINTF(READ, ("\tread sector "
6445 "%"PRIu64"\n", mapping[sector]));
6446
6447 lblkno = from + sector;
6448 run_start = mapping[sector];
6449 run_length = 1;
6450 while (sector < sectors-1) {
6451 if (mapping[sector+1] != mapping[sector]+1)
6452 break;
6453 run_length++;
6454 sector++;
6455 }
6456
6457 /*
6458 * nest an iobuf and mark it for async reading. Since
6459 * we're using nested buffers, they can't be cached by
6460 * design.
6461 */
6462 rbuflen = run_length * sector_size;
6463 rblk = run_start * (sector_size/DEV_BSIZE);
6464
6465 nestbuf = getiobuf(NULL, true);
6466 nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
6467 /* nestbuf is B_ASYNC */
6468
6469 /* identify this nestbuf */
6470 nestbuf->b_lblkno = lblkno;
6471 assert(nestbuf->b_vp == udf_node->vnode);
6472
6473 			/* CD schedules on raw blkno */
6474 nestbuf->b_blkno = rblk;
6475 nestbuf->b_proc = NULL;
6476 nestbuf->b_rawblkno = rblk;
6477 nestbuf->b_udf_c_type = what;
6478
6479 udf_discstrat_queuebuf(ump, nestbuf);
6480 }
6481 }
6482 out:
6483 /* if we're synchronously reading, wait for the completion */
6484 if ((buf->b_flags & B_ASYNC) == 0)
6485 biowait(buf);
6486
6487 DPRINTF(READ, ("\tend of read_filebuf\n"));
6488 free(mapping, M_TEMP);
6489 return;
6490 }
6491
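/*
 * Illustration only: the run detection used in udf_read_filebuf() above,
 * pulled out into a stand-alone helper (udf_write_filebuf() below
 * additionally coalesces equal entries).  Given the translation table it
 * returns the number of physically consecutive sectors starting at `start';
 * e.g. for a mapping of 100,101,102,200 the run starting at index 0 has
 * length 3.  The helper name is made up.
 */
#if 0
static int
udf_example_run_length(uint64_t *mapping, uint32_t start, uint32_t nsectors)
{
	uint32_t sector = start;
	int run_length = 1;

	while (sector < nsectors - 1) {
		if (mapping[sector + 1] != mapping[sector] + 1)
			break;
		run_length++;
		sector++;
	}
	return run_length;
}
#endif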
6492
6493 void
6494 udf_write_filebuf(struct udf_node *udf_node, struct buf *buf)
6495 {
6496 struct buf *nestbuf;
6497 struct udf_mount *ump = udf_node->ump;
6498 uint64_t *mapping;
6499 uint64_t run_start;
6500 uint32_t lb_size;
6501 uint32_t buf_offset, lb_num, rbuflen, rblk;
6502 uint32_t from, lblkno;
6503 uint32_t num_lb;
6504 uint8_t *buf_pos;
6505 int error, run_length, isdir, what, s;
6506
6507 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
6508
6509 from = buf->b_blkno;
6510 num_lb = buf->b_bcount / lb_size;
6511
6512 isdir = (udf_node->vnode->v_type == VDIR);
6513 what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
6514
6515 if (udf_node == ump->metadatabitmap_node)
6516 what = UDF_C_METADATA_SBM;
6517
6518 	/* ensure we have enough translation slots */
6519 KASSERT(buf->b_bcount / lb_size <= UDF_MAX_MAPPINGS);
6520 KASSERT(MAXPHYS / lb_size <= UDF_MAX_MAPPINGS);
6521
6522 if (num_lb > UDF_MAX_MAPPINGS) {
6523 printf("udf_write_filebuf: implementation limit on bufsize\n");
6524 buf->b_error = EIO;
6525 biodone(buf);
6526 return;
6527 }
6528
6529 mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
6530
6531 error = 0;
6532 DPRINTF(WRITE, ("\ttranslate %d-%d\n", from, num_lb));
6533 error = udf_translate_file_extent(udf_node, from, num_lb, mapping);
6534 if (error) {
6535 buf->b_error = error;
6536 biodone(buf);
6537 goto out;
6538 }
6539 DPRINTF(WRITE, ("\ttranslate extent went OK\n"));
6540
6541 	/* if it's internally mapped, we can write it in the descriptor itself */
6542 if (*mapping == UDF_TRANS_INTERN) {
6543 /* TODO paranoia check if we ARE going to have enough space */
6544 error = udf_write_internal(udf_node, (uint8_t *) buf->b_data);
6545 if (error)
6546 buf->b_error = error;
6547 biodone(buf);
6548 goto out;
6549 }
6550 DPRINTF(WRITE, ("\tnot intern\n"));
6551
6552 	/* request write out of data to the disc scheduler */
6553 buf->b_resid = buf->b_bcount;
6554 for (lb_num = 0; lb_num < num_lb; lb_num++) {
6555 buf_offset = lb_num * lb_size;
6556 buf_pos = (uint8_t *) buf->b_data + buf_offset;
6557 DPRINTF(WRITE, ("\tprocessing rel lb_num %d\n", lb_num));
6558
6559 /*
6560 * Mappings are not that important here. Just before we write
6561 * the lb_num we late-allocate them when needed and update the
6562 * mapping in the udf_node.
6563 */
6564
6565 /* XXX why not ignore the mapping altogether ? */
6566 /* TODO estimate here how much will be late-allocated */
6567 DPRINTF(WRITE, ("\twrite lb_num "
6568 "%"PRIu64, mapping[lb_num]));
6569
6570 lblkno = from + lb_num;
6571 run_start = mapping[lb_num];
6572 run_length = 1;
6573 while (lb_num < num_lb-1) {
6574 if (mapping[lb_num+1] != mapping[lb_num]+1)
6575 if (mapping[lb_num+1] != mapping[lb_num])
6576 break;
6577 run_length++;
6578 lb_num++;
6579 }
6580 DPRINTF(WRITE, ("+ %d\n", run_length));
6581
6582 /* nest an iobuf on the master buffer for the extent */
6583 rbuflen = run_length * lb_size;
6584 rblk = run_start * (lb_size/DEV_BSIZE);
6585
6586 #if 0
6587 		/* if it's zero or unmapped, our blknr gets -1 for unmapped */
6588 switch (mapping[lb_num]) {
6589 case UDF_TRANS_UNMAPPED:
6590 case UDF_TRANS_ZERO:
6591 rblk = -1;
6592 break;
6593 default:
6594 rblk = run_start * (lb_size/DEV_BSIZE);
6595 break;
6596 }
6597 #endif
6598
6599 nestbuf = getiobuf(NULL, true);
6600 nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
6601 /* nestbuf is B_ASYNC */
6602
6603 /* identify this nestbuf */
6604 nestbuf->b_lblkno = lblkno;
6605 KASSERT(nestbuf->b_vp == udf_node->vnode);
6606
6607 		/* CD schedules on raw blkno */
6608 nestbuf->b_blkno = rblk;
6609 nestbuf->b_proc = NULL;
6610 nestbuf->b_rawblkno = rblk;
6611 nestbuf->b_udf_c_type = what;
6612
6613 /* increment our outstanding bufs counter */
6614 s = splbio();
6615 udf_node->outstanding_bufs++;
6616 splx(s);
6617
6618 udf_discstrat_queuebuf(ump, nestbuf);
6619 }
6620 out:
6621 /* if we're synchronously writing, wait for the completion */
6622 if ((buf->b_flags & B_ASYNC) == 0)
6623 biowait(buf);
6624
6625 DPRINTF(WRITE, ("\tend of write_filebuf\n"));
6626 free(mapping, M_TEMP);
6627 return;
6628 }
6629
6630 /* --------------------------------------------------------------------- */
6631
6632
6633