1 /* $NetBSD: udf_subr.c,v 1.47 2008/05/17 08:07:21 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29
30 #include <sys/cdefs.h>
31 #ifndef lint
32 __KERNEL_RCSID(0, "$NetBSD: udf_subr.c,v 1.47 2008/05/17 08:07:21 reinoud Exp $");
33 #endif /* not lint */
34
35
36 #if defined(_KERNEL_OPT)
37 #include "opt_quota.h"
38 #include "opt_compat_netbsd.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <dev/clock_subr.h>
61
62 #include <fs/udf/ecma167-udf.h>
63 #include <fs/udf/udf_mount.h>
64
65 #if defined(_KERNEL_OPT)
66 #include "opt_udf.h"
67 #endif
68
69 #include "udf.h"
70 #include "udf_subr.h"
71 #include "udf_bswap.h"
72
73
74 #define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
75
76 #define UDF_SET_SYSTEMFILE(vp) \
77 /* XXXAD Is the vnode locked? */ \
78 (vp)->v_vflag |= VV_SYSTEM; \
79 vref(vp); \
80 vput(vp); \
81
82 extern int syncer_maxdelay; /* maximum delay time */
83 extern int (**udf_vnodeop_p)(void *);
84
85 /* --------------------------------------------------------------------- */
86
87 //#ifdef DEBUG
88 #if 1
89
90 #if 0
91 static void
92 udf_dumpblob(uint8_t *blob, uint32_t dlen)
93 {
94 int i, j;
95
96 printf("blob = %p\n", blob);
97 printf("dump of %d bytes\n", dlen);
98
99 	for (i = 0; i < dlen; i += 16) {
100 printf("%04x ", i);
101 for (j = 0; j < 16; j++) {
102 if (i+j < dlen) {
103 printf("%02x ", blob[i+j]);
104 } else {
105 printf(" ");
106 }
107 }
108 for (j = 0; j < 16; j++) {
109 if (i+j < dlen) {
110 				if (blob[i+j]>32 && blob[i+j] != 127) {
111 printf("%c", blob[i+j]);
112 } else {
113 printf(".");
114 }
115 }
116 }
117 printf("\n");
118 }
119 printf("\n");
120 Debugger();
121 }
122 #endif
123
124 static void
125 udf_dump_discinfo(struct udf_mount *ump)
126 {
127 char bits[128];
128 struct mmc_discinfo *di = &ump->discinfo;
129
130 if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
131 return;
132
133 printf("Device/media info :\n");
134 printf("\tMMC profile 0x%02x\n", di->mmc_profile);
135 printf("\tderived class %d\n", di->mmc_class);
136 printf("\tsector size %d\n", di->sector_size);
137 printf("\tdisc state %d\n", di->disc_state);
138 printf("\tlast ses state %d\n", di->last_session_state);
139 printf("\tbg format state %d\n", di->bg_format_state);
140 printf("\tfrst track %d\n", di->first_track);
141 printf("\tfst on last ses %d\n", di->first_track_last_session);
142 printf("\tlst on last ses %d\n", di->last_track_last_session);
143 printf("\tlink block penalty %d\n", di->link_block_penalty);
144 bitmask_snprintf(di->disc_flags, MMC_DFLAGS_FLAGBITS, bits,
145 sizeof(bits));
146 printf("\tdisc flags %s\n", bits);
147 printf("\tdisc id %x\n", di->disc_id);
148 printf("\tdisc barcode %"PRIx64"\n", di->disc_barcode);
149
150 printf("\tnum sessions %d\n", di->num_sessions);
151 printf("\tnum tracks %d\n", di->num_tracks);
152
153 bitmask_snprintf(di->mmc_cur, MMC_CAP_FLAGBITS, bits, sizeof(bits));
154 printf("\tcapabilities cur %s\n", bits);
155 bitmask_snprintf(di->mmc_cap, MMC_CAP_FLAGBITS, bits, sizeof(bits));
156 printf("\tcapabilities cap %s\n", bits);
157 }
158 #else
159 #define udf_dump_discinfo(a);
160 #endif
161
162
163 /* --------------------------------------------------------------------- */
164
165 /* not called often */
166 int
167 udf_update_discinfo(struct udf_mount *ump)
168 {
169 struct vnode *devvp = ump->devvp;
170 struct partinfo dpart;
171 struct mmc_discinfo *di;
172 int error;
173
174 DPRINTF(VOLUMES, ("read/update disc info\n"));
175 di = &ump->discinfo;
176 memset(di, 0, sizeof(struct mmc_discinfo));
177
178 	/* check if we're on an MMC capable device, i.e. CD/DVD */
179 error = VOP_IOCTL(devvp, MMCGETDISCINFO, di, FKIOCTL, NOCRED);
180 if (error == 0) {
181 udf_dump_discinfo(ump);
182 return 0;
183 }
184
185 /* disc partition support */
186 error = VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED);
187 if (error)
188 return ENODEV;
189
190 /* set up a disc info profile for partitions */
191 di->mmc_profile = 0x01; /* disc type */
192 di->mmc_class = MMC_CLASS_DISC;
193 di->disc_state = MMC_STATE_CLOSED;
194 di->last_session_state = MMC_STATE_CLOSED;
195 di->bg_format_state = MMC_BGFSTATE_COMPLETED;
196 di->link_block_penalty = 0;
197
198 di->mmc_cur = MMC_CAP_RECORDABLE | MMC_CAP_REWRITABLE |
199 MMC_CAP_ZEROLINKBLK | MMC_CAP_HW_DEFECTFREE;
200 di->mmc_cap = di->mmc_cur;
201 di->disc_flags = MMC_DFLAGS_UNRESTRICTED;
202
203 /* TODO problem with last_possible_lba on resizable VND; request */
204 di->last_possible_lba = dpart.part->p_size;
205 di->sector_size = dpart.disklab->d_secsize;
206
207 di->num_sessions = 1;
208 di->num_tracks = 1;
209
210 di->first_track = 1;
211 di->first_track_last_session = di->last_track_last_session = 1;
212
213 udf_dump_discinfo(ump);
214 return 0;
215 }
216
217
218 int
219 udf_update_trackinfo(struct udf_mount *ump, struct mmc_trackinfo *ti)
220 {
221 struct vnode *devvp = ump->devvp;
222 struct mmc_discinfo *di = &ump->discinfo;
223 int error, class;
224
225 DPRINTF(VOLUMES, ("read track info\n"));
226
227 class = di->mmc_class;
228 if (class != MMC_CLASS_DISC) {
229 /* tracknr specified in struct ti */
230 error = VOP_IOCTL(devvp, MMCGETTRACKINFO, ti, FKIOCTL, NOCRED);
231 return error;
232 }
233
234 /* disc partition support */
235 if (ti->tracknr != 1)
236 return EIO;
237
238 /* create fake ti (TODO check for resized vnds) */
239 ti->sessionnr = 1;
240
241 ti->track_mode = 0; /* XXX */
242 ti->data_mode = 0; /* XXX */
243 ti->flags = MMC_TRACKINFO_LRA_VALID | MMC_TRACKINFO_NWA_VALID;
244
245 ti->track_start = 0;
246 ti->packet_size = 1;
247
248 /* TODO support for resizable vnd */
249 ti->track_size = di->last_possible_lba;
250 ti->next_writable = di->last_possible_lba;
251 ti->last_recorded = ti->next_writable;
252 ti->free_blocks = 0;
253
254 return 0;
255 }
256
257
258 int
259 udf_setup_writeparams(struct udf_mount *ump)
260 {
261 struct mmc_writeparams mmc_writeparams;
262 int error;
263
264 if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
265 return 0;
266
267 /*
268 * only CD burning normally needs setting up, but other disc types
269 * might need other settings to be made. The MMC framework will set up
270 	 * the necessary recording parameters according to the disc
271 * characteristics read in. Modifications can be made in the discinfo
272 * structure passed to change the nature of the disc.
273 */
274
275 memset(&mmc_writeparams, 0, sizeof(struct mmc_writeparams));
276 mmc_writeparams.mmc_class = ump->discinfo.mmc_class;
277 mmc_writeparams.mmc_cur = ump->discinfo.mmc_cur;
278
279 /*
280 * UDF dictates first track to determine track mode for the whole
281 * disc. [UDF 1.50/6.10.1.1, UDF 1.50/6.10.2.1]
282 * To prevent problems with a `reserved' track in front we start with
283 * the 2nd track and if that is not valid, go for the 1st.
284 */
285 mmc_writeparams.tracknr = 2;
286 mmc_writeparams.data_mode = MMC_DATAMODE_DEFAULT; /* XA disc */
287 mmc_writeparams.track_mode = MMC_TRACKMODE_DEFAULT; /* data */
288
289 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS, &mmc_writeparams,
290 FKIOCTL, NOCRED);
291 if (error) {
292 mmc_writeparams.tracknr = 1;
293 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS,
294 &mmc_writeparams, FKIOCTL, NOCRED);
295 }
296 return error;
297 }
298
299
300 int
301 udf_synchronise_caches(struct udf_mount *ump)
302 {
303 struct mmc_op mmc_op;
304
305 DPRINTF(CALL, ("udf_synchronise_caches()\n"));
306
307 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
308 return 0;
309
310 /* discs are done now */
311 if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
312 return 0;
313
314 bzero(&mmc_op, sizeof(struct mmc_op));
315 mmc_op.operation = MMC_OP_SYNCHRONISECACHE;
316
317 /* ignore return code */
318 (void) VOP_IOCTL(ump->devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED);
319
320 return 0;
321 }
322
323 /* --------------------------------------------------------------------- */
324
325 /* track/session searching for mounting */
326 int
327 udf_search_tracks(struct udf_mount *ump, struct udf_args *args,
328 int *first_tracknr, int *last_tracknr)
329 {
330 struct mmc_trackinfo trackinfo;
331 uint32_t tracknr, start_track, num_tracks;
332 int error;
333
334 /* if negative, sessionnr is relative to last session */
335 if (args->sessionnr < 0) {
336 args->sessionnr += ump->discinfo.num_sessions;
337 }
338
339 /* sanity */
340 if (args->sessionnr < 0)
341 args->sessionnr = 0;
342 if (args->sessionnr > ump->discinfo.num_sessions)
343 args->sessionnr = ump->discinfo.num_sessions;
344
345 /* search the tracks for this session, zero session nr indicates last */
346 if (args->sessionnr == 0)
347 args->sessionnr = ump->discinfo.num_sessions;
348 if (ump->discinfo.last_session_state == MMC_STATE_EMPTY)
349 args->sessionnr--;
350
351 /* sanity again */
352 if (args->sessionnr < 0)
353 args->sessionnr = 0;
354
355 /* search the first and last track of the specified session */
356 num_tracks = ump->discinfo.num_tracks;
357 start_track = ump->discinfo.first_track;
358
359 /* search for first track of this session */
360 for (tracknr = start_track; tracknr <= num_tracks; tracknr++) {
361 /* get track info */
362 trackinfo.tracknr = tracknr;
363 error = udf_update_trackinfo(ump, &trackinfo);
364 if (error)
365 return error;
366
367 if (trackinfo.sessionnr == args->sessionnr)
368 break;
369 }
370 *first_tracknr = tracknr;
371
372 /* search for last track of this session */
373 for (;tracknr <= num_tracks; tracknr++) {
374 /* get track info */
375 trackinfo.tracknr = tracknr;
376 error = udf_update_trackinfo(ump, &trackinfo);
377 if (error || (trackinfo.sessionnr != args->sessionnr)) {
378 tracknr--;
379 break;
380 }
381 }
382 if (tracknr > num_tracks)
383 tracknr--;
384
385 *last_tracknr = tracknr;
386
387 if (*last_tracknr < *first_tracknr) {
388 printf( "udf_search_tracks: sanity check on drive+disc failed, "
389 "drive returned garbage\n");
390 return EINVAL;
391 }
392
393 assert(*last_tracknr >= *first_tracknr);
394 return 0;
395 }
396
397
398 /*
399 * NOTE: this is the only routine in this file that directly peeks into the
400  * metadata file but since it's at a larval state of the mount it can't hurt.
401 *
402 * XXX candidate for udf_allocation.c
403 * XXX clean me up!, change to new node reading code.
404 */
405
406 static void
407 udf_check_track_metadata_overlap(struct udf_mount *ump,
408 struct mmc_trackinfo *trackinfo)
409 {
410 struct part_desc *part;
411 struct file_entry *fe;
412 struct extfile_entry *efe;
413 struct short_ad *s_ad;
414 struct long_ad *l_ad;
415 uint32_t track_start, track_end;
416 uint32_t phys_part_start, phys_part_end, part_start, part_end;
417 uint32_t sector_size, len, alloclen, plb_num;
418 uint8_t *pos;
419 int addr_type, icblen, icbflags, flags;
420
421 /* get our track extents */
422 track_start = trackinfo->track_start;
423 track_end = track_start + trackinfo->track_size;
424
425 /* get our base partition extent */
426 part = ump->partitions[ump->metadata_part];
427 phys_part_start = udf_rw32(part->start_loc);
428 phys_part_end = phys_part_start + udf_rw32(part->part_len);
429
430 	/* no use if it's outside the physical partition */
431 if ((phys_part_start >= track_end) || (phys_part_end < track_start))
432 return;
433
434 /*
435 * now follow all extents in the fe/efe to see if they refer to this
436 * track
437 */
438
439 sector_size = ump->discinfo.sector_size;
440
441 /* XXX should we claim exclusive access to the metafile ? */
442 /* TODO: move to new node read code */
443 fe = ump->metadata_node->fe;
444 efe = ump->metadata_node->efe;
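	/* the allocation descriptors start after the l_ea bytes of extended
	 * attributes in the (e)fe data area; there are l_ad bytes of them */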
445 if (fe) {
446 alloclen = udf_rw32(fe->l_ad);
447 pos = &fe->data[0] + udf_rw32(fe->l_ea);
448 icbflags = udf_rw16(fe->icbtag.flags);
449 } else {
450 assert(efe);
451 alloclen = udf_rw32(efe->l_ad);
452 pos = &efe->data[0] + udf_rw32(efe->l_ea);
453 icbflags = udf_rw16(efe->icbtag.flags);
454 }
455 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
456
457 while (alloclen) {
458 if (addr_type == UDF_ICB_SHORT_ALLOC) {
459 icblen = sizeof(struct short_ad);
460 s_ad = (struct short_ad *) pos;
461 len = udf_rw32(s_ad->len);
462 plb_num = udf_rw32(s_ad->lb_num);
463 } else {
464 /* should not be present, but why not */
465 icblen = sizeof(struct long_ad);
466 l_ad = (struct long_ad *) pos;
467 len = udf_rw32(l_ad->len);
468 plb_num = udf_rw32(l_ad->loc.lb_num);
469 /* pvpart_num = udf_rw16(l_ad->loc.part_num); */
470 }
471 /* process extent */
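		/* the top two bits of the recorded length encode the extent type,
		 * the lower 30 bits the byte length (UDF_EXT_FLAGS/UDF_EXT_LEN) */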
472 flags = UDF_EXT_FLAGS(len);
473 len = UDF_EXT_LEN(len);
474
475 part_start = phys_part_start + plb_num;
476 part_end = part_start + (len / sector_size);
477
478 if ((part_start >= track_start) && (part_end <= track_end)) {
479 /* extent is enclosed within this track */
480 ump->metadata_track = *trackinfo;
481 return;
482 }
483
484 pos += icblen;
485 alloclen -= icblen;
486 }
487 }
488
489
490 int
491 udf_search_writing_tracks(struct udf_mount *ump)
492 {
493 struct mmc_trackinfo trackinfo;
494 struct part_desc *part;
495 uint32_t tracknr, start_track, num_tracks;
496 uint32_t track_start, track_end, part_start, part_end;
497 int error;
498
499 /*
500 * in the CD/(HD)DVD/BD recordable device model a few tracks within
501 * the last session might be open but in the UDF device model at most
502 * three tracks can be open: a reserved track for delayed ISO VRS
503 * writing, a data track and a metadata track. We search here for the
504 * data track and the metadata track. Note that the reserved track is
505 * troublesome but can be detected by its small size of < 512 sectors.
506 */
507
508 num_tracks = ump->discinfo.num_tracks;
509 start_track = ump->discinfo.first_track;
510
511 /* fetch info on first and possibly only track */
512 trackinfo.tracknr = start_track;
513 error = udf_update_trackinfo(ump, &trackinfo);
514 if (error)
515 return error;
516
517 /* copy results to our mount point */
518 ump->data_track = trackinfo;
519 ump->metadata_track = trackinfo;
520
521 /* if not sequential, we're done */
522 if (num_tracks == 1)
523 return 0;
524
525 for (tracknr = start_track;tracknr <= num_tracks; tracknr++) {
526 /* get track info */
527 trackinfo.tracknr = tracknr;
528 error = udf_update_trackinfo(ump, &trackinfo);
529 if (error)
530 return error;
531
532 if ((trackinfo.flags & MMC_TRACKINFO_NWA_VALID) == 0)
533 continue;
534
535 track_start = trackinfo.track_start;
536 track_end = track_start + trackinfo.track_size;
537
538 /* check for overlap on data partition */
539 part = ump->partitions[ump->data_part];
540 part_start = udf_rw32(part->start_loc);
541 part_end = part_start + udf_rw32(part->part_len);
542 if ((part_start < track_end) && (part_end > track_start)) {
543 ump->data_track = trackinfo;
544 /* TODO check if UDF partition data_part is writable */
545 }
546
547 /* check for overlap on metadata partition */
548 if ((ump->meta_alloc == UDF_ALLOC_METASEQUENTIAL) ||
549 (ump->meta_alloc == UDF_ALLOC_METABITMAP)) {
550 udf_check_track_metadata_overlap(ump, &trackinfo);
551 } else {
552 ump->metadata_track = trackinfo;
553 }
554 }
555
556 if ((ump->data_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
557 return EROFS;
558
559 if ((ump->metadata_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
560 return EROFS;
561
562 return 0;
563 }
564
565 /* --------------------------------------------------------------------- */
566
567 /*
568 * Check if the blob starts with a good UDF tag. Tags are protected by a
569  * checksum over the header except one byte at position 4 that is the checksum
570 * itself.
571 */
572
573 int
574 udf_check_tag(void *blob)
575 {
576 struct desc_tag *tag = blob;
577 uint8_t *pos, sum, cnt;
578
579 /* check TAG header checksum */
580 pos = (uint8_t *) tag;
581 sum = 0;
582
583 for(cnt = 0; cnt < 16; cnt++) {
584 if (cnt != 4)
585 sum += *pos;
586 pos++;
587 }
588 if (sum != tag->cksum) {
589 /* bad tag header checksum; this is not a valid tag */
590 return EINVAL;
591 }
592
593 return 0;
594 }
595
596
597 /*
598  * udf_check_tag_payload() checks the descriptor CRC as specified. If the CRC
599  * length exceeds max_length it returns EIO; a CRC mismatch returns EINVAL.
600 */
601
602 int
603 udf_check_tag_payload(void *blob, uint32_t max_length)
604 {
605 struct desc_tag *tag = blob;
606 uint16_t crc, crc_len;
607
608 crc_len = udf_rw16(tag->desc_crc_len);
609
610 /* check payload CRC if applicable */
611 if (crc_len == 0)
612 return 0;
613
614 if (crc_len > max_length)
615 return EIO;
616
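	/* the CRC covers crc_len bytes starting directly after the 16 byte
	 * descriptor tag */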
617 crc = udf_cksum(((uint8_t *) tag) + UDF_DESC_TAG_LENGTH, crc_len);
618 if (crc != udf_rw16(tag->desc_crc)) {
619 /* bad payload CRC; this is a broken tag */
620 return EINVAL;
621 }
622
623 return 0;
624 }
625
626
627 void
628 udf_validate_tag_sum(void *blob)
629 {
630 struct desc_tag *tag = blob;
631 uint8_t *pos, sum, cnt;
632
633 /* calculate TAG header checksum */
634 pos = (uint8_t *) tag;
635 sum = 0;
636
637 for(cnt = 0; cnt < 16; cnt++) {
638 if (cnt != 4) sum += *pos;
639 pos++;
640 }
641 tag->cksum = sum; /* 8 bit */
642 }
643
644
645 /* assumes the sector number of the descriptor to be saved is already present */
646 void
647 udf_validate_tag_and_crc_sums(void *blob)
648 {
649 struct desc_tag *tag = blob;
650 uint8_t *btag = (uint8_t *) tag;
651 uint16_t crc, crc_len;
652
653 crc_len = udf_rw16(tag->desc_crc_len);
654
655 /* check payload CRC if applicable */
656 if (crc_len > 0) {
657 crc = udf_cksum(btag + UDF_DESC_TAG_LENGTH, crc_len);
658 tag->desc_crc = udf_rw16(crc);
659 }
660
661 /* calculate TAG header checksum */
662 udf_validate_tag_sum(blob);
663 }
664
665 /* --------------------------------------------------------------------- */
666
667 /*
668 * XXX note the different semantics from udfclient: for FIDs it still rounds
669 * up to sectors. Use udf_fidsize() for a correct length.
670 */
671
672 int
673 udf_tagsize(union dscrptr *dscr, uint32_t lb_size)
674 {
675 uint32_t size, tag_id, num_lb, elmsz;
676
677 tag_id = udf_rw16(dscr->tag.id);
678
679 switch (tag_id) {
680 case TAGID_LOGVOL :
681 size = sizeof(struct logvol_desc) - 1;
682 size += udf_rw32(dscr->lvd.mt_l);
683 break;
684 case TAGID_UNALLOC_SPACE :
685 elmsz = sizeof(struct extent_ad);
686 size = sizeof(struct unalloc_sp_desc) - elmsz;
687 size += udf_rw32(dscr->usd.alloc_desc_num) * elmsz;
688 break;
689 case TAGID_FID :
690 size = UDF_FID_SIZE + dscr->fid.l_fi + udf_rw16(dscr->fid.l_iu);
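		/* FIDs are padded up to a multiple of four bytes */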
691 size = (size + 3) & ~3;
692 break;
693 case TAGID_LOGVOL_INTEGRITY :
694 size = sizeof(struct logvol_int_desc) - sizeof(uint32_t);
695 size += udf_rw32(dscr->lvid.l_iu);
696 size += (2 * udf_rw32(dscr->lvid.num_part) * sizeof(uint32_t));
697 break;
698 case TAGID_SPACE_BITMAP :
699 size = sizeof(struct space_bitmap_desc) - 1;
700 size += udf_rw32(dscr->sbd.num_bytes);
701 break;
702 case TAGID_SPARING_TABLE :
703 elmsz = sizeof(struct spare_map_entry);
704 size = sizeof(struct udf_sparing_table) - elmsz;
705 size += udf_rw16(dscr->spt.rt_l) * elmsz;
706 break;
707 case TAGID_FENTRY :
708 size = sizeof(struct file_entry);
709 size += udf_rw32(dscr->fe.l_ea) + udf_rw32(dscr->fe.l_ad)-1;
710 break;
711 case TAGID_EXTFENTRY :
712 size = sizeof(struct extfile_entry);
713 size += udf_rw32(dscr->efe.l_ea) + udf_rw32(dscr->efe.l_ad)-1;
714 break;
715 case TAGID_FSD :
716 size = sizeof(struct fileset_desc);
717 break;
718 default :
719 size = sizeof(union dscrptr);
720 break;
721 }
722
723 if ((size == 0) || (lb_size == 0)) return 0;
724
725 /* round up in sectors */
726 num_lb = (size + lb_size -1) / lb_size;
727 return num_lb * lb_size;
728 }
729
730
731 int
732 udf_fidsize(struct fileid_desc *fid)
733 {
734 uint32_t size;
735
736 if (udf_rw16(fid->tag.id) != TAGID_FID)
737 panic("got udf_fidsize on non FID\n");
738
739 size = UDF_FID_SIZE + fid->l_fi + udf_rw16(fid->l_iu);
740 size = (size + 3) & ~3;
741
742 return size;
743 }
744
745 /* --------------------------------------------------------------------- */
746
747 void
748 udf_lock_node(struct udf_node *udf_node, int flag, char const *fname, const int lineno)
749 {
750 int ret;
751
752 mutex_enter(&udf_node->node_mutex);
753 /* wait until free */
754 while (udf_node->i_flags & IN_LOCKED) {
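		/* poll with a 1/8 second timeout so a long-held lock shows up in
		 * the diagnostics below */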
755 ret = cv_timedwait(&udf_node->node_lock, &udf_node->node_mutex, hz/8);
756 /* TODO check if we should return error; abort */
757 if (ret == EWOULDBLOCK) {
758 DPRINTF(LOCKING, ( "udf_lock_node: udf_node %p would block "
759 "wanted at %s:%d, previously locked at %s:%d\n",
760 udf_node, fname, lineno,
761 udf_node->lock_fname, udf_node->lock_lineno));
762 }
763 }
764 /* grab */
765 udf_node->i_flags |= IN_LOCKED | flag;
766 /* debug */
767 udf_node->lock_fname = fname;
768 udf_node->lock_lineno = lineno;
769
770 mutex_exit(&udf_node->node_mutex);
771 }
772
773
774 void
775 udf_unlock_node(struct udf_node *udf_node, int flag)
776 {
777 mutex_enter(&udf_node->node_mutex);
778 udf_node->i_flags &= ~(IN_LOCKED | flag);
779 cv_broadcast(&udf_node->node_lock);
780 mutex_exit(&udf_node->node_mutex);
781 }
782
783
784 /* --------------------------------------------------------------------- */
785
786 static int
787 udf_read_anchor(struct udf_mount *ump, uint32_t sector, struct anchor_vdp **dst)
788 {
789 int error;
790
791 error = udf_read_phys_dscr(ump, sector, M_UDFVOLD,
792 (union dscrptr **) dst);
793 if (!error) {
794 /* blank terminator blocks are not allowed here */
795 if (*dst == NULL)
796 return ENOENT;
797 if (udf_rw16((*dst)->tag.id) != TAGID_ANCHOR) {
798 error = ENOENT;
799 free(*dst, M_UDFVOLD);
800 *dst = NULL;
801 DPRINTF(VOLUMES, ("Not an anchor\n"));
802 }
803 }
804
805 return error;
806 }
807
808
809 int
810 udf_read_anchors(struct udf_mount *ump)
811 {
812 struct udf_args *args = &ump->mount_args;
813 struct mmc_trackinfo first_track;
814 struct mmc_trackinfo second_track;
815 struct mmc_trackinfo last_track;
816 struct anchor_vdp **anchorsp;
817 uint32_t track_start;
818 uint32_t track_end;
819 uint32_t positions[4];
820 int first_tracknr, last_tracknr;
821 int error, anch, ok, first_anchor;
822
823 /* search the first and last track of the specified session */
824 error = udf_search_tracks(ump, args, &first_tracknr, &last_tracknr);
825 if (!error) {
826 first_track.tracknr = first_tracknr;
827 error = udf_update_trackinfo(ump, &first_track);
828 }
829 if (!error) {
830 last_track.tracknr = last_tracknr;
831 error = udf_update_trackinfo(ump, &last_track);
832 }
833 if ((!error) && (first_tracknr != last_tracknr)) {
834 second_track.tracknr = first_tracknr+1;
835 error = udf_update_trackinfo(ump, &second_track);
836 }
837 if (error) {
838 printf("UDF mount: reading disc geometry failed\n");
839 return 0;
840 }
841
842 track_start = first_track.track_start;
843
844 	/* `end' is not as straightforward as start. */
845 track_end = last_track.track_start
846 + last_track.track_size - last_track.free_blocks - 1;
847
848 if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
849 		/* end of track is not straightforward here */
850 if (last_track.flags & MMC_TRACKINFO_LRA_VALID)
851 track_end = last_track.last_recorded;
852 else if (last_track.flags & MMC_TRACKINFO_NWA_VALID)
853 track_end = last_track.next_writable
854 - ump->discinfo.link_block_penalty;
855 }
856
857 	/* it's no use reading a blank track */
858 first_anchor = 0;
859 if (first_track.flags & MMC_TRACKINFO_BLANK)
860 first_anchor = 1;
861
862 /* get our packet size */
863 ump->packet_size = first_track.packet_size;
864 if (first_track.flags & MMC_TRACKINFO_BLANK)
865 ump->packet_size = second_track.packet_size;
866
867 if (ump->packet_size <= 1) {
868 /* take max, but not bigger than 64 */
869 ump->packet_size = MAXPHYS / ump->discinfo.sector_size;
870 ump->packet_size = MIN(ump->packet_size, 64);
871 }
872 KASSERT(ump->packet_size >= 1);
873
874 /* read anchors start+256, start+512, end-256, end */
875 positions[0] = track_start+256;
876 positions[1] = track_end-256;
877 positions[2] = track_end;
878 positions[3] = track_start+512; /* [UDF 2.60/6.11.2] */
879 	/* XXX shouldn't +512 be preferred over +256 for compat with Roxio CD */
880
881 ok = 0;
882 anchorsp = ump->anchors;
883 for (anch = first_anchor; anch < 4; anch++) {
884 DPRINTF(VOLUMES, ("Read anchor %d at sector %d\n", anch,
885 positions[anch]));
886 error = udf_read_anchor(ump, positions[anch], anchorsp);
887 if (!error) {
888 anchorsp++;
889 ok++;
890 }
891 }
892
893 	/* VATs are only recorded on sequential media, but initialise anyway */
894 ump->first_possible_vat_location = track_start + 2;
895 ump->last_possible_vat_location = track_end + last_track.packet_size;
896
897 return ok;
898 }
899
900 /* --------------------------------------------------------------------- */
901
902 /* we don't try to be smart; we just record the parts */
903 #define UDF_UPDATE_DSCR(name, dscr) \
904 if (name) \
905 free(name, M_UDFVOLD); \
906 name = dscr;
907
908 static int
909 udf_process_vds_descriptor(struct udf_mount *ump, union dscrptr *dscr)
910 {
911 struct part_desc *part;
912 uint16_t phys_part, raw_phys_part;
913
914 DPRINTF(VOLUMES, ("\tprocessing VDS descr %d\n",
915 udf_rw16(dscr->tag.id)));
916 switch (udf_rw16(dscr->tag.id)) {
917 	case TAGID_PRI_VOL :		/* primary volume descriptor */
918 UDF_UPDATE_DSCR(ump->primary_vol, &dscr->pvd);
919 break;
920 case TAGID_LOGVOL : /* logical volume */
921 UDF_UPDATE_DSCR(ump->logical_vol, &dscr->lvd);
922 break;
923 case TAGID_UNALLOC_SPACE : /* unallocated space */
924 UDF_UPDATE_DSCR(ump->unallocated, &dscr->usd);
925 break;
926 case TAGID_IMP_VOL : /* implementation */
927 /* XXX do we care about multiple impl. descr ? */
928 UDF_UPDATE_DSCR(ump->implementation, &dscr->ivd);
929 break;
930 case TAGID_PARTITION : /* physical partition */
931 		/* not much use if it's not allocated */
932 if ((udf_rw16(dscr->pd.flags) & UDF_PART_FLAG_ALLOCATED) == 0) {
933 free(dscr, M_UDFVOLD);
934 break;
935 }
936
937 /*
938 * BUGALERT: some rogue implementations use random physical
939 		 * partition numbers to break other implementations, so look up
940 		 * the number.
941 */
942 raw_phys_part = udf_rw16(dscr->pd.part_num);
943 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
944 part = ump->partitions[phys_part];
945 if (part == NULL)
946 break;
947 if (udf_rw16(part->part_num) == raw_phys_part)
948 break;
949 }
950 if (phys_part == UDF_PARTITIONS) {
951 free(dscr, M_UDFVOLD);
952 return EINVAL;
953 }
954
955 UDF_UPDATE_DSCR(ump->partitions[phys_part], &dscr->pd);
956 break;
957 case TAGID_VOL : /* volume space extender; rare */
958 DPRINTF(VOLUMES, ("VDS extender ignored\n"));
959 free(dscr, M_UDFVOLD);
960 break;
961 default :
962 DPRINTF(VOLUMES, ("Unhandled VDS type %d\n",
963 udf_rw16(dscr->tag.id)));
964 free(dscr, M_UDFVOLD);
965 }
966
967 return 0;
968 }
969 #undef UDF_UPDATE_DSCR
970
971 /* --------------------------------------------------------------------- */
972
973 static int
974 udf_read_vds_extent(struct udf_mount *ump, uint32_t loc, uint32_t len)
975 {
976 union dscrptr *dscr;
977 uint32_t sector_size, dscr_size;
978 int error;
979
980 sector_size = ump->discinfo.sector_size;
981
982 /* loc is sectornr, len is in bytes */
983 error = EIO;
984 while (len) {
985 error = udf_read_phys_dscr(ump, loc, M_UDFVOLD, &dscr);
986 if (error)
987 return error;
988
989 /* blank block is a terminator */
990 if (dscr == NULL)
991 return 0;
992
993 /* TERM descriptor is a terminator */
994 if (udf_rw16(dscr->tag.id) == TAGID_TERM) {
995 free(dscr, M_UDFVOLD);
996 return 0;
997 }
998
999 /* process all others */
1000 dscr_size = udf_tagsize(dscr, sector_size);
1001 error = udf_process_vds_descriptor(ump, dscr);
1002 if (error) {
1003 free(dscr, M_UDFVOLD);
1004 break;
1005 }
1006 assert((dscr_size % sector_size) == 0);
1007
1008 len -= dscr_size;
1009 loc += dscr_size / sector_size;
1010 }
1011
1012 return error;
1013 }
1014
1015
1016 int
1017 udf_read_vds_space(struct udf_mount *ump)
1018 {
1019 /* struct udf_args *args = &ump->mount_args; */
1020 struct anchor_vdp *anchor, *anchor2;
1021 size_t size;
1022 uint32_t main_loc, main_len;
1023 uint32_t reserve_loc, reserve_len;
1024 int error;
1025
1026 /*
1027 * read in VDS space provided by the anchors; if one descriptor read
1028 * fails, try the mirror sector.
1029 *
1030 * check if 2nd anchor is different from 1st; if so, go for 2nd. This
1031 * avoids the `compatibility features' of DirectCD that may confuse
1032 * stuff completely.
1033 */
1034
1035 anchor = ump->anchors[0];
1036 anchor2 = ump->anchors[1];
1037 assert(anchor);
1038
1039 if (anchor2) {
1040 size = sizeof(struct extent_ad);
1041 if (memcmp(&anchor->main_vds_ex, &anchor2->main_vds_ex, size))
1042 anchor = anchor2;
1043 /* reserve is specified to be a literal copy of main */
1044 }
1045
1046 main_loc = udf_rw32(anchor->main_vds_ex.loc);
1047 main_len = udf_rw32(anchor->main_vds_ex.len);
1048
1049 reserve_loc = udf_rw32(anchor->reserve_vds_ex.loc);
1050 reserve_len = udf_rw32(anchor->reserve_vds_ex.len);
1051
1052 error = udf_read_vds_extent(ump, main_loc, main_len);
1053 if (error) {
1054 printf("UDF mount: reading in reserve VDS extent\n");
1055 error = udf_read_vds_extent(ump, reserve_loc, reserve_len);
1056 }
1057
1058 return error;
1059 }
1060
1061 /* --------------------------------------------------------------------- */
1062
1063 /*
1064 * Read in the logical volume integrity sequence pointed to by our logical
1065  * volume descriptor. It's a sequence that can be extended using fields in the
1066 * integrity descriptor itself. On sequential media only one is found, on
1067 * rewritable media a sequence of descriptors can be found as a form of
1068 * history keeping and on non sequential write-once media the chain is vital
1069 * to allow more and more descriptors to be written. The last descriptor
1070 * written in an extent needs to claim space for a new extent.
1071 */
1072
1073 static int
1074 udf_retrieve_lvint(struct udf_mount *ump)
1075 {
1076 union dscrptr *dscr;
1077 struct logvol_int_desc *lvint;
1078 struct udf_lvintq *trace;
1079 uint32_t lb_size, lbnum, len;
1080 int dscr_type, error, trace_len;
1081
1082 lb_size = udf_rw32(ump->logical_vol->lb_size);
1083 len = udf_rw32(ump->logical_vol->integrity_seq_loc.len);
1084 lbnum = udf_rw32(ump->logical_vol->integrity_seq_loc.loc);
1085
1086 /* clean trace */
1087 memset(ump->lvint_trace, 0,
1088 UDF_LVDINT_SEGMENTS * sizeof(struct udf_lvintq));
1089
1090 trace_len = 0;
1091 trace = ump->lvint_trace;
1092 trace->start = lbnum;
1093 trace->end = lbnum + len/lb_size;
1094 trace->pos = 0;
1095 trace->wpos = 0;
1096
1097 lvint = NULL;
1098 dscr = NULL;
1099 error = 0;
1100 while (len) {
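		/* pos marks the descriptor being read in this extent; wpos is the
		 * position where a new descriptor would be appended */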
1101 trace->pos = lbnum - trace->start;
1102 trace->wpos = trace->pos + 1;
1103
1104 /* read in our integrity descriptor */
1105 error = udf_read_phys_dscr(ump, lbnum, M_UDFVOLD, &dscr);
1106 if (!error) {
1107 if (dscr == NULL) {
1108 trace->wpos = trace->pos;
1109 break; /* empty terminates */
1110 }
1111 dscr_type = udf_rw16(dscr->tag.id);
1112 if (dscr_type == TAGID_TERM) {
1113 trace->wpos = trace->pos;
1114 break; /* clean terminator */
1115 }
1116 if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
1117 /* fatal... corrupt disc */
1118 error = ENOENT;
1119 break;
1120 }
1121 if (lvint)
1122 free(lvint, M_UDFVOLD);
1123 lvint = &dscr->lvid;
1124 dscr = NULL;
1125 } /* else hope for the best... maybe the next is ok */
1126
1127 DPRINTFIF(VOLUMES, lvint, ("logvol integrity read, state %s\n",
1128 udf_rw32(lvint->integrity_type) ? "CLOSED" : "OPEN"));
1129
1130 /* proceed sequential */
1131 lbnum += 1;
1132 len -= lb_size;
1133
1134 /* are we linking to a new piece? */
1135 if (dscr && lvint->next_extent.len) {
1136 len = udf_rw32(lvint->next_extent.len);
1137 lbnum = udf_rw32(lvint->next_extent.loc);
1138
1139 if (trace_len >= UDF_LVDINT_SEGMENTS-1) {
1140 /* IEK! segment link full... */
1141 DPRINTF(VOLUMES, ("lvdint segments full\n"));
1142 error = EINVAL;
1143 } else {
1144 trace++;
1145 trace_len++;
1146
1147 trace->start = lbnum;
1148 trace->end = lbnum + len/lb_size;
1149 trace->pos = 0;
1150 trace->wpos = 0;
1151 }
1152 }
1153 }
1154
1155 /* clean up the mess, esp. when there is an error */
1156 if (dscr)
1157 free(dscr, M_UDFVOLD);
1158
1159 if (error && lvint) {
1160 free(lvint, M_UDFVOLD);
1161 lvint = NULL;
1162 }
1163
1164 if (!lvint)
1165 error = ENOENT;
1166
1167 ump->logvol_integrity = lvint;
1168 return error;
1169 }
1170
1171
1172 static int
1173 udf_loose_lvint_history(struct udf_mount *ump)
1174 {
1175 union dscrptr **bufs, *dscr, *last_dscr;
1176 struct udf_lvintq *trace, *in_trace, *out_trace;
1177 struct logvol_int_desc *lvint;
1178 uint32_t in_ext, in_pos, in_len;
1179 uint32_t out_ext, out_wpos, out_len;
1180 uint32_t lb_size, packet_size, lb_num;
1181 uint32_t len, start;
1182 int ext, minext, extlen, cnt, cpy_len, dscr_type;
1183 int losing;
1184 int error;
1185
1186 DPRINTF(VOLUMES, ("need to lose some lvint history\n"));
1187
1188 lb_size = udf_rw32(ump->logical_vol->lb_size);
1189 packet_size = ump->data_track.packet_size; /* XXX data track */
1190
1191 /* search smallest extent */
1192 trace = &ump->lvint_trace[0];
1193 minext = trace->end - trace->start;
1194 for (ext = 1; ext < UDF_LVDINT_SEGMENTS; ext++) {
1195 trace = &ump->lvint_trace[ext];
1196 extlen = trace->end - trace->start;
1197 if (extlen == 0)
1198 break;
1199 minext = MIN(minext, extlen);
1200 }
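	/* drop at most UDF_LVINT_LOSSAGE descriptors, bounded by the size of
	 * the smallest extent */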
1201 losing = MIN(minext, UDF_LVINT_LOSSAGE);
1202 /* no sense wiping all */
1203 if (losing == minext)
1204 losing--;
1205
1206 DPRINTF(VOLUMES, ("\tlosing %d entries\n", losing));
1207
1208 /* get buffer for pieces */
1209 bufs = malloc(UDF_LVDINT_SEGMENTS * sizeof(void *), M_TEMP, M_WAITOK);
1210
1211 in_ext = 0;
1212 in_pos = losing;
1213 in_trace = &ump->lvint_trace[in_ext];
1214 in_len = in_trace->end - in_trace->start;
1215 out_ext = 0;
1216 out_wpos = 0;
1217 out_trace = &ump->lvint_trace[out_ext];
1218 out_len = out_trace->end - out_trace->start;
1219
1220 last_dscr = NULL;
1221 for(;;) {
1222 out_trace->pos = out_wpos;
1223 out_trace->wpos = out_trace->pos;
1224 if (in_pos >= in_len) {
1225 in_ext++;
1226 in_pos = 0;
1227 in_trace = &ump->lvint_trace[in_ext];
1228 in_len = in_trace->end - in_trace->start;
1229 }
1230 if (out_wpos >= out_len) {
1231 out_ext++;
1232 out_wpos = 0;
1233 out_trace = &ump->lvint_trace[out_ext];
1234 out_len = out_trace->end - out_trace->start;
1235 }
1236 /* copy overlap contents */
1237 cpy_len = MIN(in_len - in_pos, out_len - out_wpos);
1238 cpy_len = MIN(cpy_len, in_len - in_trace->pos);
1239 if (cpy_len == 0)
1240 break;
1241
1242 /* copy */
1243 DPRINTF(VOLUMES, ("\treading %d lvid descriptors\n", cpy_len));
1244 for (cnt = 0; cnt < cpy_len; cnt++) {
1245 /* read in our integrity descriptor */
1246 lb_num = in_trace->start + in_pos + cnt;
1247 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD,
1248 &dscr);
1249 if (error) {
1250 /* copy last one */
1251 dscr = last_dscr;
1252 }
1253 bufs[cnt] = dscr;
1254 if (!error) {
1255 if (dscr == NULL) {
1256 out_trace->pos = out_wpos + cnt;
1257 out_trace->wpos = out_trace->pos;
1258 break; /* empty terminates */
1259 }
1260 dscr_type = udf_rw16(dscr->tag.id);
1261 if (dscr_type == TAGID_TERM) {
1262 out_trace->pos = out_wpos + cnt;
1263 out_trace->wpos = out_trace->pos;
1264 break; /* clean terminator */
1265 }
1266 if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
1267 panic( "UDF integrity sequence "
1268 "corrupted while mounted!\n");
1269 }
1270 last_dscr = dscr;
1271 }
1272 }
1273
1274 /* patch up if first entry was on error */
1275 if (bufs[0] == NULL) {
1276 for (cnt = 0; cnt < cpy_len; cnt++)
1277 if (bufs[cnt] != NULL)
1278 break;
1279 last_dscr = bufs[cnt];
1280 for (; cnt > 0; cnt--) {
1281 bufs[cnt] = last_dscr;
1282 }
1283 }
1284
1285 /* glue + write out */
1286 DPRINTF(VOLUMES, ("\twriting %d lvid descriptors\n", cpy_len));
1287 for (cnt = 0; cnt < cpy_len; cnt++) {
1288 lb_num = out_trace->start + out_wpos + cnt;
1289 lvint = &bufs[cnt]->lvid;
1290
1291 /* set continuation */
1292 len = 0;
1293 start = 0;
1294 if (out_wpos + cnt == out_len) {
1295 /* get continuation */
1296 trace = &ump->lvint_trace[out_ext+1];
1297 len = trace->end - trace->start;
1298 start = trace->start;
1299 }
1300 lvint->next_extent.len = udf_rw32(len);
1301 lvint->next_extent.loc = udf_rw32(start);
1302
1303 lb_num = trace->start + trace->wpos;
1304 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1305 bufs[cnt], lb_num, lb_num);
1306 DPRINTFIF(VOLUMES, error,
1307 ("error writing lvint lb_num\n"));
1308 }
1309
1310 /* free non repeating descriptors */
1311 last_dscr = NULL;
1312 for (cnt = 0; cnt < cpy_len; cnt++) {
1313 if (bufs[cnt] != last_dscr)
1314 free(bufs[cnt], M_UDFVOLD);
1315 last_dscr = bufs[cnt];
1316 }
1317
1318 /* advance */
1319 in_pos += cpy_len;
1320 out_wpos += cpy_len;
1321 }
1322
1323 free(bufs, M_TEMP);
1324
1325 return 0;
1326 }
1327
1328
1329 static int
1330 udf_writeout_lvint(struct udf_mount *ump, int lvflag)
1331 {
1332 struct udf_lvintq *trace;
1333 struct timeval now_v;
1334 struct timespec now_s;
1335 uint32_t sector;
1336 int logvol_integrity;
1337 int space, error;
1338
1339 DPRINTF(VOLUMES, ("writing out logvol integrity descriptor\n"));
1340
1341 again:
1342 /* get free space in last chunk */
1343 trace = ump->lvint_trace;
1344 while (trace->wpos > (trace->end - trace->start)) {
1345 DPRINTF(VOLUMES, ("skip : start = %d, end = %d, pos = %d, "
1346 "wpos = %d\n", trace->start, trace->end,
1347 trace->pos, trace->wpos));
1348 trace++;
1349 }
1350
1351 /* check if there is space to append */
1352 space = (trace->end - trace->start) - trace->wpos;
1353 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
1354 "space = %d\n", trace->start, trace->end, trace->pos,
1355 trace->wpos, space));
1356
1357 /* get state */
1358 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
1359 if (logvol_integrity == UDF_INTEGRITY_CLOSED) {
1360 if ((space < 3) && (lvflag & UDF_APPENDONLY_LVINT)) {
1361 /* don't allow this logvol to be opened */
1362 			/* TODO extend LVINT space if possible */
1363 return EROFS;
1364 }
1365 }
1366
1367 if (space < 1) {
1368 if (lvflag & UDF_APPENDONLY_LVINT)
1369 return EROFS;
1370 		/* lose history by re-writing extents */
1371 error = udf_loose_lvint_history(ump);
1372 if (error)
1373 return error;
1374 goto again;
1375 }
1376
1377 /* update our integrity descriptor to identify us and timestamp it */
1378 DPRINTF(VOLUMES, ("updating integrity descriptor\n"));
1379 microtime(&now_v);
1380 TIMEVAL_TO_TIMESPEC(&now_v, &now_s);
1381 udf_timespec_to_timestamp(&now_s, &ump->logvol_integrity->time);
1382 udf_set_regid(&ump->logvol_info->impl_id, IMPL_NAME);
1383 udf_add_impl_regid(ump, &ump->logvol_info->impl_id);
1384
1385 /* writeout integrity descriptor */
1386 sector = trace->start + trace->wpos;
1387 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1388 (union dscrptr *) ump->logvol_integrity,
1389 sector, sector);
1390 DPRINTF(VOLUMES, ("writeout lvint : error = %d\n", error));
1391 if (error)
1392 return error;
1393
1394 /* advance write position */
1395 trace->wpos++; space--;
1396 if (space >= 1) {
1397 /* append terminator */
1398 sector = trace->start + trace->wpos;
1399 error = udf_write_terminator(ump, sector);
1400
1401 DPRINTF(VOLUMES, ("write terminator : error = %d\n", error));
1402 }
1403
1404 space = (trace->end - trace->start) - trace->wpos;
1405 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
1406 "space = %d\n", trace->start, trace->end, trace->pos,
1407 trace->wpos, space));
1408 DPRINTF(VOLUMES, ("finished writing out logvol integrity descriptor "
1409 	    "successfully\n"));
1410
1411 return error;
1412 }
1413
1414 /* --------------------------------------------------------------------- */
1415
1416 static int
1417 udf_read_partition_spacetables(struct udf_mount *ump)
1418 {
1419 union dscrptr *dscr;
1420 /* struct udf_args *args = &ump->mount_args; */
1421 struct part_desc *partd;
1422 struct part_hdr_desc *parthdr;
1423 struct udf_bitmap *bitmap;
1424 uint32_t phys_part;
1425 uint32_t lb_num, len;
1426 int error, dscr_type;
1427
1428 /* unallocated space map */
1429 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1430 partd = ump->partitions[phys_part];
1431 if (partd == NULL)
1432 continue;
1433 parthdr = &partd->_impl_use.part_hdr;
1434
1435 lb_num = udf_rw32(partd->start_loc);
1436 lb_num += udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
1437 len = udf_rw32(parthdr->unalloc_space_bitmap.len);
1438 if (len == 0)
1439 continue;
1440
1441 DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num));
1442 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
1443 if (!error && dscr) {
1444 /* analyse */
1445 dscr_type = udf_rw16(dscr->tag.id);
1446 if (dscr_type == TAGID_SPACE_BITMAP) {
1447 DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
1448 ump->part_unalloc_dscr[phys_part] = &dscr->sbd;
1449
1450 /* fill in ump->part_unalloc_bits */
1451 bitmap = &ump->part_unalloc_bits[phys_part];
1452 bitmap->blob = (uint8_t *) dscr;
1453 bitmap->bits = dscr->sbd.data;
1454 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1455 bitmap->pages = NULL; /* TODO */
1456 bitmap->data_pos = 0;
1457 bitmap->metadata_pos = 0;
1458 } else {
1459 free(dscr, M_UDFVOLD);
1460
1461 printf( "UDF mount: error reading unallocated "
1462 "space bitmap\n");
1463 return EROFS;
1464 }
1465 } else {
1466 /* blank not allowed */
1467 printf("UDF mount: blank unallocated space bitmap\n");
1468 return EROFS;
1469 }
1470 }
1471
1472 /* unallocated space table (not supported) */
1473 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1474 partd = ump->partitions[phys_part];
1475 if (partd == NULL)
1476 continue;
1477 parthdr = &partd->_impl_use.part_hdr;
1478
1479 len = udf_rw32(parthdr->unalloc_space_table.len);
1480 if (len) {
1481 printf("UDF mount: space tables not supported\n");
1482 return EROFS;
1483 }
1484 }
1485
1486 /* freed space map */
1487 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1488 partd = ump->partitions[phys_part];
1489 if (partd == NULL)
1490 continue;
1491 parthdr = &partd->_impl_use.part_hdr;
1492
1493 /* freed space map */
1494 lb_num = udf_rw32(partd->start_loc);
1495 lb_num += udf_rw32(parthdr->freed_space_bitmap.lb_num);
1496 len = udf_rw32(parthdr->freed_space_bitmap.len);
1497 if (len == 0)
1498 continue;
1499
1500 		DPRINTF(VOLUMES, ("Read freed space bitmap %d\n", lb_num));
1501 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
1502 if (!error && dscr) {
1503 /* analyse */
1504 dscr_type = udf_rw16(dscr->tag.id);
1505 if (dscr_type == TAGID_SPACE_BITMAP) {
1506 DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
1507 ump->part_freed_dscr[phys_part] = &dscr->sbd;
1508
1509 /* fill in ump->part_freed_bits */
1510 				bitmap = &ump->part_freed_bits[phys_part];
1511 bitmap->blob = (uint8_t *) dscr;
1512 bitmap->bits = dscr->sbd.data;
1513 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1514 bitmap->pages = NULL; /* TODO */
1515 bitmap->data_pos = 0;
1516 bitmap->metadata_pos = 0;
1517 } else {
1518 free(dscr, M_UDFVOLD);
1519
1520 printf( "UDF mount: error reading freed "
1521 "space bitmap\n");
1522 return EROFS;
1523 }
1524 } else {
1525 /* blank not allowed */
1526 printf("UDF mount: blank freed space bitmap\n");
1527 return EROFS;
1528 }
1529 }
1530
1531 /* freed space table (not supported) */
1532 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1533 partd = ump->partitions[phys_part];
1534 if (partd == NULL)
1535 continue;
1536 parthdr = &partd->_impl_use.part_hdr;
1537
1538 len = udf_rw32(parthdr->freed_space_table.len);
1539 if (len) {
1540 printf("UDF mount: space tables not supported\n");
1541 return EROFS;
1542 }
1543 }
1544
1545 return 0;
1546 }
1547
1548
1549 /* TODO implement async writeout */
1550 int
1551 udf_write_partition_spacetables(struct udf_mount *ump, int waitfor)
1552 {
1553 union dscrptr *dscr;
1554 /* struct udf_args *args = &ump->mount_args; */
1555 struct part_desc *partd;
1556 struct part_hdr_desc *parthdr;
1557 uint32_t phys_part;
1558 uint32_t lb_num, len, ptov;
1559 int error_all, error;
1560
1561 error_all = 0;
1562 /* unallocated space map */
1563 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1564 partd = ump->partitions[phys_part];
1565 if (partd == NULL)
1566 continue;
1567 parthdr = &partd->_impl_use.part_hdr;
1568
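		/* ptov is the physical start of the partition, used to translate
		 * partition offsets into device addresses */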
1569 ptov = udf_rw32(partd->start_loc);
1570 lb_num = udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
1571 len = udf_rw32(parthdr->unalloc_space_bitmap.len);
1572 if (len == 0)
1573 continue;
1574
1575 DPRINTF(VOLUMES, ("Write unalloc. space bitmap %d\n",
1576 lb_num + ptov));
1577 dscr = (union dscrptr *) ump->part_unalloc_dscr[phys_part];
1578 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1579 (union dscrptr *) dscr,
1580 ptov + lb_num, lb_num);
1581 if (error) {
1582 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
1583 error_all = error;
1584 }
1585 }
1586
1587 /* freed space map */
1588 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1589 partd = ump->partitions[phys_part];
1590 if (partd == NULL)
1591 continue;
1592 parthdr = &partd->_impl_use.part_hdr;
1593
1594 /* freed space map */
1595 ptov = udf_rw32(partd->start_loc);
1596 lb_num = udf_rw32(parthdr->freed_space_bitmap.lb_num);
1597 len = udf_rw32(parthdr->freed_space_bitmap.len);
1598 if (len == 0)
1599 continue;
1600
1601 DPRINTF(VOLUMES, ("Write freed space bitmap %d\n",
1602 lb_num + ptov));
1603 dscr = (union dscrptr *) ump->part_freed_dscr[phys_part];
1604 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1605 (union dscrptr *) dscr,
1606 ptov + lb_num, lb_num);
1607 if (error) {
1608 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
1609 error_all = error;
1610 }
1611 }
1612
1613 return error_all;
1614 }
1615
1616 /*
1617 * Checks if ump's vds information is correct and complete
1618 */
1619
1620 int
1621 udf_process_vds(struct udf_mount *ump) {
1622 union udf_pmap *mapping;
1623 /* struct udf_args *args = &ump->mount_args; */
1624 struct logvol_int_desc *lvint;
1625 struct udf_logvol_info *lvinfo;
1626 struct part_desc *part;
1627 uint32_t n_pm, mt_l;
1628 uint8_t *pmap_pos;
1629 char *domain_name, *map_name;
1630 const char *check_name;
1631 char bits[128];
1632 int pmap_stype, pmap_size;
1633 int pmap_type, log_part, phys_part, raw_phys_part;
1634 int n_phys, n_virt, n_spar, n_meta;
1635 int len, error;
1636
1637 if (ump == NULL)
1638 return ENOENT;
1639
1640 /* we need at least an anchor (trivial, but for safety) */
1641 if (ump->anchors[0] == NULL)
1642 return EINVAL;
1643
1644 /* we need at least one primary and one logical volume descriptor */
1645 	if ((ump->primary_vol == NULL) || (ump->logical_vol == NULL))
1646 return EINVAL;
1647
1648 /* we need at least one partition descriptor */
1649 if (ump->partitions[0] == NULL)
1650 return EINVAL;
1651
1652 	/* check logical volume sector size versus device sector size */
1653 if (udf_rw32(ump->logical_vol->lb_size) != ump->discinfo.sector_size) {
1654 printf("UDF mount: format violation, lb_size != sector size\n");
1655 return EINVAL;
1656 }
1657
1658 /* check domain name */
1659 domain_name = ump->logical_vol->domain_id.id;
1660 if (strncmp(domain_name, "*OSTA UDF Compliant", 20)) {
1661 printf("mount_udf: disc not OSTA UDF Compliant, aborting\n");
1662 return EINVAL;
1663 }
1664
1665 /* retrieve logical volume integrity sequence */
1666 error = udf_retrieve_lvint(ump);
1667
1668 /*
1669 * We need at least one logvol integrity descriptor recorded. Note
1670  * that it's OK to have an open logical volume integrity here. The VAT
1671 * will close/update the integrity.
1672 */
1673 if (ump->logvol_integrity == NULL)
1674 return EINVAL;
1675
1676 /* read in and check unallocated and free space info if writing */
1677 if ((ump->vfs_mountp->mnt_flag & MNT_RDONLY) == 0) {
1678 error = udf_read_partition_spacetables(ump);
1679 if (error)
1680 return error;
1681 }
1682
1683 /* process derived structures */
1684 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
1685 lvint = ump->logvol_integrity;
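	/* the implementation use area follows the 2 * n_pm 32-bit entries of
	 * the free space and size tables */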
1686 lvinfo = (struct udf_logvol_info *) (&lvint->tables[2 * n_pm]);
1687 ump->logvol_info = lvinfo;
1688
1689 /* TODO check udf versions? */
1690
1691 /*
1692 * check logvol mappings: effective virt->log partmap translation
1693 * check and recording of the mapping results. Saves expensive
1694 * strncmp() in tight places.
1695 */
1696 DPRINTF(VOLUMES, ("checking logvol mappings\n"));
1697 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
1698 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */
1699 pmap_pos = ump->logical_vol->maps;
1700
1701 if (n_pm > UDF_PMAPS) {
1702 printf("UDF mount: too many mappings\n");
1703 return EINVAL;
1704 }
1705
1706 ump->data_part = ump->metadata_part = 0;
1707 n_phys = n_virt = n_spar = n_meta = 0;
1708 for (log_part = 0; log_part < n_pm; log_part++) {
1709 mapping = (union udf_pmap *) pmap_pos;
1710 pmap_stype = pmap_pos[0];
1711 pmap_size = pmap_pos[1];
1712 switch (pmap_stype) {
1713 case 1: /* physical mapping */
1714 /* volseq = udf_rw16(mapping->pm1.vol_seq_num); */
1715 raw_phys_part = udf_rw16(mapping->pm1.part_num);
1716 pmap_type = UDF_VTOP_TYPE_PHYS;
1717 n_phys++;
1718 ump->data_part = log_part;
1719 ump->metadata_part = log_part;
1720 break;
1721 case 2: /* virtual/sparable/meta mapping */
1722 map_name = mapping->pm2.part_id.id;
1723 /* volseq = udf_rw16(mapping->pm2.vol_seq_num); */
1724 raw_phys_part = udf_rw16(mapping->pm2.part_num);
1725 pmap_type = UDF_VTOP_TYPE_UNKNOWN;
1726 len = UDF_REGID_ID_SIZE;
1727
1728 check_name = "*UDF Virtual Partition";
1729 if (strncmp(map_name, check_name, len) == 0) {
1730 pmap_type = UDF_VTOP_TYPE_VIRT;
1731 n_virt++;
1732 ump->metadata_part = log_part;
1733 break;
1734 }
1735 check_name = "*UDF Sparable Partition";
1736 if (strncmp(map_name, check_name, len) == 0) {
1737 pmap_type = UDF_VTOP_TYPE_SPARABLE;
1738 n_spar++;
1739 ump->data_part = log_part;
1740 ump->metadata_part = log_part;
1741 break;
1742 }
1743 check_name = "*UDF Metadata Partition";
1744 if (strncmp(map_name, check_name, len) == 0) {
1745 pmap_type = UDF_VTOP_TYPE_META;
1746 n_meta++;
1747 ump->metadata_part = log_part;
1748 break;
1749 }
1750 break;
1751 default:
1752 return EINVAL;
1753 }
1754
1755 /*
1756 * BUGALERT: some rogue implementations use random physical
1757 		 * partition numbers to break other implementations, so look up
1758 		 * the number.
1759 */
1760 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1761 part = ump->partitions[phys_part];
1762 if (part == NULL)
1763 continue;
1764 if (udf_rw16(part->part_num) == raw_phys_part)
1765 break;
1766 }
1767
1768 DPRINTF(VOLUMES, ("\t%d -> %d(%d) type %d\n", log_part,
1769 raw_phys_part, phys_part, pmap_type));
1770
1771 if (phys_part == UDF_PARTITIONS)
1772 return EINVAL;
1773 if (pmap_type == UDF_VTOP_TYPE_UNKNOWN)
1774 return EINVAL;
1775
1776 ump->vtop [log_part] = phys_part;
1777 ump->vtop_tp[log_part] = pmap_type;
1778
1779 pmap_pos += pmap_size;
1780 }
1781 /* not winning the beauty contest */
1782 ump->vtop_tp[UDF_VTOP_RAWPART] = UDF_VTOP_TYPE_RAW;
1783
1784 /* test some basic UDF assertions/requirements */
1785 if ((n_virt > 1) || (n_spar > 1) || (n_meta > 1))
1786 return EINVAL;
1787
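	/* a virtual partition needs a physical partition to map onto and
	 * excludes sparable and metadata partitions */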
1788 if (n_virt) {
1789 if ((n_phys == 0) || n_spar || n_meta)
1790 return EINVAL;
1791 }
1792 if (n_spar + n_phys == 0)
1793 return EINVAL;
1794
1795 	/* determine allocation schemes based on disc format */
1796 	/* VATs can only be on sequential media */
1797 ump->data_alloc = UDF_ALLOC_SPACEMAP;
1798 if (n_virt)
1799 ump->data_alloc = UDF_ALLOC_SEQUENTIAL;
1800
1801 ump->meta_alloc = UDF_ALLOC_SPACEMAP;
1802 if (n_virt)
1803 ump->meta_alloc = UDF_ALLOC_VAT;
1804 if (n_meta)
1805 ump->meta_alloc = UDF_ALLOC_METABITMAP;
1806
1807 /* special cases for pseudo-overwrite */
1808 if (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE) {
1809 ump->data_alloc = UDF_ALLOC_SEQUENTIAL;
1810 if (n_meta) {
1811 ump->meta_alloc = UDF_ALLOC_METASEQUENTIAL;
1812 } else {
1813 ump->meta_alloc = UDF_ALLOC_RELAXEDSEQUENTIAL;
1814 }
1815 }
1816
1817 /* determine logical volume open/closure actions */
1818 if (n_virt) {
1819 ump->lvopen = 0;
1820 if (ump->discinfo.last_session_state == MMC_STATE_CLOSED)
1821 ump->lvopen |= UDF_OPEN_SESSION ;
1822 ump->lvclose = UDF_WRITE_VAT;
1823 if (ump->mount_args.udfmflags & UDFMNT_CLOSESESSION)
1824 ump->lvclose |= UDF_CLOSE_SESSION;
1825 } else {
1826 /* `normal' rewritable or non sequential media */
1827 ump->lvopen = UDF_WRITE_LVINT;
1828 ump->lvclose = UDF_WRITE_LVINT;
1829 if ((ump->discinfo.mmc_cur & MMC_CAP_REWRITABLE) == 0)
1830 ump->lvopen |= UDF_APPENDONLY_LVINT;
1831 }
1832
1833 /*
1834 	 * Determine scheduler error behaviour. For virtual partitions, update
1835 	 * the trackinfo; for sparable partitions replace a whole block on the
1836 	 * sparing table. Always requeue.
1837 */
1838 ump->lvreadwrite = 0;
1839 if (n_virt)
1840 ump->lvreadwrite = UDF_UPDATE_TRACKINFO;
1841 if (n_spar)
1842 ump->lvreadwrite = UDF_REMAP_BLOCK;
1843
1844 /*
1845 	 * Select our scheduler
1846 */
1847 ump->strategy = &udf_strat_rmw;
1848 if (n_virt || (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE))
1849 ump->strategy = &udf_strat_sequential;
1850 if ((ump->discinfo.mmc_class == MMC_CLASS_DISC) ||
1851 (ump->discinfo.mmc_class == MMC_CLASS_UNKN))
1852 ump->strategy = &udf_strat_direct;
1853 if (n_spar)
1854 ump->strategy = &udf_strat_rmw;
1855
1856 /* print results */
1857 DPRINTF(VOLUMES, ("\tdata alloc scheme %d, meta alloc scheme %d\n",
1858 ump->data_alloc, ump->meta_alloc));
1859 DPRINTF(VOLUMES, ("\tdata partition %d, metadata partition %d\n",
1860 ump->data_part, ump->metadata_part));
1861
1862 bitmask_snprintf(ump->lvopen, UDFLOGVOL_BITS, bits, sizeof(bits));
1863 DPRINTF(VOLUMES, ("\tactions on logvol open %s\n", bits));
1864 bitmask_snprintf(ump->lvclose, UDFLOGVOL_BITS, bits, sizeof(bits));
1865 DPRINTF(VOLUMES, ("\tactions on logvol close %s\n", bits));
1866 bitmask_snprintf(ump->lvreadwrite, UDFONERROR_BITS, bits, sizeof(bits));
1867 DPRINTF(VOLUMES, ("\tactions on logvol errors %s\n", bits));
1868
1869 	DPRINTF(VOLUMES, ("\tselected scheduler `%s`\n",
1870 (ump->strategy == &udf_strat_direct) ? "Direct" :
1871 (ump->strategy == &udf_strat_sequential) ? "Sequential" :
1872 (ump->strategy == &udf_strat_rmw) ? "RMW" : "UNKNOWN!"));
1873
1874 	/* signal it's OK for now */
1875 return 0;
1876 }
1877
1878 /* --------------------------------------------------------------------- */
1879
1880 /*
1881 * Update logical volume name in all structures that keep a record of it. We
1882 * use memmove since each of them might be specified as a source.
1883 *
1884 * Note that it doesn't update the VAT structure!
1885 */
1886
1887 static void
1888 udf_update_logvolname(struct udf_mount *ump, char *logvol_id)
1889 {
1890 struct logvol_desc *lvd = NULL;
1891 struct fileset_desc *fsd = NULL;
1892 struct udf_lv_info *lvi = NULL;
1893
1894 DPRINTF(VOLUMES, ("Updating logical volume name\n"));
1895 lvd = ump->logical_vol;
1896 fsd = ump->fileset_desc;
1897 if (ump->implementation)
1898 lvi = &ump->implementation->_impl_use.lv_info;
1899
1900 	/* logvol's id might be specified as the original so use memmove here */
1901 memmove(lvd->logvol_id, logvol_id, 128);
1902 if (fsd)
1903 memmove(fsd->logvol_id, logvol_id, 128);
1904 if (lvi)
1905 memmove(lvi->logvol_id, logvol_id, 128);
1906 }
1907
1908 /* --------------------------------------------------------------------- */
1909
1910 /*
1911 * Extended attribute support. UDF knows of 3 places for extended attributes:
1912 *
1913 * (a) inside the file's (e)fe in the length of the extended attribute area
1914 * before the allocation descriptors/filedata
1915 *
1916 * (b) in a file referenced by (e)fe->ext_attr_icb and
1917 *
1918 * (c) in the e(fe)'s associated stream directory that can hold various
1920 * sub-files. In the stream directory a few fixed named subfiles are reserved
1921 * for NT/Unix ACL's and OS/2 attributes.
1922 *
1923 * NOTE: Extended attributes are read randomly but always written
1924 * *atomically*. For ACL's this interface is probably different but not known
1925 * to me yet.
1926 */
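/*
 * Illustrative only: a minimal sketch of how the internal search helper
 * below is typically driven (compare udf_update_lvid_from_vat_extattr()
 * further down). The helper name is made up; the 2048 attribute class and
 * the attribute name are taken from that caller, and the block is not
 * compiled.
 */
#if 0
static int
udf_example_find_impl_extattr(struct udf_node *node)
{
	struct impl_extattr_entry *implext;
	uint32_t offset, a_l;
	uint8_t *ea_start;
	int error;

	/* the extended attribute area lives inside the (e)fe */
	ea_start = node->fe ? node->fe->data : node->efe->data;

	/* class 2048 = implementation use attributes [4/48.10.8] */
	error = udf_extattr_search_intern(node, 2048,
	    "*UDF VAT LVExtension", &offset, &a_l);
	if (error)
		return error;

	/* offset is relative to the start of the extended attribute area */
	implext = (struct impl_extattr_entry *) (ea_start + offset);
	return udf_impl_extattr_check(implext);
}
#endif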
1927
1928 static int
1929 udf_impl_extattr_check(struct impl_extattr_entry *implext)
1930 {
1931 uint16_t *spos;
1932
1933 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
1934 /* checksum valid? */
1935 DPRINTF(EXTATTR, ("checking UDF impl. attr checksum\n"));
1936 spos = (uint16_t *) implext->data;
1937 if (udf_rw16(*spos) != udf_ea_cksum((uint8_t *) implext))
1938 return EINVAL;
1939 }
1940 return 0;
1941 }
1942
1943 static void
1944 udf_calc_impl_extattr_checksum(struct impl_extattr_entry *implext)
1945 {
1946 uint16_t *spos;
1947
1948 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
1949 /* set checksum */
1950 spos = (uint16_t *) implext->data;
1951 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
1952 }
1953 }
1954
1955
1956 int
1957 udf_extattr_search_intern(struct udf_node *node,
1958 uint32_t sattr, char const *sattrname,
1959 uint32_t *offsetp, uint32_t *lengthp)
1960 {
1961 struct extattrhdr_desc *eahdr;
1962 struct extattr_entry *attrhdr;
1963 struct impl_extattr_entry *implext;
1964 uint32_t offset, a_l, sector_size;
1965 int32_t l_ea;
1966 uint8_t *pos;
1967 int error;
1968
1969 /* get mountpoint */
1970 sector_size = node->ump->discinfo.sector_size;
1971
1972 /* get information from fe/efe */
1973 if (node->fe) {
1974 l_ea = udf_rw32(node->fe->l_ea);
1975 eahdr = (struct extattrhdr_desc *) node->fe->data;
1976 } else {
1977 assert(node->efe);
1978 l_ea = udf_rw32(node->efe->l_ea);
1979 eahdr = (struct extattrhdr_desc *) node->efe->data;
1980 }
1981
1982 /* something recorded here? */
1983 if (l_ea == 0)
1984 return ENOENT;
1985
1986 /* check extended attribute tag; what to do if it fails? */
1987 error = udf_check_tag(eahdr);
1988 if (error)
1989 return EINVAL;
1990 if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
1991 return EINVAL;
1992 error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
1993 if (error)
1994 return EINVAL;
1995
1996 DPRINTF(EXTATTR, ("Found %d bytes of extended attributes\n", l_ea));
1997
1998 /* looking for Ecma-167 attributes? */
1999 offset = sizeof(struct extattrhdr_desc);
2000
2001 /* looking for either implementation use or application use */
2002 if (sattr == 2048) { /* [4/48.10.8] */
2003 offset = udf_rw32(eahdr->impl_attr_loc);
2004 if (offset == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2005 return ENOENT;
2006 }
2007 if (sattr == 65536) { /* [4/48.10.9] */
2008 offset = udf_rw32(eahdr->appl_attr_loc);
2009 if (offset == UDF_APPL_ATTR_LOC_NOT_PRESENT)
2010 return ENOENT;
2011 }
2012
2013 /* paranoia check offset and l_ea */
2014 if (l_ea + offset >= sector_size - sizeof(struct extattr_entry))
2015 return EINVAL;
2016
2017 DPRINTF(EXTATTR, ("Starting at offset %d\n", offset));
2018
2019 /* find our extended attribute */
2020 l_ea -= offset;
2021 pos = (uint8_t *) eahdr + offset;
2022
2023 while (l_ea >= sizeof(struct extattr_entry)) {
2024 DPRINTF(EXTATTR, ("%d extended attr bytes left\n", l_ea));
2025 attrhdr = (struct extattr_entry *) pos;
2026 implext = (struct impl_extattr_entry *) pos;
2027
2028 /* get complete attribute length and check for rogue values */
2029 a_l = udf_rw32(attrhdr->a_l);
2030 DPRINTF(EXTATTR, ("attribute %d:%d, len %d/%d\n",
2031 udf_rw32(attrhdr->type),
2032 attrhdr->subtype, a_l, l_ea));
2033 if ((a_l == 0) || (a_l > l_ea))
2034 return EINVAL;
2035
2036 if (attrhdr->type != sattr)
2037 goto next_attribute;
2038
2039 /* we might have found it! */
2040 if (attrhdr->type < 2048) { /* Ecma-167 attribute */
2041 *offsetp = offset;
2042 *lengthp = a_l;
2043 return 0; /* success */
2044 }
2045
2046 /*
2047 * Implementation use and application use extended attributes
2048 * have a name to identify them. They share the same structure; only
2049 * UDF implementation use extended attributes have a checksum
2050 * we need to check.
2051 */
2052
2053 DPRINTF(EXTATTR, ("named attribute %s\n", implext->imp_id.id));
2054 if (strcmp(implext->imp_id.id, sattrname) == 0) {
2055 /* we have found our appl/implementation attribute */
2056 *offsetp = offset;
2057 *lengthp = a_l;
2058 return 0; /* success */
2059 }
2060
2061 next_attribute:
2062 /* next attribute */
2063 pos += a_l;
2064 l_ea -= a_l;
2065 offset += a_l;
2066 }
2067 /* not found */
2068 return ENOENT;
2069 }
2070
2071
2072
2073 /* --------------------------------------------------------------------- */
2074
2075 static int
2076 udf_update_lvid_from_vat_extattr(struct udf_node *vat_node)
2077 {
2078 struct udf_mount *ump;
2079 struct udf_logvol_info *lvinfo;
2080 struct impl_extattr_entry *implext;
2081 struct vatlvext_extattr_entry lvext;
2082 const char *extstr = "*UDF VAT LVExtension";
2083 uint64_t vat_uniqueid;
2084 uint32_t offset, a_l;
2085 uint8_t *ea_start, *lvextpos;
2086 int error;
2087
2088 /* get mountpoint and lvinfo */
2089 ump = vat_node->ump;
2090 lvinfo = ump->logvol_info;
2091
2092 /* get information from fe/efe */
2093 if (vat_node->fe) {
2094 vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
2095 ea_start = vat_node->fe->data;
2096 } else {
2097 vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
2098 ea_start = vat_node->efe->data;
2099 }
2100
2101 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
2102 if (error)
2103 return error;
2104
2105 implext = (struct impl_extattr_entry *) (ea_start + offset);
2106 error = udf_impl_extattr_check(implext);
2107 if (error)
2108 return error;
2109
2110 /* paranoia */
2111 if (a_l != sizeof(*implext) -1 + udf_rw32(implext->iu_l) + sizeof(lvext)) {
2112 DPRINTF(VOLUMES, ("VAT LVExtension size doesn't compute\n"));
2113 return EINVAL;
2114 }
2115
2116 /*
2117 * we have found our "VAT LVExtension" attribute. BUT due to a
2118 * bug in the specification it might not be word aligned, so
2119 * copy it first to avoid panics on some machines (!!)
2120 */
2121 DPRINTF(VOLUMES, ("Found VAT LVExtension attr\n"));
2122 lvextpos = implext->data + udf_rw32(implext->iu_l);
2123 memcpy(&lvext, lvextpos, sizeof(lvext));
2124
2125 /* check if it was updated the last time */
2126 if (udf_rw64(lvext.unique_id_chk) == vat_uniqueid) {
2127 lvinfo->num_files = lvext.num_files;
2128 lvinfo->num_directories = lvext.num_directories;
2129 udf_update_logvolname(ump, lvext.logvol_id);
2130 } else {
2131 DPRINTF(VOLUMES, ("VAT LVExtension out of date\n"));
2132 /* replace VAT LVExt by free space EA */
2133 memset(implext->imp_id.id, 0, UDF_REGID_ID_SIZE);
2134 strcpy(implext->imp_id.id, "*UDF FreeEASpace");
2135 udf_calc_impl_extattr_checksum(implext);
2136 }
2137
2138 return 0;
2139 }
2140
2141
2142 static int
2143 udf_update_vat_extattr_from_lvid(struct udf_node *vat_node)
2144 {
2145 struct udf_mount *ump;
2146 struct udf_logvol_info *lvinfo;
2147 struct impl_extattr_entry *implext;
2148 struct vatlvext_extattr_entry lvext;
2149 const char *extstr = "*UDF VAT LVExtension";
2150 uint64_t vat_uniqueid;
2151 uint32_t offset, a_l;
2152 uint8_t *ea_start, *lvextpos;
2153 int error;
2154
2155 /* get mountpoint and lvinfo */
2156 ump = vat_node->ump;
2157 lvinfo = ump->logvol_info;
2158
2159 /* get information from fe/efe */
2160 if (vat_node->fe) {
2161 vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
2162 ea_start = vat_node->fe->data;
2163 } else {
2164 vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
2165 ea_start = vat_node->efe->data;
2166 }
2167
2168 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
2169 if (error)
2170 return error;
2171 /* found, it existed */
2172
2173 /* paranoia */
2174 implext = (struct impl_extattr_entry *) (ea_start + offset);
2175 error = udf_impl_extattr_check(implext);
2176 if (error) {
2177 DPRINTF(VOLUMES, ("VAT LVExtension bad on update\n"));
2178 return error;
2179 }
2180 /* it is correct */
2181
2182 /*
2183 * we have found our "VAT LVExtension" attribute. BUT due to a
2184 * bug in the specification it might not be word aligned, so
2185 * copy it first to avoid panics on some machines (!!)
2186 */
2187 DPRINTF(VOLUMES, ("Updating VAT LVExtension attr\n"));
2188 lvextpos = implext->data + udf_rw32(implext->iu_l);
2189
2190 lvext.unique_id_chk = vat_uniqueid;
2191 lvext.num_files = lvinfo->num_files;
2192 lvext.num_directories = lvinfo->num_directories;
2193 memmove(lvext.logvol_id, ump->logical_vol->logvol_id, 128);
2194
2195 memcpy(lvextpos, &lvext, sizeof(lvext));
2196
2197 return 0;
2198 }
2199
2200 /* --------------------------------------------------------------------- */
2201
2202 int
2203 udf_vat_read(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
2204 {
2205 struct udf_mount *ump = vat_node->ump;
2206
2207 if (offset + size > ump->vat_offset + ump->vat_entries * 4)
2208 return EINVAL;
2209
2210 memcpy(blob, ump->vat_table + offset, size);
2211 return 0;
2212 }
2213
2214 int
2215 udf_vat_write(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
2216 {
2217 struct udf_mount *ump = vat_node->ump;
2218 uint32_t offset_high;
2219 uint8_t *new_vat_table;
2220
2221 /* extend the VAT allocation if needed */
2222 offset_high = offset + size;
2223 if (offset_high >= ump->vat_table_alloc_len) {
2224 /* realloc */
2225 new_vat_table = realloc(ump->vat_table,
2226 ump->vat_table_alloc_len + UDF_VAT_CHUNKSIZE,
2227 M_UDFVOLD, M_WAITOK | M_CANFAIL);
2228 if (!new_vat_table) {
2229 printf("udf_vat_write: can't extent VAT, out of mem\n");
2230 return ENOMEM;
2231 }
2232 ump->vat_table = new_vat_table;
2233 ump->vat_table_alloc_len += UDF_VAT_CHUNKSIZE;
2234 }
2235 ump->vat_table_len = MAX(ump->vat_table_len, offset_high);
2236
2237 memcpy(ump->vat_table + offset, blob, size);
2238 return 0;
2239 }
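/*
 * Illustrative only: a sketch of how a virtual block number could be
 * resolved through the in-core VAT using udf_vat_read() above, assuming
 * each entry is a 32-bit logical block number stored at offset
 * vat_offset + 4 * virtual block (as set up in udf_check_for_vat() below).
 * The helper name is made up and the block is not compiled.
 */
#if 0
static int
udf_example_vat_translate(struct udf_mount *ump, uint32_t virt_lb,
	uint32_t *log_lbp)
{
	uint32_t raw;
	int error;

	if (virt_lb >= ump->vat_entries)
		return EINVAL;

	error = udf_vat_read(ump->vat_node, (uint8_t *) &raw, 4,
	    ump->vat_offset + virt_lb * 4);
	if (error)
		return error;

	*log_lbp = udf_rw32(raw);
	return 0;
}
#endif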
2240
2241 /* --------------------------------------------------------------------- */
2242
2243 /* TODO support previous VAT location writeout */
2244 static int
2245 udf_update_vat_descriptor(struct udf_mount *ump)
2246 {
2247 struct udf_node *vat_node = ump->vat_node;
2248 struct udf_logvol_info *lvinfo = ump->logvol_info;
2249 struct icb_tag *icbtag;
2250 struct udf_oldvat_tail *oldvat_tl;
2251 struct udf_vat *vat;
2252 uint64_t unique_id;
2253 uint32_t lb_size;
2254 uint8_t *raw_vat;
2255 int filetype, error;
2256
2257 KASSERT(vat_node);
2258 KASSERT(lvinfo);
2259 lb_size = udf_rw32(ump->logical_vol->lb_size);
2260
2261 /* get our new unique_id */
2262 unique_id = udf_advance_uniqueid(ump);
2263
2264 /* get information from fe/efe */
2265 if (vat_node->fe) {
2266 icbtag = &vat_node->fe->icbtag;
2267 vat_node->fe->unique_id = udf_rw64(unique_id);
2268 } else {
2269 icbtag = &vat_node->efe->icbtag;
2270 vat_node->efe->unique_id = udf_rw64(unique_id);
2271 }
2272
2273 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
2274 filetype = icbtag->file_type;
2275 KASSERT((filetype == 0) || (filetype == UDF_ICB_FILETYPE_VAT));
2276
2277 /* allocate piece to process head or tail of VAT file */
2278 raw_vat = malloc(lb_size, M_TEMP, M_WAITOK);
2279
2280 if (filetype == 0) {
2281 /*
2282 * Update "*UDF VAT LVExtension" extended attribute from the
2283 * lvint if present.
2284 */
2285 udf_update_vat_extattr_from_lvid(vat_node);
2286
2287 /* setup identifying regid */
2288 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2289 memset(oldvat_tl, 0, sizeof(struct udf_oldvat_tail));
2290
2291 udf_set_regid(&oldvat_tl->id, "*UDF Virtual Alloc Tbl");
2292 udf_add_udf_regid(ump, &oldvat_tl->id);
2293 oldvat_tl->prev_vat = udf_rw32(0xffffffff);
2294
2295 /* write out new tail of virtual allocation table file */
2296 error = udf_vat_write(vat_node, raw_vat,
2297 sizeof(struct udf_oldvat_tail), ump->vat_entries * 4);
2298 } else {
2299 /* compose the VAT2 header */
2300 vat = (struct udf_vat *) raw_vat;
2301 memset(vat, 0, sizeof(struct udf_vat));
2302
2303 vat->header_len = udf_rw16(152); /* as per spec */
2304 vat->impl_use_len = udf_rw16(0);
2305 memmove(vat->logvol_id, ump->logical_vol->logvol_id, 128);
2306 vat->prev_vat = udf_rw32(0xffffffff);
2307 vat->num_files = lvinfo->num_files;
2308 vat->num_directories = lvinfo->num_directories;
2309 vat->min_udf_readver = lvinfo->min_udf_readver;
2310 vat->min_udf_writever = lvinfo->min_udf_writever;
2311 vat->max_udf_writever = lvinfo->max_udf_writever;
2312
2313 error = udf_vat_write(vat_node, raw_vat,
2314 sizeof(struct udf_vat), 0);
2315 }
2316 free(raw_vat, M_TEMP);
2317
2318 return error;
2319 }
2320
2321
2322 int
2323 udf_writeout_vat(struct udf_mount *ump)
2324 {
2325 struct udf_node *vat_node = ump->vat_node;
2326 uint32_t vat_length;
2327 int error;
2328
2329 KASSERT(vat_node);
2330
2331 DPRINTF(CALL, ("udf_writeout_vat\n"));
2332
2333 mutex_enter(&ump->allocate_mutex);
2334 udf_update_vat_descriptor(ump);
2335
2336 /* write out the VAT contents ; TODO intelligent writing */
2337 vat_length = ump->vat_table_len;
2338 error = vn_rdwr(UIO_WRITE, vat_node->vnode,
2339 ump->vat_table, ump->vat_table_len, 0,
2340 UIO_SYSSPACE, IO_NODELOCKED, FSCRED, NULL, NULL);
2341 if (error) {
2342 printf("udf_writeout_vat: failed to write out VAT contents\n");
2343 goto out;
2344 }
2345
2346 mutex_exit(&ump->allocate_mutex);
2347
2348 vflushbuf(ump->vat_node->vnode, 1 /* sync */);
2349 error = VOP_FSYNC(ump->vat_node->vnode,
2350 FSCRED, FSYNC_WAIT, 0, 0);
2351 if (error)
2352 printf("udf_writeout_vat: error writing VAT node!\n");
2353 out:
2354
2355 return error;
2356 }
2357
2358 /* --------------------------------------------------------------------- */
2359
2360 /*
2361 * Read in the relevant pieces of the VAT file and check if it's indeed a VAT
2362 * file descriptor. If OK, read in the complete VAT file.
2363 */
2364
2365 static int
2366 udf_check_for_vat(struct udf_node *vat_node)
2367 {
2368 struct udf_mount *ump;
2369 struct icb_tag *icbtag;
2370 struct timestamp *mtime;
2371 struct udf_vat *vat;
2372 struct udf_oldvat_tail *oldvat_tl;
2373 struct udf_logvol_info *lvinfo;
2374 uint64_t unique_id;
2375 uint32_t vat_length;
2376 uint32_t vat_offset, vat_entries, vat_table_alloc_len;
2377 uint32_t sector_size;
2378 uint32_t *raw_vat;
2379 uint8_t *vat_table;
2380 char *regid_name;
2381 int filetype;
2382 int error;
2383
2384 /* vat_length is 64 bits on disc though sizes beyond 32 bits are impossible in practice */
2385
2386 DPRINTF(VOLUMES, ("Checking for VAT\n"));
2387 if (!vat_node)
2388 return ENOENT;
2389
2390 /* get mount info */
2391 ump = vat_node->ump;
2392 sector_size = udf_rw32(ump->logical_vol->lb_size);
2393
2394 /* check assertions */
2395 assert(vat_node->fe || vat_node->efe);
2396 assert(ump->logvol_integrity);
2397
2398 /* set vnode type to regular file or we can't read from it! */
2399 vat_node->vnode->v_type = VREG;
2400
2401 /* get information from fe/efe */
2402 if (vat_node->fe) {
2403 vat_length = udf_rw64(vat_node->fe->inf_len);
2404 icbtag = &vat_node->fe->icbtag;
2405 mtime = &vat_node->fe->mtime;
2406 unique_id = udf_rw64(vat_node->fe->unique_id);
2407 } else {
2408 vat_length = udf_rw64(vat_node->efe->inf_len);
2409 icbtag = &vat_node->efe->icbtag;
2410 mtime = &vat_node->efe->mtime;
2411 unique_id = udf_rw64(vat_node->efe->unique_id);
2412 }
2413
2414 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
2415 filetype = icbtag->file_type;
2416 if ((filetype != 0) && (filetype != UDF_ICB_FILETYPE_VAT))
2417 return ENOENT;
2418
2419 DPRINTF(VOLUMES, ("\tPossible VAT length %d\n", vat_length));
2420
2421 vat_table_alloc_len =
2422 ((vat_length + UDF_VAT_CHUNKSIZE-1) / UDF_VAT_CHUNKSIZE)
2423 * UDF_VAT_CHUNKSIZE;
2424
2425 vat_table = malloc(vat_table_alloc_len, M_UDFVOLD,
2426 M_CANFAIL | M_WAITOK);
2427 if (vat_table == NULL) {
2428 printf("allocation of %d bytes failed for VAT\n",
2429 vat_table_alloc_len);
2430 return ENOMEM;
2431 }
2432
2433 /* allocate piece to read in head or tail of VAT file */
2434 raw_vat = malloc(sector_size, M_TEMP, M_WAITOK);
2435
2436 /*
2437 * check the contents of the file if it's the old 1.50 VAT table format.
2438 * It's notoriously broken and although some implementations support an
2439 * extension as defined in the UDF 1.50 errata document, it's doubtful
2440 * it is usable since a lot of implementations don't maintain it.
2441 */
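	/*
	 * Roughly, the two on-disc layouts handled below are (derived from
	 * the parsing code; only indicative):
	 *
	 *   UDF 1.50 VAT: [ 4-byte entries ... ][ struct udf_oldvat_tail ]
	 *   UDF 2.xx VAT: [ struct udf_vat header ][ 4-byte entries ... ]
	 *
	 * hence vat_offset is 0 for the old format and header_len for the
	 * new one.
	 */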
2442 lvinfo = ump->logvol_info;
2443
2444 if (filetype == 0) {
2445 /* definition */
2446 vat_offset = 0;
2447 vat_entries = (vat_length-36)/4;
2448
2449 /* read in tail of virtual allocation table file */
2450 error = vn_rdwr(UIO_READ, vat_node->vnode,
2451 (uint8_t *) raw_vat,
2452 sizeof(struct udf_oldvat_tail),
2453 vat_entries * 4,
2454 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2455 NULL, NULL);
2456 if (error)
2457 goto out;
2458
2459 /* check 1.50 VAT */
2460 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2461 regid_name = (char *) oldvat_tl->id.id;
2462 error = strncmp(regid_name, "*UDF Virtual Alloc Tbl", 22);
2463 if (error) {
2464 DPRINTF(VOLUMES, ("VAT format 1.50 rejected\n"));
2465 error = ENOENT;
2466 goto out;
2467 }
2468
2469 /*
2470 * update LVID from "*UDF VAT LVExtension" extended attribute
2471 * if present.
2472 */
2473 udf_update_lvid_from_vat_extattr(vat_node);
2474 } else {
2475 /* read in head of virtual allocation table file */
2476 error = vn_rdwr(UIO_READ, vat_node->vnode,
2477 (uint8_t *) raw_vat,
2478 sizeof(struct udf_vat), 0,
2479 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2480 NULL, NULL);
2481 if (error)
2482 goto out;
2483
2484 /* definition */
2485 vat = (struct udf_vat *) raw_vat;
2486 vat_offset = vat->header_len;
2487 vat_entries = (vat_length - vat_offset)/4;
2488
2489 assert(lvinfo);
2490 lvinfo->num_files = vat->num_files;
2491 lvinfo->num_directories = vat->num_directories;
2492 lvinfo->min_udf_readver = vat->min_udf_readver;
2493 lvinfo->min_udf_writever = vat->min_udf_writever;
2494 lvinfo->max_udf_writever = vat->max_udf_writever;
2495
2496 udf_update_logvolname(ump, vat->logvol_id);
2497 }
2498
2499 /* read in complete VAT file */
2500 error = vn_rdwr(UIO_READ, vat_node->vnode,
2501 vat_table,
2502 vat_length, 0,
2503 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2504 NULL, NULL);
2505 if (error)
2506 printf("read in of complete VAT file failed (error %d)\n",
2507 error);
2508 if (error)
2509 goto out;
2510
2511 DPRINTF(VOLUMES, ("VAT format accepted, marking it closed\n"));
2512 ump->logvol_integrity->lvint_next_unique_id = unique_id;
2513 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
2514 ump->logvol_integrity->time = *mtime;
2515
2516 ump->vat_table_len = vat_length;
2517 ump->vat_table_alloc_len = vat_table_alloc_len;
2518 ump->vat_table = vat_table;
2519 ump->vat_offset = vat_offset;
2520 ump->vat_entries = vat_entries;
2521 ump->vat_last_free_lb = 0; /* start at beginning */
2522
2523 out:
2524 if (error) {
2525 if (vat_table)
2526 free(vat_table, M_UDFVOLD);
2527 }
2528 free(raw_vat, M_TEMP);
2529
2530 return error;
2531 }
2532
2533 /* --------------------------------------------------------------------- */
2534
2535 static int
2536 udf_search_vat(struct udf_mount *ump, union udf_pmap *mapping)
2537 {
2538 struct udf_node *vat_node;
2539 struct long_ad icb_loc;
2540 uint32_t early_vat_loc, late_vat_loc, vat_loc;
2541 int error;
2542
2543 /* mapping info not needed */
2544 mapping = mapping;
2545
2546 vat_loc = ump->last_possible_vat_location;
2547 early_vat_loc = vat_loc - 256; /* 8 blocks of 32 sectors */
2548
2549 DPRINTF(VOLUMES, ("1) last possible %d, early_vat_loc %d \n",
2550 vat_loc, early_vat_loc));
2551 early_vat_loc = MAX(early_vat_loc, ump->first_possible_vat_location);
2552 late_vat_loc = vat_loc + 1024;
2553
2554 DPRINTF(VOLUMES, ("2) last possible %d, early_vat_loc %d \n",
2555 vat_loc, early_vat_loc));
2556
2557 /* start looking from the end of the range */
2558 do {
2559 DPRINTF(VOLUMES, ("Checking for VAT at sector %d\n", vat_loc));
2560 icb_loc.loc.part_num = udf_rw16(UDF_VTOP_RAWPART);
2561 icb_loc.loc.lb_num = udf_rw32(vat_loc);
2562
2563 error = udf_get_node(ump, &icb_loc, &vat_node);
2564 if (!error) {
2565 error = udf_check_for_vat(vat_node);
2566 DPRINTFIF(VOLUMES, !error,
2567 ("VAT accepted at %d\n", vat_loc));
2568 if (!error)
2569 break;
2570 }
2571 if (vat_node) {
2572 vput(vat_node->vnode);
2573 vat_node = NULL;
2574 }
2575 vat_loc--; /* walk backwards */
2576 } while (vat_loc >= early_vat_loc);
2577
2578 /* keep our VAT node around */
2579 if (vat_node) {
2580 UDF_SET_SYSTEMFILE(vat_node->vnode);
2581 ump->vat_node = vat_node;
2582 }
2583
2584 return error;
2585 }
2586
2587 /* --------------------------------------------------------------------- */
2588
2589 static int
2590 udf_read_sparables(struct udf_mount *ump, union udf_pmap *mapping)
2591 {
2592 union dscrptr *dscr;
2593 struct part_map_spare *pms = &mapping->pms;
2594 uint32_t lb_num;
2595 int spar, error;
2596
2597 /*
2598 * The partition mapping passed on to us specifies the information
2599 * we need to locate and initialise the sparable partition
2600 * mapping.
2601 */
2602
2603 DPRINTF(VOLUMES, ("Read sparable table\n"));
2604 ump->sparable_packet_size = udf_rw16(pms->packet_len);
2605 KASSERT(ump->sparable_packet_size >= ump->packet_size); /* XXX */
2606
2607 for (spar = 0; spar < pms->n_st; spar++) {
2608 lb_num = pms->st_loc[spar];
2609 DPRINTF(VOLUMES, ("Checking for sparing table %d\n", lb_num));
2610 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
2611 if (!error && dscr) {
2612 if (udf_rw16(dscr->tag.id) == TAGID_SPARING_TABLE) {
2613 if (ump->sparing_table)
2614 free(ump->sparing_table, M_UDFVOLD);
2615 ump->sparing_table = &dscr->spt;
2616 dscr = NULL;
2617 DPRINTF(VOLUMES,
2618 ("Sparing table accepted (%d entries)\n",
2619 udf_rw16(ump->sparing_table->rt_l)));
2620 break; /* we're done */
2621 }
2622 }
2623 if (dscr)
2624 free(dscr, M_UDFVOLD);
2625 }
2626
2627 if (ump->sparing_table)
2628 return 0;
2629
2630 return ENOENT;
2631 }
2632
2633 /* --------------------------------------------------------------------- */
2634
2635 static int
2636 udf_read_metadata_nodes(struct udf_mount *ump, union udf_pmap *mapping)
2637 {
2638 struct part_map_meta *pmm = &mapping->pmm;
2639 struct long_ad icb_loc;
2640 struct vnode *vp;
2641 int error;
2642
2643 DPRINTF(VOLUMES, ("Reading in Metadata files\n"));
2644 icb_loc.loc.part_num = pmm->part_num;
2645 icb_loc.loc.lb_num = pmm->meta_file_lbn;
2646 DPRINTF(VOLUMES, ("Metadata file\n"));
2647 error = udf_get_node(ump, &icb_loc, &ump->metadata_node);
2648 if (ump->metadata_node) {
2649 vp = ump->metadata_node->vnode;
2650 UDF_SET_SYSTEMFILE(vp);
2651 }
2652
2653 icb_loc.loc.lb_num = pmm->meta_mirror_file_lbn;
2654 if (icb_loc.loc.lb_num != -1) {
2655 DPRINTF(VOLUMES, ("Metadata copy file\n"));
2656 error = udf_get_node(ump, &icb_loc, &ump->metadatamirror_node);
2657 if (ump->metadatamirror_node) {
2658 vp = ump->metadatamirror_node->vnode;
2659 UDF_SET_SYSTEMFILE(vp);
2660 }
2661 }
2662
2663 icb_loc.loc.lb_num = pmm->meta_bitmap_file_lbn;
2664 if (icb_loc.loc.lb_num != -1) {
2665 DPRINTF(VOLUMES, ("Metadata bitmap file\n"));
2666 error = udf_get_node(ump, &icb_loc, &ump->metadatabitmap_node);
2667 if (ump->metadatabitmap_node) {
2668 vp = ump->metadatabitmap_node->vnode;
2669 UDF_SET_SYSTEMFILE(vp);
2670 }
2671 }
2672
2673 /* if we're mounting read-only we relax the requirements */
2674 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) {
2675 error = EFAULT;
2676 if (ump->metadata_node)
2677 error = 0;
2678 if ((ump->metadata_node == NULL) && (ump->metadatamirror_node)) {
2679 printf( "udf mount: Metadata file not readable, "
2680 "substituting Metadata copy file\n");
2681 ump->metadata_node = ump->metadatamirror_node;
2682 ump->metadatamirror_node = NULL;
2683 error = 0;
2684 }
2685 } else {
2686 /* mounting read/write */
2687 DPRINTF(VOLUMES, ("udf mount: read only file system\n"));
2688 error = EROFS;
2689 }
2690 DPRINTFIF(VOLUMES, error, ("udf mount: failed to read "
2691 "metadata files\n"));
2692 return error;
2693 }
2694
2695 /* --------------------------------------------------------------------- */
2696
2697 int
2698 udf_read_vds_tables(struct udf_mount *ump)
2699 {
2700 union udf_pmap *mapping;
2701 /* struct udf_args *args = &ump->mount_args; */
2702 uint32_t n_pm, mt_l;
2703 uint32_t log_part;
2704 uint8_t *pmap_pos;
2705 int pmap_size;
2706 int error;
2707
2708 /* Iterate again over the part mappings for locations */
2709 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
2710 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */
2711 pmap_pos = ump->logical_vol->maps;
2712
2713 for (log_part = 0; log_part < n_pm; log_part++) {
2714 mapping = (union udf_pmap *) pmap_pos;
2715 switch (ump->vtop_tp[log_part]) {
2716 case UDF_VTOP_TYPE_PHYS :
2717 /* nothing */
2718 break;
2719 case UDF_VTOP_TYPE_VIRT :
2720 /* search and load VAT */
2721 error = udf_search_vat(ump, mapping);
2722 if (error)
2723 return ENOENT;
2724 break;
2725 case UDF_VTOP_TYPE_SPARABLE :
2726 /* load one of the sparable tables */
2727 error = udf_read_sparables(ump, mapping);
2728 if (error)
2729 return ENOENT;
2730 break;
2731 case UDF_VTOP_TYPE_META :
2732 /* load the associated file descriptors */
2733 error = udf_read_metadata_nodes(ump, mapping);
2734 if (error)
2735 return ENOENT;
2736 break;
2737 default:
2738 break;
2739 }
2740 pmap_size = pmap_pos[1];
2741 pmap_pos += pmap_size;
2742 }
2743
2744 return 0;
2745 }
2746
2747 /* --------------------------------------------------------------------- */
2748
2749 int
2750 udf_read_rootdirs(struct udf_mount *ump)
2751 {
2752 union dscrptr *dscr;
2753 /* struct udf_args *args = &ump->mount_args; */
2754 struct udf_node *rootdir_node, *streamdir_node;
2755 struct long_ad fsd_loc, *dir_loc;
2756 uint32_t lb_num, dummy;
2757 uint32_t fsd_len;
2758 int dscr_type;
2759 int error;
2760
2761 /* TODO implement FSD reading in separate function like integrity? */
2762 /* get fileset descriptor sequence */
2763 fsd_loc = ump->logical_vol->lv_fsd_loc;
2764 fsd_len = udf_rw32(fsd_loc.len);
2765
2766 dscr = NULL;
2767 error = 0;
2768 while (fsd_len || error) {
2769 DPRINTF(VOLUMES, ("fsd_len = %d\n", fsd_len));
2770 /* translate fsd_loc to lb_num */
2771 error = udf_translate_vtop(ump, &fsd_loc, &lb_num, &dummy);
2772 if (error)
2773 break;
2774 DPRINTF(VOLUMES, ("Reading FSD at lb %d\n", lb_num));
2775 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
2776 /* end markers */
2777 if (error || (dscr == NULL))
2778 break;
2779
2780 /* analyse */
2781 dscr_type = udf_rw16(dscr->tag.id);
2782 if (dscr_type == TAGID_TERM)
2783 break;
2784 if (dscr_type != TAGID_FSD) {
2785 free(dscr, M_UDFVOLD);
2786 return ENOENT;
2787 }
2788
2789 /*
2790 * TODO check for multiple fileset descriptors; it's only
2791 * picking the last one now. Also check for FSD
2792 * correctness/interpretability
2793 */
2794
2795 /* update */
2796 if (ump->fileset_desc) {
2797 free(ump->fileset_desc, M_UDFVOLD);
2798 }
2799 ump->fileset_desc = &dscr->fsd;
2800 dscr = NULL;
2801
2802 /* continue to the next fsd */
2803 fsd_len -= ump->discinfo.sector_size;
2804 fsd_loc.loc.lb_num = udf_rw32(udf_rw32(fsd_loc.loc.lb_num)+1);
2805
2806 /* follow up to fsd->next_ex (long_ad) if it's not null */
2807 if (udf_rw32(ump->fileset_desc->next_ex.len)) {
2808 DPRINTF(VOLUMES, ("follow up FSD extent\n"));
2809 fsd_loc = ump->fileset_desc->next_ex;
2810 fsd_len = udf_rw32(ump->fileset_desc->next_ex.len);
2811 }
2812 }
2813 if (dscr)
2814 free(dscr, M_UDFVOLD);
2815
2816 /* there has to be one */
2817 if (ump->fileset_desc == NULL)
2818 return ENOENT;
2819
2820 DPRINTF(VOLUMES, ("FSD read in fine\n"));
2821 DPRINTF(VOLUMES, ("Updating fsd logical volume id\n"));
2822 udf_update_logvolname(ump, ump->logical_vol->logvol_id);
2823
2824 /*
2825 * Now the FSD is known, read in the rootdirectory and if one exists,
2826 * the system stream dir. Some files in the system streamdir are not
2827 * wanted in this implementation since they are not maintained. If
2828 * writing is enabled we'll delete these files if they exist.
2829 */
2830
2831 rootdir_node = streamdir_node = NULL;
2832 dir_loc = NULL;
2833
2834 /* try to read in the rootdir */
2835 dir_loc = &ump->fileset_desc->rootdir_icb;
2836 error = udf_get_node(ump, dir_loc, &rootdir_node);
2837 if (error)
2838 return ENOENT;
2839
2840 /* apparently it read in fine */
2841
2842 /*
2843 * Try the system stream directory; not very likely in the ones we
2844 * test, but for completeness.
2845 */
2846 dir_loc = &ump->fileset_desc->streamdir_icb;
2847 if (udf_rw32(dir_loc->len)) {
2848 printf("udf_read_rootdirs: streamdir defined ");
2849 error = udf_get_node(ump, dir_loc, &streamdir_node);
2850 if (error) {
2851 printf("but error in streamdir reading\n");
2852 } else {
2853 printf("but ignored\n");
2854 /*
2855 * TODO process streamdir `baddies' i.e. files we don't
2856 * want if R/W
2857 */
2858 }
2859 }
2860
2861 DPRINTF(VOLUMES, ("Rootdir(s) read in fine\n"));
2862
2863 /* release the vnodes again; they'll be auto-recycled later */
2864 if (streamdir_node) {
2865 vput(streamdir_node->vnode);
2866 }
2867 if (rootdir_node) {
2868 vput(rootdir_node->vnode);
2869 }
2870
2871 return 0;
2872 }
2873
2874 /* --------------------------------------------------------------------- */
2875
2876 /* To make absolutely sure we are NOT returning zero, add one :) */
2877
2878 long
2879 udf_calchash(struct long_ad *icbptr)
2880 {
2881 /* ought to be enough since each mountpoint has its own chain */
2882 return udf_rw32(icbptr->loc.lb_num) + 1;
2883 }
2884
2885
2886 static struct udf_node *
2887 udf_hash_lookup(struct udf_mount *ump, struct long_ad *icbptr)
2888 {
2889 struct udf_node *node;
2890 struct vnode *vp;
2891 uint32_t hashline;
2892
2893 loop:
2894 mutex_enter(&ump->ihash_lock);
2895
2896 hashline = udf_calchash(icbptr) & UDF_INODE_HASHMASK;
2897 LIST_FOREACH(node, &ump->udf_nodes[hashline], hashchain) {
2898 assert(node);
2899 if (node->loc.loc.lb_num == icbptr->loc.lb_num &&
2900 node->loc.loc.part_num == icbptr->loc.part_num) {
2901 vp = node->vnode;
2902 assert(vp);
2903 mutex_enter(&vp->v_interlock);
2904 mutex_exit(&ump->ihash_lock);
2905 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
2906 goto loop;
2907 return node;
2908 }
2909 }
2910 mutex_exit(&ump->ihash_lock);
2911
2912 return NULL;
2913 }
2914
2915
2916 static void
2917 udf_sorted_list_insert(struct udf_node *node)
2918 {
2919 struct udf_mount *ump;
2920 struct udf_node *s_node, *last_node;
2921 uint32_t loc, s_loc;
2922
2923 ump = node->ump;
2924 last_node = NULL; /* XXX gcc */
2925
2926 if (LIST_EMPTY(&ump->sorted_udf_nodes)) {
2927 LIST_INSERT_HEAD(&ump->sorted_udf_nodes, node, sortchain);
2928 return;
2929 }
2930
2931 /*
2932 * We sort on logical block number here and not on physical block
2933 * number. Ideally we should go for the physical block number to get
2934 * better sync performance, though this sort will ensure that packets
2935 * won't get split up unnecessarily.
2936 */
2937
2938 loc = udf_rw32(node->loc.loc.lb_num);
2939 LIST_FOREACH(s_node, &ump->sorted_udf_nodes, sortchain) {
2940 s_loc = udf_rw32(s_node->loc.loc.lb_num);
2941 if (s_loc > loc) {
2942 LIST_INSERT_BEFORE(s_node, node, sortchain);
2943 return;
2944 }
2945 last_node = s_node;
2946 }
2947 LIST_INSERT_AFTER(last_node, node, sortchain);
2948 }
2949
2950
2951 static void
2952 udf_register_node(struct udf_node *node)
2953 {
2954 struct udf_mount *ump;
2955 struct udf_node *chk;
2956 uint32_t hashline;
2957
2958 ump = node->ump;
2959 mutex_enter(&ump->ihash_lock);
2960
2961 /* add to our hash table */
2962 hashline = udf_calchash(&node->loc) & UDF_INODE_HASHMASK;
2963 #ifdef DEBUG
2964 LIST_FOREACH(chk, &ump->udf_nodes[hashline], hashchain) {
2965 assert(chk);
2966 if (chk->loc.loc.lb_num == node->loc.loc.lb_num &&
2967 chk->loc.loc.part_num == node->loc.loc.part_num)
2968 panic("Double node entered\n");
2969 }
2970 #else
2971 chk = NULL;
2972 #endif
2973 LIST_INSERT_HEAD(&ump->udf_nodes[hashline], node, hashchain);
2974
2975 /* add to our sorted list */
2976 udf_sorted_list_insert(node);
2977
2978 mutex_exit(&ump->ihash_lock);
2979 }
2980
2981
2982 static void
2983 udf_deregister_node(struct udf_node *node)
2984 {
2985 struct udf_mount *ump;
2986
2987 ump = node->ump;
2988 mutex_enter(&ump->ihash_lock);
2989
2990 /* from hash and sorted list */
2991 LIST_REMOVE(node, hashchain);
2992 LIST_REMOVE(node, sortchain);
2993
2994 mutex_exit(&ump->ihash_lock);
2995 }
2996
2997 /* --------------------------------------------------------------------- */
2998
2999 static void
3000 udf_inittag(struct udf_mount *ump, struct desc_tag *tag, int tagid,
3001 uint32_t sector)
3002 {
3003 assert(ump->logical_vol);
3004
3005 tag->id = udf_rw16(tagid);
3006 tag->descriptor_ver = ump->logical_vol->tag.descriptor_ver;
3007 tag->cksum = 0;
3008 tag->reserved = 0;
3009 tag->serial_num = ump->logical_vol->tag.serial_num;
3010 tag->tag_loc = udf_rw32(sector);
3011 }
3012
3013
3014 uint64_t
3015 udf_advance_uniqueid(struct udf_mount *ump)
3016 {
3017 uint64_t unique_id;
3018
3019 mutex_enter(&ump->logvol_mutex);
3020 unique_id = udf_rw64(ump->logvol_integrity->lvint_next_unique_id);
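	/*
	 * Values below 0x10 are skipped; presumably because UDF reserves
	 * the first 16 unique id values (the root directory uses 0). This
	 * is an assumption from common UDF practice, not re-checked against
	 * the spec text here.
	 */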
3021 if (unique_id < 0x10)
3022 unique_id = 0x10;
3023 ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id + 1);
3024 mutex_exit(&ump->logvol_mutex);
3025
3026 return unique_id;
3027 }
3028
3029
3030 static void
3031 udf_adjust_filecount(struct udf_node *udf_node, int sign)
3032 {
3033 struct udf_mount *ump = udf_node->ump;
3034 uint32_t num_dirs, num_files;
3035 int udf_file_type;
3036
3037 /* get file type */
3038 if (udf_node->fe) {
3039 udf_file_type = udf_node->fe->icbtag.file_type;
3040 } else {
3041 udf_file_type = udf_node->efe->icbtag.file_type;
3042 }
3043
3044 /* adjust file count */
3045 mutex_enter(&ump->allocate_mutex);
3046 if (udf_file_type == UDF_ICB_FILETYPE_DIRECTORY) {
3047 num_dirs = udf_rw32(ump->logvol_info->num_directories);
3048 ump->logvol_info->num_directories =
3049 udf_rw32((num_dirs + sign));
3050 } else {
3051 num_files = udf_rw32(ump->logvol_info->num_files);
3052 ump->logvol_info->num_files =
3053 udf_rw32((num_files + sign));
3054 }
3055 mutex_exit(&ump->allocate_mutex);
3056 }
3057
3058
3059 void
3060 udf_osta_charset(struct charspec *charspec)
3061 {
3062 bzero(charspec, sizeof(struct charspec));
3063 charspec->type = 0;
3064 strcpy((char *) charspec->inf, "OSTA Compressed Unicode");
3065 }
3066
3067
3068 /* first call udf_set_regid and then add the appropriate suffix */
3069 void
3070 udf_set_regid(struct regid *regid, char const *name)
3071 {
3072 bzero(regid, sizeof(struct regid));
3073 regid->flags = 0; /* not dirty and not protected */
3074 strcpy((char *) regid->id, name);
3075 }
3076
3077
3078 void
3079 udf_add_domain_regid(struct udf_mount *ump, struct regid *regid)
3080 {
3081 uint16_t *ver;
3082
3083 ver = (uint16_t *) regid->id_suffix;
3084 *ver = ump->logvol_info->min_udf_readver;
3085 }
3086
3087
3088 void
3089 udf_add_udf_regid(struct udf_mount *ump, struct regid *regid)
3090 {
3091 uint16_t *ver;
3092
3093 ver = (uint16_t *) regid->id_suffix;
3094 *ver = ump->logvol_info->min_udf_readver;
3095
3096 regid->id_suffix[2] = 4; /* unix */
3097 regid->id_suffix[3] = 8; /* NetBSD */
3098 }
3099
3100
3101 void
3102 udf_add_impl_regid(struct udf_mount *ump, struct regid *regid)
3103 {
3104 regid->id_suffix[0] = 4; /* unix */
3105 regid->id_suffix[1] = 8; /* NetBSD */
3106 }
3107
3108
3109 void
3110 udf_add_app_regid(struct udf_mount *ump, struct regid *regid)
3111 {
3112 regid->id_suffix[0] = APP_VERSION_MAIN;
3113 regid->id_suffix[1] = APP_VERSION_SUB;
3114 }
3115
3116 static int
3117 udf_create_parentfid(struct udf_mount *ump, struct fileid_desc *fid,
3118 struct long_ad *parent, uint64_t unique_id)
3119 {
3120 /* the size of an empty FID is 38 but needs to be a multiple of 4 */
3121 int fidsize = 40;
3122
3123 udf_inittag(ump, &fid->tag, TAGID_FID, udf_rw32(parent->loc.lb_num));
3124 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
3125 fid->file_char = UDF_FILE_CHAR_DIR | UDF_FILE_CHAR_PAR;
3126 fid->icb = *parent;
3127 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
3128 fid->tag.desc_crc_len = fidsize - UDF_DESC_TAG_LENGTH;
3129 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
3130
3131 return fidsize;
3132 }
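/*
 * Illustrative only: the general FID sizing rule that the hardcoded 40
 * above is an instance of -- 38 fixed bytes plus the file identifier and
 * implementation use areas, padded up to a multiple of 4. The helper name
 * is made up and the block is not compiled.
 */
#if 0
static uint32_t
udf_example_fid_size(uint32_t l_fi, uint32_t l_iu)
{
	/* 38 is the size of an empty FID, see the comment above */
	return (38 + l_fi + l_iu + 3) & ~3;
}
#endif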
3133
3134 /* --------------------------------------------------------------------- */
3135
3136 int
3137 udf_open_logvol(struct udf_mount *ump)
3138 {
3139 int logvol_integrity;
3140 int error;
3141
3142 /* already/still open? */
3143 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
3144 if (logvol_integrity == UDF_INTEGRITY_OPEN)
3145 return 0;
3146
3147 /* can we open it ? */
3148 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
3149 return EROFS;
3150
3151 /* setup write parameters */
3152 DPRINTF(VOLUMES, ("Setting up write parameters\n"));
3153 if ((error = udf_setup_writeparams(ump)) != 0)
3154 return error;
3155
3156 /* determine data and metadata tracks (most likely same) */
3157 error = udf_search_writing_tracks(ump);
3158 if (error) {
3159 /* most likely lack of space */
3160 printf("udf_open_logvol: error searching writing tracks\n");
3161 return EROFS;
3162 }
3163
3164 /* writeout/update lvint on disc or only in memory */
3165 DPRINTF(VOLUMES, ("Opening logical volume\n"));
3166 if (ump->lvopen & UDF_OPEN_SESSION) {
3167 /* TODO implement writeout of VRS + VDS */
3168 printf( "udf_open_logvol:Opening a closed session not yet "
3169 "implemented\n");
3170 return EROFS;
3171
3172 /* determine data and metadata tracks again */
3173 error = udf_search_writing_tracks(ump);
3174 }
3175
3176 /* mark it open */
3177 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_OPEN);
3178
3179 /* do we need to write it out? */
3180 if (ump->lvopen & UDF_WRITE_LVINT) {
3181 error = udf_writeout_lvint(ump, ump->lvopen);
3182 /* if we couldn't write it mark it closed again */
3183 if (error) {
3184 ump->logvol_integrity->integrity_type =
3185 udf_rw32(UDF_INTEGRITY_CLOSED);
3186 return error;
3187 }
3188 }
3189
3190 return 0;
3191 }
3192
3193
3194 int
3195 udf_close_logvol(struct udf_mount *ump, int mntflags)
3196 {
3197 int logvol_integrity;
3198 int error = 0;
3199 int n;
3200
3201 /* already/still closed? */
3202 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
3203 if (logvol_integrity == UDF_INTEGRITY_CLOSED)
3204 return 0;
3205
3206 /* writeout/update lvint or write out VAT */
3207 DPRINTF(VOLUMES, ("Closing logical volume\n"));
3208 if (ump->lvclose & UDF_WRITE_VAT) {
3209 DPRINTF(VOLUMES, ("lvclose & UDF_WRITE_VAT\n"));
3210
3211 /* preprocess the VAT node; it's modified on every writeout */
3212 DPRINTF(VOLUMES, ("writeout vat_node\n"));
3213 udf_update_vat_descriptor(ump->vat_node->ump);
3214
3215 /* write out the VAT node */
3216 vflushbuf(ump->vat_node->vnode, 1 /* sync */);
3217 for (n = 0; n < 16; n++) {
3218 ump->vat_node->i_flags |= IN_MODIFIED;
3219 error = VOP_FSYNC(ump->vat_node->vnode,
3220 FSCRED, FSYNC_WAIT, 0, 0);
3221 }
3222 if (error) {
3223 printf("udf_close_logvol: writeout of VAT failed\n");
3224 return error;
3225 }
3226 }
3227
3228 if (ump->lvclose & UDF_WRITE_PART_BITMAPS) {
3229 error = udf_write_partition_spacetables(ump, 1 /* waitfor */);
3230 if (error) {
3231 printf( "udf_close_logvol: writeout of space tables "
3232 "failed\n");
3233 return error;
3234 }
3235 ump->lvclose &= ~UDF_WRITE_PART_BITMAPS;
3236 }
3237
3238 if (ump->lvclose & UDF_CLOSE_SESSION) {
3239 printf("TODO: Closing a session is not yet implemented\n");
3240 return EROFS;
3241 ump->lvopen |= UDF_OPEN_SESSION;
3242 }
3243
3244 /* mark it closed */
3245 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
3246
3247 /* do we need to write out the logical volume integrity */
3248 if (ump->lvclose & UDF_WRITE_LVINT)
3249 error = udf_writeout_lvint(ump, ump->lvopen);
3250 if (error) {
3251 /* HELP now what? mark it open again for now */
3252 ump->logvol_integrity->integrity_type =
3253 udf_rw32(UDF_INTEGRITY_OPEN);
3254 return error;
3255 }
3256
3257 (void) udf_synchronise_caches(ump);
3258
3259 return 0;
3260 }
3261
3262 /* --------------------------------------------------------------------- */
3263
3264 /*
3265 * Genfs interfacing
3266 *
3267 * static const struct genfs_ops udf_genfsops = {
3268 * .gop_size = genfs_size,
3269 * size of transfers
3270 * .gop_alloc = udf_gop_alloc,
3271 * allocate len bytes at offset
3272 * .gop_write = genfs_gop_write,
3273 * putpages interface code
3274 * .gop_markupdate = udf_gop_markupdate,
3275 * set update/modify flags etc.
3276 * }
3277 */
3278
3279 /*
3280 * Genfs interface. These four functions are the only ones defined though not
3281 * documented... great....
3282 */
3283
3284 /*
3285 * Callback from genfs to allocate len bytes at offset off; only called when
3286 * filling up gaps in the allocation.
3287 */
3288 /* XXX should we check whether there is enough space in udf_gop_alloc? */
3289 static int
3290 udf_gop_alloc(struct vnode *vp, off_t off,
3291 off_t len, int flags, kauth_cred_t cred)
3292 {
3293 #if 0
3294 struct udf_node *udf_node = VTOI(vp);
3295 struct udf_mount *ump = udf_node->ump;
3296 uint32_t lb_size, num_lb;
3297 #endif
3298
3299 DPRINTF(NOTIMPL, ("udf_gop_alloc not implemented\n"));
3300 DPRINTF(ALLOC, ("udf_gop_alloc called for %"PRIu64" bytes\n", len));
3301
3302 return 0;
3303 }
3304
3305
3306 /*
3307 * callback from genfs to update our flags
3308 */
3309 static void
3310 udf_gop_markupdate(struct vnode *vp, int flags)
3311 {
3312 struct udf_node *udf_node = VTOI(vp);
3313 u_long mask = 0;
3314
3315 if ((flags & GOP_UPDATE_ACCESSED) != 0) {
3316 mask = IN_ACCESS;
3317 }
3318 if ((flags & GOP_UPDATE_MODIFIED) != 0) {
3319 if (vp->v_type == VREG) {
3320 mask |= IN_CHANGE | IN_UPDATE;
3321 } else {
3322 mask |= IN_MODIFY;
3323 }
3324 }
3325 if (mask) {
3326 udf_node->i_flags |= mask;
3327 }
3328 }
3329
3330
3331 static const struct genfs_ops udf_genfsops = {
3332 .gop_size = genfs_size,
3333 .gop_alloc = udf_gop_alloc,
3334 .gop_write = genfs_gop_write_rwmap,
3335 .gop_markupdate = udf_gop_markupdate,
3336 };
3337
3338
3339 /* --------------------------------------------------------------------- */
3340
3341 int
3342 udf_write_terminator(struct udf_mount *ump, uint32_t sector)
3343 {
3344 union dscrptr *dscr;
3345 int error;
3346
3347 dscr = malloc(ump->discinfo.sector_size, M_TEMP, M_WAITOK);
3348 bzero(dscr, ump->discinfo.sector_size);
3349 udf_inittag(ump, &dscr->tag, TAGID_TERM, sector);
3350
3351 /* CRC length for an anchor is 512 - tag length; defined in Ecma 167 */
3352 dscr->tag.desc_crc_len = udf_rw16(512-UDF_DESC_TAG_LENGTH);
3353 (void) udf_validate_tag_and_crc_sums(dscr);
3354
3355 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
3356 dscr, sector, sector);
3357
3358 free(dscr, M_TEMP);
3359
3360 return error;
3361 }
3362
3363
3364 /* --------------------------------------------------------------------- */
3365
3366 /* UDF<->unix converters */
3367
3368 /* --------------------------------------------------------------------- */
3369
3370 static mode_t
3371 udf_perm_to_unix_mode(uint32_t perm)
3372 {
3373 mode_t mode;
3374
3375 mode = ((perm & UDF_FENTRY_PERM_USER_MASK) );
3376 mode |= ((perm & UDF_FENTRY_PERM_GRP_MASK ) >> 2);
3377 mode |= ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4);
3378
3379 return mode;
3380 }
3381
3382 /* --------------------------------------------------------------------- */
3383
3384 static uint32_t
3385 unix_mode_to_udf_perm(mode_t mode)
3386 {
3387 uint32_t perm;
3388
3389 perm = ((mode & S_IRWXO) );
3390 perm |= ((mode & S_IRWXG) << 2);
3391 perm |= ((mode & S_IRWXU) << 4);
3392 perm |= ((mode & S_IWOTH) << 3);
3393 perm |= ((mode & S_IWGRP) << 5);
3394 perm |= ((mode & S_IWUSR) << 7);
3395
3396 return perm;
3397 }
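/*
 * Worked example (illustrative, derived from the shifts above): mode 0644
 * (rw-r--r--) maps to
 *   0x0004 (other read) | 0x0080 (group read) | 0x1800 (owner read+write)
 *   | 0x4000 (owner delete, UDF's companion to write)
 * = 0x5884; the three extra shifted write bits grant the corresponding UDF
 * delete permission whenever unix write permission is given.
 */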
3398
3399 /* --------------------------------------------------------------------- */
3400
3401 static uint32_t
3402 udf_icb_to_unix_filetype(uint32_t icbftype)
3403 {
3404 switch (icbftype) {
3405 case UDF_ICB_FILETYPE_DIRECTORY :
3406 case UDF_ICB_FILETYPE_STREAMDIR :
3407 return S_IFDIR;
3408 case UDF_ICB_FILETYPE_FIFO :
3409 return S_IFIFO;
3410 case UDF_ICB_FILETYPE_CHARDEVICE :
3411 return S_IFCHR;
3412 case UDF_ICB_FILETYPE_BLOCKDEVICE :
3413 return S_IFBLK;
3414 case UDF_ICB_FILETYPE_RANDOMACCESS :
3415 case UDF_ICB_FILETYPE_REALTIME :
3416 return S_IFREG;
3417 case UDF_ICB_FILETYPE_SYMLINK :
3418 return S_IFLNK;
3419 case UDF_ICB_FILETYPE_SOCKET :
3420 return S_IFSOCK;
3421 }
3422 /* no idea what this is */
3423 return 0;
3424 }
3425
3426 /* --------------------------------------------------------------------- */
3427
3428 void
3429 udf_to_unix_name(char *result, char *id, int len, struct charspec *chsp)
3430 {
3431 uint16_t *raw_name, *unix_name;
3432 uint16_t *inchp, ch;
3433 uint8_t *outchp;
3434 const char *osta_id = "OSTA Compressed Unicode";
3435 int ucode_chars, nice_uchars, is_osta_typ0;
3436
3437 raw_name = malloc(2048 * sizeof(uint16_t), M_UDFTEMP, M_WAITOK);
3438 unix_name = raw_name + 1024; /* split space in half */
3439 assert(sizeof(char) == sizeof(uint8_t));
3440 outchp = (uint8_t *) result;
3441
3442 is_osta_typ0 = (chsp->type == 0);
3443 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
3444 if (is_osta_typ0) {
3445 *raw_name = *unix_name = 0;
3446 ucode_chars = udf_UncompressUnicode(len, (uint8_t *) id, raw_name);
3447 ucode_chars = MIN(ucode_chars, UnicodeLength((unicode_t *) raw_name));
3448 nice_uchars = UDFTransName(unix_name, raw_name, ucode_chars);
3449 for (inchp = unix_name; nice_uchars>0; inchp++, nice_uchars--) {
3450 ch = *inchp;
3451 /* XXX sloppy unicode -> latin */
3452 *outchp++ = ch & 255;
3453 if (!ch) break;
3454 }
3455 *outchp++ = 0;
3456 } else {
3457 /* assume a compression id byte of 8 followed by 8-bit latin-1 chars */
3458 assert(*id == 8);
3459 strncpy((char *) result, (char *) (id+1), strlen((char *) (id+1)));
3460 }
3461 free(raw_name, M_UDFTEMP);
3462 }
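/*
 * Illustrative only: in the non-OSTA fallback above the identifier is
 * treated as a compression-id byte followed by 8-bit characters, so a raw
 * id of { 8, 'F', 'o', 'o', 0 } would come out as the unix name "Foo".
 * This mirrors the fallback path of unix_to_udf_name() below.
 */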
3463
3464 /* --------------------------------------------------------------------- */
3465
3466 void
3467 unix_to_udf_name(char *result, uint8_t *result_len, char const *name, int name_len,
3468 struct charspec *chsp)
3469 {
3470 uint16_t *raw_name;
3471 uint16_t *outchp;
3472 const char *inchp;
3473 const char *osta_id = "OSTA Compressed Unicode";
3474 int cnt, udf_chars, is_osta_typ0;
3475
3476 /* allocate temporary unicode-16 buffer */
3477 raw_name = malloc(1024, M_UDFTEMP, M_WAITOK);
3478
3479 /* convert latin-1 or whatever to unicode-16 */
3480 *raw_name = 0;
3481 inchp = name;
3482 outchp = raw_name;
3483 for (cnt = name_len; cnt; cnt--) {
3484 *outchp++ = (uint16_t) (*inchp++);
3485 }
3486
3487 is_osta_typ0 = (chsp->type == 0);
3488 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
3489 if (is_osta_typ0) {
3490 udf_chars = udf_CompressUnicode(name_len, 8,
3491 (unicode_t *) raw_name,
3492 (byte *) result);
3493 } else {
3494 printf("unix to udf name: no CHSP0 ?\n");
3495 /* XXX assume a compression id byte of 8 followed by 8-bit latin-1 chars */
3496 *result++ = 8; udf_chars = 1;
3497 strncpy(result, name + 1, name_len);
3498 udf_chars += name_len;
3499 }
3500 *result_len = udf_chars;
3501 free(raw_name, M_UDFTEMP);
3502 }
3503
3504 /* --------------------------------------------------------------------- */
3505
3506 void
3507 udf_timestamp_to_timespec(struct udf_mount *ump,
3508 struct timestamp *timestamp,
3509 struct timespec *timespec)
3510 {
3511 struct clock_ymdhms ymdhms;
3512 uint32_t usecs, secs, nsecs;
3513 uint16_t tz;
3514
3515 /* fill in ymdhms structure from timestamp */
3516 memset(&ymdhms, 0, sizeof(ymdhms));
3517 ymdhms.dt_year = udf_rw16(timestamp->year);
3518 ymdhms.dt_mon = timestamp->month;
3519 ymdhms.dt_day = timestamp->day;
3520 ymdhms.dt_wday = 0; /* ? */
3521 ymdhms.dt_hour = timestamp->hour;
3522 ymdhms.dt_min = timestamp->minute;
3523 ymdhms.dt_sec = timestamp->second;
3524
3525 secs = clock_ymdhms_to_secs(&ymdhms);
3526 usecs = timestamp->usec +
3527 100*timestamp->hund_usec + 10000*timestamp->centisec;
3528 nsecs = usecs * 1000;
3529
3530 /*
3531 * Calculate the time zone. The timezone is 12 bit signed 2's
3532 * complement, so we have to do some extra magic to handle it right.
3533 */
3534 tz = udf_rw16(timestamp->type_tz);
3535 tz &= 0x0fff; /* only lower 12 bits are significant */
3536 if (tz & 0x0800) /* sign extention */
3537 tz |= 0xf000;
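	/*
	 * e.g. (illustrative): a low 12-bit value of 0x078 stays +120
	 * minutes (UTC+2), while 0xfc4 sign-extends to 0xffc4, i.e. -60
	 * minutes (UTC-1).
	 */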
3538
3539 /* TODO check timezone conversion */
3540 /* check if we are specified a timezone to convert */
3541 if (udf_rw16(timestamp->type_tz) & 0x1000) {
3542 if ((int16_t) tz != -2047)
3543 secs -= (int16_t) tz * 60;
3544 } else {
3545 secs -= ump->mount_args.gmtoff;
3546 }
3547
3548 timespec->tv_sec = secs;
3549 timespec->tv_nsec = nsecs;
3550 }
3551
3552
3553 void
3554 udf_timespec_to_timestamp(struct timespec *timespec, struct timestamp *timestamp)
3555 {
3556 struct clock_ymdhms ymdhms;
3557 uint32_t husec, usec, csec;
3558
3559 (void) clock_secs_to_ymdhms(timespec->tv_sec, &ymdhms);
3560
3561 usec = (timespec->tv_nsec + 500) / 1000; /* round */
3562 husec = usec / 100;
3563 usec -= husec * 100; /* only 0-99 in usec */
3564 csec = husec / 100; /* only 0-99 in csec */
3565 husec -= csec * 100; /* only 0-99 in husec */
3566
3567 /* set method 1 for CUT/GMT */
3568 timestamp->type_tz = udf_rw16((1<<12) + 0);
3569 timestamp->year = udf_rw16(ymdhms.dt_year);
3570 timestamp->month = ymdhms.dt_mon;
3571 timestamp->day = ymdhms.dt_day;
3572 timestamp->hour = ymdhms.dt_hour;
3573 timestamp->minute = ymdhms.dt_min;
3574 timestamp->second = ymdhms.dt_sec;
3575 timestamp->centisec = csec;
3576 timestamp->hund_usec = husec;
3577 timestamp->usec = usec;
3578 }
3579
3580 /* --------------------------------------------------------------------- */
3581
3582 /*
3583 * Attribute and filetype converters with get/set pairs
3584 */
3585
3586 uint32_t
3587 udf_getaccessmode(struct udf_node *udf_node)
3588 {
3589 struct file_entry *fe = udf_node->fe;
3590 struct extfile_entry *efe = udf_node->efe;
3591 uint32_t udf_perm, icbftype;
3592 uint32_t mode, ftype;
3593 uint16_t icbflags;
3594
3595 UDF_LOCK_NODE(udf_node, 0);
3596 if (fe) {
3597 udf_perm = udf_rw32(fe->perm);
3598 icbftype = fe->icbtag.file_type;
3599 icbflags = udf_rw16(fe->icbtag.flags);
3600 } else {
3601 assert(udf_node->efe);
3602 udf_perm = udf_rw32(efe->perm);
3603 icbftype = efe->icbtag.file_type;
3604 icbflags = udf_rw16(efe->icbtag.flags);
3605 }
3606
3607 mode = udf_perm_to_unix_mode(udf_perm);
3608 ftype = udf_icb_to_unix_filetype(icbftype);
3609
3610 /* set suid, sgid, sticky from flags in fe/efe */
3611 if (icbflags & UDF_ICB_TAG_FLAGS_SETUID)
3612 mode |= S_ISUID;
3613 if (icbflags & UDF_ICB_TAG_FLAGS_SETGID)
3614 mode |= S_ISGID;
3615 if (icbflags & UDF_ICB_TAG_FLAGS_STICKY)
3616 mode |= S_ISVTX;
3617
3618 UDF_UNLOCK_NODE(udf_node, 0);
3619
3620 return mode | ftype;
3621 }
3622
3623
3624 void
3625 udf_setaccessmode(struct udf_node *udf_node, mode_t mode)
3626 {
3627 struct file_entry *fe = udf_node->fe;
3628 struct extfile_entry *efe = udf_node->efe;
3629 uint32_t udf_perm;
3630 uint16_t icbflags;
3631
3632 UDF_LOCK_NODE(udf_node, 0);
3633 udf_perm = unix_mode_to_udf_perm(mode & ALLPERMS);
3634 if (fe) {
3635 icbflags = udf_rw16(fe->icbtag.flags);
3636 } else {
3637 icbflags = udf_rw16(efe->icbtag.flags);
3638 }
3639
3640 icbflags &= ~UDF_ICB_TAG_FLAGS_SETUID;
3641 icbflags &= ~UDF_ICB_TAG_FLAGS_SETGID;
3642 icbflags &= ~UDF_ICB_TAG_FLAGS_STICKY;
3643 if (mode & S_ISUID)
3644 icbflags |= UDF_ICB_TAG_FLAGS_SETUID;
3645 if (mode & S_ISGID)
3646 icbflags |= UDF_ICB_TAG_FLAGS_SETGID;
3647 if (mode & S_ISVTX)
3648 icbflags |= UDF_ICB_TAG_FLAGS_STICKY;
3649
3650 if (fe) {
3651 fe->perm = udf_rw32(udf_perm);
3652 fe->icbtag.flags = udf_rw16(icbflags);
3653 } else {
3654 efe->perm = udf_rw32(udf_perm);
3655 efe->icbtag.flags = udf_rw16(icbflags);
3656 }
3657
3658 UDF_UNLOCK_NODE(udf_node, 0);
3659 }
3660
3661
3662 void
3663 udf_getownership(struct udf_node *udf_node, uid_t *uidp, gid_t *gidp)
3664 {
3665 struct udf_mount *ump = udf_node->ump;
3666 struct file_entry *fe = udf_node->fe;
3667 struct extfile_entry *efe = udf_node->efe;
3668 uid_t uid;
3669 gid_t gid;
3670
3671 UDF_LOCK_NODE(udf_node, 0);
3672 if (fe) {
3673 uid = (uid_t)udf_rw32(fe->uid);
3674 gid = (gid_t)udf_rw32(fe->gid);
3675 } else {
3676 assert(udf_node->efe);
3677 uid = (uid_t)udf_rw32(efe->uid);
3678 gid = (gid_t)udf_rw32(efe->gid);
3679 }
3680
3681 /* do the uid/gid translation game */
3682 if ((uid == (uid_t) -1) && (gid == (gid_t) -1)) {
3683 uid = ump->mount_args.anon_uid;
3684 gid = ump->mount_args.anon_gid;
3685 }
3686 *uidp = uid;
3687 *gidp = gid;
3688
3689 UDF_UNLOCK_NODE(udf_node, 0);
3690 }
3691
3692
3693 void
3694 udf_setownership(struct udf_node *udf_node, uid_t uid, gid_t gid)
3695 {
3696 struct udf_mount *ump = udf_node->ump;
3697 struct file_entry *fe = udf_node->fe;
3698 struct extfile_entry *efe = udf_node->efe;
3699 uid_t nobody_uid;
3700 gid_t nobody_gid;
3701
3702 UDF_LOCK_NODE(udf_node, 0);
3703
3704 /* do the uid/gid translation game */
3705 nobody_uid = ump->mount_args.nobody_uid;
3706 nobody_gid = ump->mount_args.nobody_gid;
3707 if ((uid == nobody_uid) && (gid == nobody_gid)) {
3708 uid = (uid_t) -1;
3709 gid = (gid_t) -1;
3710 }
3711
3712 if (fe) {
3713 fe->uid = udf_rw32((uint32_t) uid);
3714 fe->gid = udf_rw32((uint32_t) gid);
3715 } else {
3716 efe->uid = udf_rw32((uint32_t) uid);
3717 efe->gid = udf_rw32((uint32_t) gid);
3718 }
3719
3720 UDF_UNLOCK_NODE(udf_node, 0);
3721 }
3722
3723
3724 /* --------------------------------------------------------------------- */
3725
3726 /*
3727 * Directory read and manipulation functions.
3728 *
3729 * Note that if the file is found, the cached diroffset position *before* the
3730 * advance is remembered. Thus if the same filename is looked up again just after
3731 * this lookup it's immediately found.
3732 */
3733
3734 int
3735 udf_lookup_name_in_dir(struct vnode *vp, const char *name, int namelen,
3736 struct long_ad *icb_loc)
3737 {
3738 struct udf_node *dir_node = VTOI(vp);
3739 struct file_entry *fe = dir_node->fe;
3740 struct extfile_entry *efe = dir_node->efe;
3741 struct fileid_desc *fid;
3742 struct dirent *dirent;
3743 uint64_t file_size, diroffset, pre_diroffset;
3744 uint32_t lb_size;
3745 int found, error;
3746
3747 /* get directory filesize */
3748 if (fe) {
3749 file_size = udf_rw64(fe->inf_len);
3750 } else {
3751 assert(efe);
3752 file_size = udf_rw64(efe->inf_len);
3753 }
3754
3755 /* allocate temporary space for fid */
3756 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
3757 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
3758
3759 found = 0;
3760 diroffset = dir_node->last_diroffset;
3761
3762 /*
3763 * if the directory is truncated or if we have never visited it yet,
3764 * start at the end.
3765 */
3766 if ((diroffset >= file_size) || (diroffset == 0)) {
3767 diroffset = dir_node->last_diroffset = file_size;
3768 }
3769
3770 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
3771
3772 while (!found) {
3773 /* if at the end, wrap around through zero */
3774 if (diroffset >= file_size)
3775 diroffset = 0;
3776
3777 pre_diroffset = diroffset;
3778
3779 /* transfer a new fid/dirent */
3780 error = udf_read_fid_stream(vp, &diroffset, fid, dirent);
3781 if (error)
3782 break;
3783
3784 /* skip deleted entries */
3785 if ((fid->file_char & UDF_FILE_CHAR_DEL) == 0) {
3786 if ((strlen(dirent->d_name) == namelen) &&
3787 (strncmp(dirent->d_name, name, namelen) == 0)) {
3788 found = 1;
3789 *icb_loc = fid->icb;
3790 /* remember where we were before the advance */
3791 diroffset = pre_diroffset;
3792 }
3793 }
3794
3795 if (diroffset == dir_node->last_diroffset) {
3796 /* we have cycled */
3797 break;
3798 }
3799 }
3800 free(fid, M_UDFTEMP);
3801 free(dirent, M_UDFTEMP);
3802 dir_node->last_diroffset = diroffset;
3803
3804 return found;
3805 }
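/*
 * Illustrative only: roughly how a lookup path could use the helper above,
 * turning a directory entry name into the referenced node via udf_get_node().
 * The function name is made up and the block is not compiled.
 */
#if 0
static int
udf_example_lookup(struct vnode *dvp, const char *name, struct vnode **vpp)
{
	struct udf_node *dir_node = VTOI(dvp);
	struct udf_node *res_node;
	struct long_ad icb_loc;
	int found, error;

	found = udf_lookup_name_in_dir(dvp, name, (int) strlen(name),
	    &icb_loc);
	if (!found)
		return ENOENT;

	error = udf_get_node(dir_node->ump, &icb_loc, &res_node);
	if (error)
		return error;

	*vpp = res_node->vnode;
	return 0;
}
#endif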
3806
3807 /* --------------------------------------------------------------------- */
3808
3809 static int
3810 udf_create_new_fe(struct udf_mount *ump, struct file_entry *fe, int file_type,
3811 struct long_ad *node_icb, struct long_ad *parent_icb,
3812 uint64_t parent_unique_id)
3813 {
3814 struct timespec now;
3815 struct icb_tag *icb;
3816 uint64_t unique_id;
3817 uint32_t fidsize, lb_num;
3818 int crclen;
3819
3820 lb_num = udf_rw32(node_icb->loc.lb_num);
3821 udf_inittag(ump, &fe->tag, TAGID_FENTRY, lb_num);
3822 icb = &fe->icbtag;
3823
3824 /*
3825  * Always use strategy type 4 unless on WORM which we don't support
3826 * (yet). Fill in defaults and set for internal allocation of data.
3827 */
3828 icb->strat_type = udf_rw16(4);
3829 icb->max_num_entries = udf_rw16(1);
3830 icb->file_type = file_type; /* 8 bit */
3831 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
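	/*
	 * Internal allocation means the file data is embedded in the
	 * descriptor itself; it only moves to external extents once it no
	 * longer fits there (see udf_grow_node()).
	 */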
3832
3833 fe->perm = udf_rw32(0x7fff); /* all is allowed */
3834 fe->link_cnt = udf_rw16(0); /* explicit setting */
3835
3836 fe->ckpoint = udf_rw32(1); /* user supplied file version */
3837
3838 vfs_timestamp(&now);
3839 udf_timespec_to_timestamp(&now, &fe->atime);
3840 udf_timespec_to_timestamp(&now, &fe->attrtime);
3841 udf_timespec_to_timestamp(&now, &fe->mtime);
3842
3843 udf_set_regid(&fe->imp_id, IMPL_NAME);
3844 udf_add_impl_regid(ump, &fe->imp_id);
3845
3846 unique_id = udf_advance_uniqueid(ump);
3847 fe->unique_id = udf_rw64(unique_id);
3848
3849 fidsize = 0;
3850 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
3851 fidsize = udf_create_parentfid(ump,
3852 (struct fileid_desc *) fe->data, parent_icb,
3853 parent_unique_id);
3854 }
3855
3856 /* record fidlength information */
3857 fe->inf_len = udf_rw64(fidsize);
3858 fe->l_ea = udf_rw32(0);
3859 fe->l_ad = udf_rw32(fidsize);
3860 fe->logblks_rec = udf_rw64(0); /* intern */
3861
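	/*
	 * The CRC covers everything after the descriptor tag; the -1 below
	 * compensates for the one-byte data[] placeholder terminating
	 * struct file_entry.
	 */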
3862 crclen = sizeof(struct file_entry) - 1 - UDF_DESC_TAG_LENGTH;
3863 crclen += fidsize;
3864 fe->tag.desc_crc_len = udf_rw16(crclen);
3865
3866 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fe);
3867
3868 return fidsize;
3869 }
3870
3871 /* --------------------------------------------------------------------- */
3872
3873 static int
3874 udf_create_new_efe(struct udf_mount *ump, struct extfile_entry *efe,
3875 int file_type, struct long_ad *node_icb, struct long_ad *parent_icb,
3876 uint64_t parent_unique_id)
3877 {
3878 struct timespec now;
3879 struct icb_tag *icb;
3880 uint64_t unique_id;
3881 uint32_t fidsize, lb_num;
3882 int crclen;
3883
3884 lb_num = udf_rw32(node_icb->loc.lb_num);
3885 udf_inittag(ump, &efe->tag, TAGID_EXTFENTRY, lb_num);
3886 icb = &efe->icbtag;
3887
3888 /*
3889  * Always use strategy type 4 unless on WORM which we don't support
3890 * (yet). Fill in defaults and set for internal allocation of data.
3891 */
3892 icb->strat_type = udf_rw16(4);
3893 icb->max_num_entries = udf_rw16(1);
3894 icb->file_type = file_type; /* 8 bit */
3895 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
3896
3897 efe->perm = udf_rw32(0x7fff); /* all is allowed */
3898 efe->link_cnt = udf_rw16(0); /* explicit setting */
3899
3900 efe->ckpoint = udf_rw32(1); /* user supplied file version */
3901
3902 vfs_timestamp(&now);
3903 udf_timespec_to_timestamp(&now, &efe->ctime);
3904 udf_timespec_to_timestamp(&now, &efe->atime);
3905 udf_timespec_to_timestamp(&now, &efe->attrtime);
3906 udf_timespec_to_timestamp(&now, &efe->mtime);
3907
3908 udf_set_regid(&efe->imp_id, IMPL_NAME);
3909 udf_add_impl_regid(ump, &efe->imp_id);
3910
3911 unique_id = udf_advance_uniqueid(ump);
3912 efe->unique_id = udf_rw64(unique_id);
3913
3914 fidsize = 0;
3915 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
3916 fidsize = udf_create_parentfid(ump,
3917 (struct fileid_desc *) efe->data, parent_icb,
3918 parent_unique_id);
3919 }
3920
3921 /* record fidlength information */
3922 efe->obj_size = udf_rw64(fidsize);
3923 efe->inf_len = udf_rw64(fidsize);
3924 efe->l_ea = udf_rw32(0);
3925 efe->l_ad = udf_rw32(fidsize);
3926 efe->logblks_rec = udf_rw64(0); /* intern */
3927
3928 crclen = sizeof(struct extfile_entry) - 1 - UDF_DESC_TAG_LENGTH;
3929 crclen += fidsize;
3930 efe->tag.desc_crc_len = udf_rw16(crclen);
3931
3932 (void) udf_validate_tag_and_crc_sums((union dscrptr *) efe);
3933
3934 return fidsize;
3935 }
3936
3937 /* --------------------------------------------------------------------- */
3938
3939 int
3940 udf_dir_detach(struct udf_mount *ump, struct udf_node *dir_node,
3941 struct udf_node *udf_node, struct componentname *cnp)
3942 {
3943 struct file_entry *fe = dir_node->fe;
3944 struct extfile_entry *efe = dir_node->efe;
3945 struct fileid_desc *fid;
3946 struct dirent *dirent;
3947 uint64_t file_size, diroffset;
3948 uint32_t lb_size, fidsize;
3949 int found, error;
3950 char const *name = cnp->cn_nameptr;
3951 int namelen = cnp->cn_namelen;
3952 int refcnt;
3953
3954 /* get directory filesize */
3955 if (fe) {
3956 file_size = udf_rw64(fe->inf_len);
3957 } else {
3958 assert(efe);
3959 file_size = udf_rw64(efe->inf_len);
3960 }
3961
3962 /* allocate temporary space for fid */
3963 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
3964 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
3965
3966 found = 0;
3967 diroffset = dir_node->last_diroffset;
3968
3969 /*
3970  * if the directory was truncated or if we have never visited it yet,
3971 * start at the end.
3972 */
3973 if ((diroffset >= file_size) || (diroffset == 0)) {
3974 diroffset = dir_node->last_diroffset = file_size;
3975 }
3976
3977 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
3978
3979 while (!found) {
3980 /* if at the end, wrap around through zero */
3981 if (diroffset >= file_size)
3982 diroffset = 0;
3983
3984 /* transfer a new fid/dirent */
3985 error = udf_read_fid_stream(dir_node->vnode, &diroffset,
3986 fid, dirent);
3987 if (error)
3988 break;
3989
3990 /* skip deleted entries */
3991 if ((fid->file_char & UDF_FILE_CHAR_DEL) == 0) {
3992 if ((strlen(dirent->d_name) == namelen) &&
3993 (strncmp(dirent->d_name, name, namelen) == 0)) {
3994 found = 1;
3995 }
3996 }
3997
3998 if (diroffset == dir_node->last_diroffset) {
3999 /* we have cycled */
4000 break;
4001 }
4002 }
4003 if (!found) {
4004 free(fid, M_UDFTEMP);
4005 free(dirent, M_UDFTEMP);
4006 dir_node->last_diroffset = diroffset;
4007 return ENOENT;
4008 }
4009
4010 /* mark deleted */
4011 fid->file_char |= UDF_FILE_CHAR_DEL;
4012 #ifdef UDF_COMPLETE_DELETE
4013 memset(&fid->icb, 0, sizeof(fid->icb));
4014 #endif
4015 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
4016
4017 /* roll back last advance from udf_read_fid_stream */
4018 fidsize = udf_fidsize(fid);
4019 diroffset -= fidsize;
4020
4021 /* write out */
4022 error = vn_rdwr(UIO_WRITE, dir_node->vnode,
4023 fid, fidsize, diroffset,
4024 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
4025 FSCRED, NULL, NULL);
4026 if (error == 0) {
4027 /* get reference count of attached node */
4028 if (udf_node->fe) {
4029 refcnt = udf_rw16(udf_node->fe->link_cnt);
4030 } else {
4031 KASSERT(udf_node->efe);
4032 refcnt = udf_rw16(udf_node->efe->link_cnt);
4033 }
4034 #ifdef UDF_COMPLETE_DELETE
4035 /* subtract reference counter in attached node */
4036 refcnt -= 1;
4037 if (udf_node->fe) {
4038 udf_node->fe->link_cnt = udf_rw16(refcnt);
4039 } else {
4040 udf_node->efe->link_cnt = udf_rw16(refcnt);
4041 }
4042
4043 /* prevent writeout when refcnt == 0 */
4044 if (refcnt == 0)
4045 udf_node->i_flags |= IN_DELETED;
4046
4047 if (fid->file_char & UDF_FILE_CHAR_DIR) {
4048 int drefcnt;
4049
4050 /* subtract reference counter in directory node */
4051 /* note: arguably subtract 2 (?) since it was also backreferenced */
4052 if (dir_node->fe) {
4053 drefcnt = udf_rw16(dir_node->fe->link_cnt);
4054 drefcnt -= 1;
4055 dir_node->fe->link_cnt = udf_rw16(drefcnt);
4056 } else {
4057 KASSERT(dir_node->efe);
4058 drefcnt = udf_rw16(dir_node->efe->link_cnt);
4059 drefcnt -= 1;
4060 dir_node->efe->link_cnt = udf_rw16(drefcnt);
4061 }
4062 }
4063
4064 udf_node->i_flags |= IN_MODIFIED;
4065 dir_node->i_flags |= IN_MODIFIED;
4066 #endif
4067 /* if it is/was a hardlink adjust the file count */
4068 if (refcnt > 0)
4069 udf_adjust_filecount(udf_node, -1);
4070
4071 /* XXX we could restart at the deleted entry */
4072 diroffset = 0;
4073 }
4074
4075 free(fid, M_UDFTEMP);
4076 free(dirent, M_UDFTEMP);
4077 dir_node->last_diroffset = diroffset;
4078
4079 return error;
4080 }
4081
4082 /* --------------------------------------------------------------------- */
4083
4084 /*
4085  * We are not allowed to split the fid tag itself over a logical block so
4086  * check the space remaining in the logical block.
4087  *
4088  * We try to select the smallest candidate for recycling or, when none is
4089  * found, append a new one at the end of the directory.
4090 */
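
/*
 * Illustrative sketch only: the sizing rule applied below, written out as a
 * self-contained helper. udf_example_fid_fits is a hypothetical name; the
 * real checks live inline in udf_dir_attach().
 */
#if 0
static int
udf_example_fid_fits(uint32_t lb_size, uint64_t fid_pos, uint32_t l_iu,
    uint32_t l_fi)
{
	uint32_t fidsize, lb_rest;

	/* a FID is the fixed header plus impl. use and name, padded to 4 bytes */
	fidsize = UDF_FID_SIZE + l_iu + l_fi;
	fidsize = (fidsize + 3) & ~3;

	/* the descriptor tag of the next FID may not straddle a logical block */
	lb_rest = lb_size - (uint32_t) ((fid_pos + fidsize) % lb_size);

	return (lb_rest >= sizeof(struct desc_tag));
}
#endif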
4091
4092 int
4093 udf_dir_attach(struct udf_mount *ump, struct udf_node *dir_node,
4094 struct udf_node *udf_node, struct vattr *vap, struct componentname *cnp)
4095 {
4096 struct vnode *dvp = dir_node->vnode;
4097 struct fileid_desc *fid;
4098 struct icb_tag *icbtag;
4099 struct charspec osta_charspec;
4100 struct dirent dirent;
4101 uint64_t unique_id, dir_size, diroffset;
4102 uint64_t fid_pos, end_fid_pos, chosen_fid_pos;
4103 uint32_t chosen_size, chosen_size_diff;
4104 int lb_size, lb_rest, fidsize, this_fidsize, size_diff;
4105 int file_char, refcnt, icbflags, addr_type, error;
4106
4107 lb_size = udf_rw32(ump->logical_vol->lb_size);
4108 udf_osta_charset(&osta_charspec);
4109
4110 if (dir_node->fe) {
4111 dir_size = udf_rw64(dir_node->fe->inf_len);
4112 icbtag = &dir_node->fe->icbtag;
4113 } else {
4114 dir_size = udf_rw64(dir_node->efe->inf_len);
4115 icbtag = &dir_node->efe->icbtag;
4116 }
4117
4118 icbflags = udf_rw16(icbtag->flags);
4119 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
4120
4121 if (udf_node->fe) {
4122 unique_id = udf_rw64(udf_node->fe->unique_id);
4123 refcnt = udf_rw16(udf_node->fe->link_cnt);
4124 } else {
4125 unique_id = udf_rw64(udf_node->efe->unique_id);
4126 refcnt = udf_rw16(udf_node->efe->link_cnt);
4127 }
4128
4129 if (refcnt > 0) {
4130 unique_id = udf_advance_uniqueid(ump);
4131 udf_adjust_filecount(udf_node, 1);
4132 }
4133
4134
4135 /* determine file characteristics */
4136 file_char = 0; /* visible non deleted file and not stream metadata */
4137 if (vap->va_type == VDIR)
4138 file_char = UDF_FILE_CHAR_DIR;
4139
4140 /* malloc scrap buffer */
4141 fid = malloc(lb_size, M_TEMP, M_WAITOK);
4142 bzero(fid, lb_size);
4143
4144 /* calculate _minimum_ fid size */
4145 unix_to_udf_name((char *) fid->data, &fid->l_fi,
4146 cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
4147 fidsize = UDF_FID_SIZE + fid->l_fi;
4148 fidsize = (fidsize + 3) & ~3; /* multiple of 4 */
4149
4150 /* find position that will fit the FID */
4151 diroffset = dir_node->last_diroffset;
4152
4153 /*
4154  * if the directory was truncated or if we have never visited it yet,
4155 * start at the end.
4156 */
4157 if ((diroffset >= dir_size) || (diroffset == 0)) {
4158 diroffset = dir_node->last_diroffset = dir_size;
4159 }
4160
4161 chosen_fid_pos = diroffset;
4162 chosen_size = 0;
4163 chosen_size_diff = UINT_MAX;
4164
4165 for (;;) {
4166 /* if at the end, wrap around through zero */
4167 if (diroffset >= dir_size)
4168 diroffset = 0;
4169
4170 /* get fid/dirent */
4171 fid_pos = diroffset;
4172 error = udf_read_fid_stream(dvp, &diroffset, fid, &dirent);
4173 if (error)
4174 break;
4175
4176 this_fidsize = udf_fidsize(fid);
4177
4178 /* reuse deleted entries */
4179 if ((fid->file_char & UDF_FILE_CHAR_DEL)) {
4180 size_diff = this_fidsize - fidsize;
4181 end_fid_pos = fid_pos + this_fidsize;
4182 lb_rest = lb_size - (end_fid_pos % lb_size);
4183
4184 #ifndef UDF_COMPLETE_DELETE
4185 /* only reuse entries that are wiped */
4186 /* check if the len + loc are marked zero */
4187 if (udf_rw32(fid->icb.len) != 0)
4188 break;
4189 if (udf_rw32(fid->icb.loc.lb_num) != 0)
4190 break;
4191 if (udf_rw16(fid->icb.loc.part_num) != 0)
4192 break;
4193 #endif
4194 /* select if not splitting the tag and it's smaller */
4195 if ((size_diff >= 0) &&
4196 (size_diff < chosen_size_diff) &&
4197 (lb_rest >= sizeof(struct desc_tag)))
4198 {
4199 /* UDF 2.3.4.2+3 specifies rules for iu size */
4200 if ((size_diff == 0) || (size_diff >= 32)) {
4201 chosen_fid_pos = fid_pos;
4202 chosen_size = this_fidsize;
4203 chosen_size_diff = size_diff;
4204 }
4205 }
4206 }
4207
4208 if (diroffset == dir_node->last_diroffset) {
4209 /* we have cycled */
4210 break;
4211 }
4212 }
4213 /* unlikely */
4214 if (error) {
4215 free(fid, M_TEMP);
4216 return error;
4217 }
4218
4219 /* extend directory if no other candidate found */
4220 if (chosen_size == 0) {
4221 chosen_fid_pos = dir_size;
4222 chosen_size = fidsize;
4223 chosen_size_diff = 0;
4224
4225 /* special case UDF 2.00+ 2.3.4.4, no splitting up fid tag */
4226 if (addr_type == UDF_ICB_INTERN_ALLOC) {
4227 /* pre-grow directory to see if we're to switch */
4228 udf_grow_node(dir_node, dir_size + chosen_size);
4229
4230 icbflags = udf_rw16(icbtag->flags);
4231 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
4232 }
4233
4234 /* make sure the next fid desc_tag won't be split */
4235 if (addr_type != UDF_ICB_INTERN_ALLOC) {
4236 end_fid_pos = chosen_fid_pos + chosen_size;
4237 lb_rest = lb_size - (end_fid_pos % lb_size);
4238
4239 /* pad with implementation use regid if needed */
4240 if (lb_rest < sizeof(struct desc_tag))
4241 chosen_size += 32;
4242 }
4243 }
4244 chosen_size_diff = chosen_size - fidsize;
4245 diroffset = chosen_fid_pos + chosen_size;
4246
4247 /* populate the FID */
4248 memset(fid, 0, lb_size);
4249 udf_inittag(ump, &fid->tag, TAGID_FID, 0);
4250 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
4251 fid->file_char = file_char;
4252 fid->icb = udf_node->loc;
4253 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
4254 fid->l_iu = udf_rw16(0);
4255
4256 if (chosen_size > fidsize) {
4257 /* insert implementation-use regid to space it correctly */
4258 fid->l_iu = udf_rw16(chosen_size_diff);
4259
4260 /* set implementation use */
4261 udf_set_regid((struct regid *) fid->data, IMPL_NAME);
4262 udf_add_impl_regid(ump, (struct regid *) fid->data);
4263 }
4264
4265 /* fill in name */
4266 unix_to_udf_name((char *) fid->data + udf_rw16(fid->l_iu),
4267 &fid->l_fi, cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
4268
4269 fid->tag.desc_crc_len = udf_rw16(chosen_size - UDF_DESC_TAG_LENGTH);
4270 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
4271
4272 /* writeout FID/update parent directory */
4273 error = vn_rdwr(UIO_WRITE, dvp,
4274 fid, chosen_size, chosen_fid_pos,
4275 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
4276 FSCRED, NULL, NULL);
4277
4278 if (error) {
4279 free(fid, M_TEMP);
4280 return error;
4281 }
4282
4283 /* add reference counter in attached node */
4284 if (udf_node->fe) {
4285 refcnt = udf_rw16(udf_node->fe->link_cnt);
4286 udf_node->fe->link_cnt = udf_rw16(refcnt+1);
4287 } else {
4288 KASSERT(udf_node->efe);
4289 refcnt = udf_rw16(udf_node->efe->link_cnt);
4290 udf_node->efe->link_cnt = udf_rw16(refcnt+1);
4291 }
4292
4293 /* mark not deleted if it was... just in case, but do warn */
4294 if (udf_node->i_flags & IN_DELETED) {
4295 printf("udf: warning, marking a file undeleted\n");
4296 udf_node->i_flags &= ~IN_DELETED;
4297 }
4298
4299 if (file_char & UDF_FILE_CHAR_DIR) {
4300 /* add reference counter in directory node for '..' */
4301 if (dir_node->fe) {
4302 refcnt = udf_rw16(dir_node->fe->link_cnt);
4303 refcnt++;
4304 dir_node->fe->link_cnt = udf_rw16(refcnt);
4305 } else {
4306 KASSERT(dir_node->efe);
4307 refcnt = udf_rw16(dir_node->efe->link_cnt);
4308 refcnt++;
4309 dir_node->efe->link_cnt = udf_rw16(refcnt);
4310 }
4311 }
4312
4313 /* update our last position so we don't have to cycle again and again */
4314 dir_node->last_diroffset = diroffset;
4315
4316 udf_node->i_flags |= IN_CHANGE | IN_MODIFY; /* | IN_CREATE? */
4317 /* VN_KNOTE(udf_node, ...) */
4318 udf_update(udf_node->vnode, NULL, NULL, 0);
4319
4320 free(fid, M_TEMP);
4321
4322 return 0;
4323 }
4324
4325 /* --------------------------------------------------------------------- */
4326
4327 /*
4328 * Each node can have an attached streamdir node though not recursively. These
4329 * are otherwise known as named substreams/named extended attributes that have
4330 * no size limitations.
4331 *
4332 * `Normal' extended attributes are indicated with a number and are recorded
4333 * in either the fe/efe descriptor itself for small descriptors or recorded in
4334 * the attached extended attribute file. Since these spaces can get
4335 * fragmented, care ought to be taken.
4336 *
4337 * Since the size of the space reserved for allocation descriptors is limited,
4338  * there is a mechanism provided for extending this space; this is done by a
4339  * special extent to allow shrinking of the allocations without breaking the
4340 * linkage to the allocation extent descriptor.
4341 */
4342
4343 int
4344 udf_get_node(struct udf_mount *ump, struct long_ad *node_icb_loc,
4345 struct udf_node **udf_noderes)
4346 {
4347 union dscrptr *dscr;
4348 struct udf_node *udf_node;
4349 struct vnode *nvp;
4350 struct long_ad icb_loc, next_loc, last_fe_icb_loc;
4351 uint64_t file_size;
4352 uint32_t lb_size, sector, dummy;
4353 uint8_t *file_data;
4354 int udf_file_type, dscr_type, strat, strat4096, needs_indirect;
4355 int slot, eof, error;
4356
4357 DPRINTF(NODE, ("udf_get_node called\n"));
4358 *udf_noderes = udf_node = NULL;
4359
4360 /* lock to disallow simultaneous creation of same udf_node */
4361 mutex_enter(&ump->get_node_lock);
4362
4363 DPRINTF(NODE, ("\tlookup in hash table\n"));
4364 /* lookup in hash table */
4365 assert(ump);
4366 assert(node_icb_loc);
4367 udf_node = udf_hash_lookup(ump, node_icb_loc);
4368 if (udf_node) {
4369 DPRINTF(NODE, ("\tgot it from the hash!\n"));
4370 /* vnode is returned locked */
4371 *udf_noderes = udf_node;
4372 mutex_exit(&ump->get_node_lock);
4373 return 0;
4374 }
4375
4376 /* garbage check: translate udf_node_icb_loc to sectornr */
4377 error = udf_translate_vtop(ump, node_icb_loc, &sector, &dummy);
4378 if (error) {
4379 /* no use, this will fail anyway */
4380 mutex_exit(&ump->get_node_lock);
4381 return EINVAL;
4382 }
4383
4384 /* build udf_node (do initialise!) */
4385 udf_node = pool_get(&udf_node_pool, PR_WAITOK);
4386 memset(udf_node, 0, sizeof(struct udf_node));
4387
4388 DPRINTF(NODE, ("\tget new vnode\n"));
4389 /* give it a vnode */
4390 error = getnewvnode(VT_UDF, ump->vfs_mountp, udf_vnodeop_p, &nvp);
4391 if (error) {
4392 pool_put(&udf_node_pool, udf_node);
4393 mutex_exit(&ump->get_node_lock);
4394 return error;
4395 }
4396
4397 /* always return locked vnode */
4398 if ((error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY))) {
4399 /* recycle vnode and unlock; simultaneous creation will fail too */
4400 ungetnewvnode(nvp);
4401 mutex_exit(&ump->get_node_lock);
4402 return error;
4403 }
4404
4405 /* initialise crosslinks, note location of fe/efe for hashing */
4406 udf_node->ump = ump;
4407 udf_node->vnode = nvp;
4408 nvp->v_data = udf_node;
4409 udf_node->loc = *node_icb_loc;
4410 udf_node->lockf = 0;
4411 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
4412 cv_init(&udf_node->node_lock, "udf_nlk");
4413 genfs_node_init(nvp, &udf_genfsops); /* initialise genfs */
4414 udf_node->outstanding_bufs = 0;
4415 udf_node->outstanding_nodedscr = 0;
4416
4417 /* insert into the hash lookup */
4418 udf_register_node(udf_node);
4419
4420 /* safe to unlock, the entry is in the hash table, vnode is locked */
4421 mutex_exit(&ump->get_node_lock);
4422
4423 icb_loc = *node_icb_loc;
4424 needs_indirect = 0;
4425 strat4096 = 0;
4426 udf_file_type = UDF_ICB_FILETYPE_UNKNOWN;
4427 file_size = 0;
4428 file_data = NULL;
4429 lb_size = udf_rw32(ump->logical_vol->lb_size);
4430
4431 DPRINTF(NODE, ("\tstart reading descriptors\n"));
4432 do {
4433 /* try to read in fe/efe */
4434 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
4435
4436 /* blank sector marks end of sequence, check this */
4437 if ((dscr == NULL) && (!strat4096))
4438 error = ENOENT;
4439
4440 /* break if read error or blank sector */
4441 if (error || (dscr == NULL))
4442 break;
4443
4444 /* process descriptor based on the descriptor type */
4445 dscr_type = udf_rw16(dscr->tag.id);
4446 DPRINTF(NODE, ("\tread descriptor %d\n", dscr_type));
4447
4448 /* if dealing with an indirect entry, follow the link */
4449 if (dscr_type == TAGID_INDIRECTENTRY) {
4450 needs_indirect = 0;
4451 next_loc = dscr->inde.indirect_icb;
4452 udf_free_logvol_dscr(ump, &icb_loc, dscr);
icb_loc = next_loc;
4453 continue;
4454 }
4455
4456 /* only file entries and extended file entries allowed here */
4457 if ((dscr_type != TAGID_FENTRY) &&
4458 (dscr_type != TAGID_EXTFENTRY)) {
4459 udf_free_logvol_dscr(ump, &icb_loc, dscr);
4460 error = ENOENT;
4461 break;
4462 }
4463
4464 KASSERT(udf_tagsize(dscr, lb_size) == lb_size);
4465
4466 /* choose this one */
4467 last_fe_icb_loc = icb_loc;
4468
4469 /* record and process/update (ext)fentry */
4470 file_data = NULL;
4471 if (dscr_type == TAGID_FENTRY) {
4472 if (udf_node->fe)
4473 udf_free_logvol_dscr(ump, &last_fe_icb_loc,
4474 udf_node->fe);
4475 udf_node->fe = &dscr->fe;
4476 strat = udf_rw16(udf_node->fe->icbtag.strat_type);
4477 udf_file_type = udf_node->fe->icbtag.file_type;
4478 file_size = udf_rw64(udf_node->fe->inf_len);
4479 file_data = udf_node->fe->data;
4480 } else {
4481 if (udf_node->efe)
4482 udf_free_logvol_dscr(ump, &last_fe_icb_loc,
4483 udf_node->efe);
4484 udf_node->efe = &dscr->efe;
4485 strat = udf_rw16(udf_node->efe->icbtag.strat_type);
4486 udf_file_type = udf_node->efe->icbtag.file_type;
4487 file_size = udf_rw64(udf_node->efe->inf_len);
4488 file_data = udf_node->efe->data;
4489 }
4490
4491 /* check recording strategy (structure) */
4492
4493 /*
4494 * Strategy 4096 is a daisy linked chain terminating with an
4495 * unrecorded sector or a TERM descriptor. The next
4496 * descriptor is to be found in the sector that follows the
4497 * current sector.
4498 */
4499 if (strat == 4096) {
4500 strat4096 = 1;
4501 needs_indirect = 1;
4502
4503 icb_loc.loc.lb_num = udf_rw32(icb_loc.loc.lb_num) + 1;
4504 }
4505
4506 /*
4507 * Strategy 4 is the normal strategy and terminates, but if
4508 * we're in strategy 4096, we can't have strategy 4 mixed in
4509 */
4510
4511 if (strat == 4) {
4512 if (strat4096) {
4513 error = EINVAL;
4514 break;
4515 }
4516 break; /* done */
4517 }
4518 } while (!error);
4519
4520 /* first round of cleanup code */
4521 if (error) {
4522 DPRINTF(NODE, ("\tnode fe/efe failed!\n"));
4523 /* recycle udf_node */
4524 udf_dispose_node(udf_node);
4525
4526 vlockmgr(nvp->v_vnlock, LK_RELEASE);
4527 nvp->v_data = NULL;
4528 ungetnewvnode(nvp);
4529
4530 return EINVAL; /* error code ok? */
4531 }
4532 DPRINTF(NODE, ("\tnode fe/efe read in fine\n"));
4533
4534 /* assert no references to dscr anymore beyond this point */
4535 assert((udf_node->fe) || (udf_node->efe));
4536 dscr = NULL;
4537
4538 /*
4539 * Remember where to record an updated version of the descriptor. If
4540 * there is a sequence of indirect entries, icb_loc will have been
4541  * updated. It's the write discipline's job to allocate new space and to make
4542 * sure the chain is maintained.
4543 *
4544  * `needs_indirect' flags if the next location is to be filled
4545  * with an indirect entry.
4546 */
4547 udf_node->write_loc = icb_loc;
4548 udf_node->needs_indirect = needs_indirect;
4549
4550 /*
4551  * Go through all allocation extents of this descriptor and when
4552 * encountering a redirect read in the allocation extension. These are
4553 * daisy-chained.
4554 */
4555 UDF_LOCK_NODE(udf_node, 0);
4556 udf_node->num_extensions = 0;
4557
4558 error = 0;
4559 slot = 0;
4560 for (;;) {
4561 udf_get_adslot(udf_node, slot, &icb_loc, &eof);
4562 if (eof)
4563 break;
4564
4565 if (UDF_EXT_FLAGS(udf_rw32(icb_loc.len)) != UDF_EXT_REDIRECT) {
4566 slot++;
4567 continue;
4568 }
4569
4570 DPRINTF(NODE, ("\tgot redirect extent\n"));
4571 if (udf_node->num_extensions >= UDF_MAX_ALLOC_EXTENTS) {
4572 DPRINTF(ALLOC, ("udf_get_node: implementation limit, "
4573 "too many allocation extensions on "
4574 "udf_node\n"));
4575 error = EINVAL;
4576 break;
4577 }
4578
4579 /* length can only be *one* lb : UDF 2.50/2.3.7.1 */
4580 if (udf_rw32(icb_loc.len) != lb_size) {
4581 DPRINTF(ALLOC, ("udf_get_node: bad allocation "
4582 "extension size in udf_node\n"));
4583 error = EINVAL;
4584 break;
4585 }
4586
4587 /* load in allocation extent */
4588 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
4589 if (error || (dscr == NULL))
4590 break;
4591
4592 /* process read-in descriptor */
4593 dscr_type = udf_rw16(dscr->tag.id);
4594
4595 if (dscr_type != TAGID_ALLOCEXTENT) {
4596 udf_free_logvol_dscr(ump, &icb_loc, dscr);
4597 error = ENOENT;
4598 break;
4599 }
4600
4601 DPRINTF(NODE, ("\trecording redirect extent\n"));
4602 udf_node->ext[udf_node->num_extensions] = &dscr->aee;
4603 udf_node->ext_loc[udf_node->num_extensions] = icb_loc;
4604
4605 udf_node->num_extensions++;
4606
4607 } /* while */
4608 UDF_UNLOCK_NODE(udf_node, 0);
4609
4610 /* second round of cleanup code */
4611 if (error) {
4612 /* recycle udf_node */
4613 udf_dispose_node(udf_node);
4614
4615 vlockmgr(nvp->v_vnlock, LK_RELEASE);
4616 nvp->v_data = NULL;
4617 ungetnewvnode(nvp);
4618
4619 return EINVAL; /* error code ok? */
4620 }
4621
4622 DPRINTF(NODE, ("\tnode read in fine\n"));
4623
4624 /*
4625 * Translate UDF filetypes into vnode types.
4626 *
4627 * Systemfiles like the meta main and mirror files are not treated as
4628 * normal files, so we type them as having no type. UDF dictates that
4629 * they are not allowed to be visible.
4630 */
4631
4632 switch (udf_file_type) {
4633 case UDF_ICB_FILETYPE_DIRECTORY :
4634 case UDF_ICB_FILETYPE_STREAMDIR :
4635 nvp->v_type = VDIR;
4636 break;
4637 case UDF_ICB_FILETYPE_BLOCKDEVICE :
4638 nvp->v_type = VBLK;
4639 break;
4640 case UDF_ICB_FILETYPE_CHARDEVICE :
4641 nvp->v_type = VCHR;
4642 break;
4643 case UDF_ICB_FILETYPE_SOCKET :
4644 nvp->v_type = VSOCK;
4645 break;
4646 case UDF_ICB_FILETYPE_FIFO :
4647 nvp->v_type = VFIFO;
4648 break;
4649 case UDF_ICB_FILETYPE_SYMLINK :
4650 nvp->v_type = VLNK;
4651 break;
4652 case UDF_ICB_FILETYPE_VAT :
4653 case UDF_ICB_FILETYPE_META_MAIN :
4654 case UDF_ICB_FILETYPE_META_MIRROR :
4655 nvp->v_type = VNON;
4656 break;
4657 case UDF_ICB_FILETYPE_RANDOMACCESS :
4658 case UDF_ICB_FILETYPE_REALTIME :
4659 nvp->v_type = VREG;
4660 break;
4661 default:
4662 /* YIKES, something else */
4663 nvp->v_type = VNON;
4664 }
4665
4666 /* TODO specfs, fifofs etc etc. vnops setting */
4667
4668 /* don't forget to set vnode's v_size */
4669 uvm_vnp_setsize(nvp, file_size);
4670
4671 /* TODO ext attr and streamdir udf_nodes */
4672
4673 *udf_noderes = udf_node;
4674
4675 return 0;
4676 }
4677
4678 /* --------------------------------------------------------------------- */
4679
4680
4681 /* TODO !!!!! writeout alloc_ext_entry's!!! */
4682 int
4683 udf_writeout_node(struct udf_node *udf_node, int waitfor)
4684 {
4685 union dscrptr *dscr;
4686 struct long_ad *loc;
4687 int error;
4688
4689 DPRINTF(NODE, ("udf_writeout_node called\n"));
4690
4691 KASSERT(udf_node->outstanding_bufs == 0);
4692 KASSERT(udf_node->outstanding_nodedscr == 0);
4693
4694 KASSERT(LIST_EMPTY(&udf_node->vnode->v_dirtyblkhd));
4695
4696 if (udf_node->i_flags & IN_DELETED) {
4697 DPRINTF(NODE, ("\tnode deleted; not writing out\n"));
4698 return 0;
4699 }
4700
4701 /* we're going to write out the descriptor so clear the flags */
4702 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED);
4703
4704 if (udf_node->fe) {
4705 dscr = (union dscrptr *) udf_node->fe;
4706 } else {
4707 KASSERT(udf_node->efe);
4708 dscr = (union dscrptr *) udf_node->efe;
4709 }
4710 KASSERT(dscr);
4711
4712 loc = &udf_node->write_loc;
4713 error = udf_write_logvol_dscr(udf_node, dscr, loc, waitfor);
4714 return error;
4715 }
4716
4717 /* --------------------------------------------------------------------- */
4718
4719 int
4720 udf_dispose_node(struct udf_node *udf_node)
4721 {
4722 struct vnode *vp;
4723
4724 DPRINTF(NODE, ("udf_dispose_node called on node %p\n", udf_node));
4725 if (!udf_node) {
4726 DPRINTF(NODE, ("UDF: Dispose node on node NULL, ignoring\n"));
4727 return 0;
4728 }
4729
4730 vp = udf_node->vnode;
4731 #ifdef DIAGNOSTIC
4732 if (vp->v_numoutput)
4733 panic("disposing UDF node with pending I/O's, udf_node = %p, "
4734 "v_numoutput = %d", udf_node, vp->v_numoutput);
4735 #endif
4736
4737 /* wait until out of sync (just in case we happen to stumble over one) */
4738 KASSERT(!mutex_owned(&mntvnode_lock));
4739 mutex_enter(&mntvnode_lock);
4740 while (udf_node->i_flags & IN_SYNCED) {
4741 cv_timedwait(&udf_node->ump->dirtynodes_cv, &mntvnode_lock,
4742 hz/16);
4743 }
4744 mutex_exit(&mntvnode_lock);
4745
4746 /* TODO extended attributes and streamdir */
4747
4748 /* remove from our hash lookup table */
4749 udf_deregister_node(udf_node);
4750
4751 /* destroy our lock */
4752 mutex_destroy(&udf_node->node_mutex);
4753 cv_destroy(&udf_node->node_lock);
4754
4755 /* dissociate our udf_node from the vnode */
4756 genfs_node_destroy(udf_node->vnode);
4757 vp->v_data = NULL;
4758
4759 /* free associated memory and the node itself */
4760 if (udf_node->fe)
4761 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc, udf_node->fe);
4762 if (udf_node->efe)
4763 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc, udf_node->efe);
4764
4765 udf_node->fe = (void *) 0xdeadaaaa;
4766 udf_node->efe = (void *) 0xdeadbbbb;
4767 udf_node->ump = (void *) 0xdeadbeef;
4768 pool_put(&udf_node_pool, udf_node);
4769
4770 return 0;
4771 }
4772
4773
4774
4775 /*
4776 * create a new node using the specified vnodeops, vap and cnp but with the
4777 * udf_file_type. This allows special files to be created. Use with care.
4778 */
4779
4780 static int
4781 udf_create_node_raw(struct vnode *dvp, struct vnode **vpp, int udf_file_type,
4782 int (**vnodeops)(void *), struct vattr *vap, struct componentname *cnp)
4783 {
4784 union dscrptr *dscr;
4785 struct udf_node *dir_node = VTOI(dvp);
4786 struct udf_node *udf_node;
4787 struct udf_mount *ump = dir_node->ump;
4788 struct vnode *nvp;
4789 struct long_ad node_icb_loc;
4790 uint64_t parent_unique_id;
4791 uint64_t lmapping, pmapping;
4792 uint32_t lb_size, lb_num;
4793 uint16_t vpart_num;
4794 int fid_size, error;
4795
4796 lb_size = udf_rw32(ump->logical_vol->lb_size);
4797 *vpp = NULL;
4798
4799 /* allocate vnode */
4800 error = getnewvnode(VT_UDF, ump->vfs_mountp, vnodeops, &nvp);
4801 if (error)
4802 return error;
4803
4804 /* lock node */
4805 error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY);
4806 if (error) {
4807 nvp->v_data = NULL;
4808 ungetnewvnode(nvp);
4809 return error;
4810 }
4811
4812 /* get disc allocation for one logical block */
4813 error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
4814 &vpart_num, &lmapping, &pmapping);
4815 lb_num = lmapping;
4816 if (error) {
4817 vlockmgr(nvp->v_vnlock, LK_RELEASE);
4818 ungetnewvnode(nvp);
4819 return error;
4820 }
4821
4822 /* initialise pointer to location */
4823 memset(&node_icb_loc, 0, sizeof(struct long_ad));
4824 node_icb_loc.len = udf_rw32(lb_size);
4825 node_icb_loc.loc.lb_num = udf_rw32(lb_num);
4826 node_icb_loc.loc.part_num = udf_rw16(vpart_num);
4827
4828 /* build udf_node (do initialise!) */
4829 udf_node = pool_get(&udf_node_pool, PR_WAITOK);
4830 memset(udf_node, 0, sizeof(struct udf_node));
4831
4832 /* initialise crosslinks, note location of fe/efe for hashing */
4833 /* bugalert: synchronise with udf_get_node() */
4834 udf_node->ump = ump;
4835 udf_node->vnode = nvp;
4836 nvp->v_data = udf_node;
4837 udf_node->loc = node_icb_loc;
4838 udf_node->write_loc = node_icb_loc;
4839 udf_node->lockf = 0;
4840 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
4841 cv_init(&udf_node->node_lock, "udf_nlk");
4842 udf_node->outstanding_bufs = 0;
4843 udf_node->outstanding_nodedscr = 0;
4844
4845 /* initialise genfs */
4846 genfs_node_init(nvp, &udf_genfsops);
4847
4848 /* insert into the hash lookup */
4849 udf_register_node(udf_node);
4850
4851 /* get parent's unique ID for referring to '..' if it's a directory */
4852 if (dir_node->fe) {
4853 parent_unique_id = udf_rw64(dir_node->fe->unique_id);
4854 } else {
4855 parent_unique_id = udf_rw64(dir_node->efe->unique_id);
4856 }
4857
4858 /* get descriptor */
4859 udf_create_logvol_dscr(ump, udf_node, &node_icb_loc, &dscr);
4860
4861 /* choose a fe or an efe for it */
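	/*
	 * Descriptor version 2 volumes (ECMA 167 2nd edition, i.e. pre
	 * UDF 2.00 media) only know plain file entries; version 3 volumes
	 * get the extended variant.
	 */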
4862 if (ump->logical_vol->tag.descriptor_ver == 2) {
4863 udf_node->fe = &dscr->fe;
4864 fid_size = udf_create_new_fe(ump, udf_node->fe,
4865 udf_file_type, &udf_node->loc,
4866 &dir_node->loc, parent_unique_id);
4867 /* TODO add extended attribute for creation time */
4868 } else {
4869 udf_node->efe = &dscr->efe;
4870 fid_size = udf_create_new_efe(ump, udf_node->efe,
4871 udf_file_type, &udf_node->loc,
4872 &dir_node->loc, parent_unique_id);
4873 }
4874 KASSERT(dscr->tag.tag_loc == udf_node->loc.loc.lb_num);
4875
4876 /* update vnode's size and type */
4877 nvp->v_type = vap->va_type;
4878 uvm_vnp_setsize(nvp, fid_size);
4879
4880 /* set access mode */
4881 udf_setaccessmode(udf_node, vap->va_mode);
4882
4883 /* set ownership */
4884 udf_setownership(udf_node, vap->va_uid, vap->va_gid);
4885
4886 error = udf_dir_attach(ump, dir_node, udf_node, vap, cnp);
4887 if (error) {
4888 /* free disc allocation for node */
4889 udf_free_allocated_space(ump, lb_num, vpart_num, 1);
4890
4891 /* recycle udf_node */
4892 udf_dispose_node(udf_node);
4893 vput(nvp);
4894
4895 *vpp = NULL;
4896 return error;
4897 }
4898
4899 /* adjust file count */
4900 udf_adjust_filecount(udf_node, 1);
4901
4902 /* return result */
4903 *vpp = nvp;
4904
4905 return 0;
4906 }
4907
4908
4909 int
4910 udf_create_node(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
4911 struct componentname *cnp)
4912 {
4913 int (**vnodeops)(void *);
4914 int udf_file_type;
4915
4916 DPRINTF(NODE, ("udf_create_node called\n"));
4917
4918 /* what type are we creating ? */
4919 vnodeops = udf_vnodeop_p;
4920 /* start with a default */
4921 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
4922
4923 *vpp = NULL;
4924
4925 switch (vap->va_type) {
4926 case VREG :
4927 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
4928 break;
4929 case VDIR :
4930 udf_file_type = UDF_ICB_FILETYPE_DIRECTORY;
4931 break;
4932 case VLNK :
4933 udf_file_type = UDF_ICB_FILETYPE_SYMLINK;
4934 break;
4935 case VBLK :
4936 udf_file_type = UDF_ICB_FILETYPE_BLOCKDEVICE;
4937 /* specfs */
4938 return ENOTSUP;
4939 break;
4940 case VCHR :
4941 udf_file_type = UDF_ICB_FILETYPE_CHARDEVICE;
4942 /* specfs */
4943 return ENOTSUP;
4944 break;
4945 case VFIFO :
4946 udf_file_type = UDF_ICB_FILETYPE_FIFO;
4947 /* specfs */
4948 return ENOTSUP;
4949 break;
4950 case VSOCK :
4951 udf_file_type = UDF_ICB_FILETYPE_SOCKET;
4952 /* specfs */
4953 return ENOTSUP;
4954 break;
4955 case VNON :
4956 case VBAD :
4957 default :
4958 /* nothing; can we even create these? */
4959 return EINVAL;
4960 }
4961
4962 return udf_create_node_raw(dvp, vpp, udf_file_type, vnodeops, vap, cnp);
4963 }
4964
4965 /* --------------------------------------------------------------------- */
4966
4967 static void
4968 udf_free_descriptor_space(struct udf_node *udf_node, struct long_ad *loc, void *mem)
4969 {
4970 struct udf_mount *ump = udf_node->ump;
4971 uint32_t lb_size, lb_num, len, num_lb;
4972 uint16_t vpart_num;
4973
4974 /* is there really one? */
4975 if (mem == NULL)
4976 return;
4977
4978 /* got a descriptor here */
4979 len = udf_rw32(loc->len);
4980 lb_num = udf_rw32(loc->loc.lb_num);
4981 vpart_num = udf_rw16(loc->loc.part_num);
4982
4983 lb_size = udf_rw32(ump->logical_vol->lb_size);
4984 num_lb = (len + lb_size -1) / lb_size;
4985
4986 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
4987 }
4988
4989 void
4990 udf_delete_node(struct udf_node *udf_node)
4991 {
4992 void *dscr;
4993 struct udf_mount *ump;
4994 struct long_ad *loc;
4995 int extnr, lvint, dummy;
4996
4997 ump = udf_node->ump;
4998
4999 /* paranoia check on integrity; it should be open! we could panic */
5000 lvint = udf_rw32(udf_node->ump->logvol_integrity->integrity_type);
5001 if (lvint == UDF_INTEGRITY_CLOSED)
5002 printf("\tIntegrity was CLOSED!\n");
5003
5004 /* whatever the node type, change its size to zero */
5005 (void) udf_resize_node(udf_node, 0, &dummy);
5006
5007 /* force it to be `clean'; no use writing it out */
5008 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED | IN_ACCESS |
5009 IN_CHANGE | IN_UPDATE | IN_MODIFY);
5010
5011 /* adjust file count */
5012 udf_adjust_filecount(udf_node, -1);
5013
5014 /*
5015 * Free its allocated descriptors; memory will be released when
5016 * vop_reclaim() is called.
5017 */
5018 loc = &udf_node->loc;
5019
5020 dscr = udf_node->fe;
5021 udf_free_descriptor_space(udf_node, loc, dscr);
5022 dscr = udf_node->efe;
5023 udf_free_descriptor_space(udf_node, loc, dscr);
5024
5025 for (extnr = 0; extnr < UDF_MAX_ALLOC_EXTENTS; extnr++) {
5026 dscr = udf_node->ext[extnr];
5027 loc = &udf_node->ext_loc[extnr];
5028 udf_free_descriptor_space(udf_node, loc, dscr);
5029 }
5030 }
5031
5032 /* --------------------------------------------------------------------- */
5033
5034 /* set new filesize; node must be LOCKED on entry and is locked on exit */
5035 int
5036 udf_resize_node(struct udf_node *udf_node, uint64_t new_size, int *extended)
5037 {
5038 struct file_entry *fe = udf_node->fe;
5039 struct extfile_entry *efe = udf_node->efe;
5040 uint64_t file_size;
5041 int error;
5042
5043 if (fe) {
5044 file_size = udf_rw64(fe->inf_len);
5045 } else {
5046 assert(udf_node->efe);
5047 file_size = udf_rw64(efe->inf_len);
5048 }
5049
5050 DPRINTF(ATTR, ("\tchanging file length from %"PRIu64" to %"PRIu64"\n",
5051 file_size, new_size));
5052
5053 /* if not changing, we're done */
5054 if (file_size == new_size)
5055 return 0;
5056
5057 *extended = (new_size > file_size);
5058 if (*extended) {
5059 error = udf_grow_node(udf_node, new_size);
5060 } else {
5061 error = udf_shrink_node(udf_node, new_size);
5062 }
5063
5064 return error;
5065 }
5066
5067
5068 /* --------------------------------------------------------------------- */
5069
5070 void
5071 udf_itimes(struct udf_node *udf_node, struct timespec *acc,
5072 struct timespec *mod, struct timespec *changed)
5073 {
5074 struct timespec now;
5075 struct file_entry *fe;
5076 struct extfile_entry *efe;
5077 struct timestamp *atime, *mtime, *attrtime;
5078
5079 /* protect against rogue values */
5080 if (!udf_node)
5081 return;
5082
5083 fe = udf_node->fe;
5084 efe = udf_node->efe;
5085
5086 if (!(udf_node->i_flags & (IN_ACCESS|IN_CHANGE|IN_UPDATE|IN_MODIFY)))
5087 return;
5088
5089 /* get descriptor information */
5090 if (fe) {
5091 atime = &fe->atime;
5092 mtime = &fe->mtime;
5093 attrtime = &fe->attrtime;
5094 } else {
5095 assert(udf_node->efe);
5096 atime = &efe->atime;
5097 mtime = &efe->mtime;
5098 attrtime = &efe->attrtime;
5099 }
5100
5101 vfs_timestamp(&now);
5102
5103 /* set access time */
5104 if (udf_node->i_flags & IN_ACCESS) {
5105 if (acc == NULL)
5106 acc = &now;
5107 udf_timespec_to_timestamp(acc, atime);
5108 }
5109
5110 /* set modification time */
5111 if (udf_node->i_flags & (IN_UPDATE | IN_MODIFY)) {
5112 if (mod == NULL)
5113 mod = &now;
5114 udf_timespec_to_timestamp(mod, mtime);
5115 }
5116
5117 /* set change time */
5118 if (udf_node->i_flags & (IN_CHANGE | IN_MODIFY)) {
5119 if (changed == NULL)
5120 changed = &now;
5121 udf_timespec_to_timestamp(changed, attrtime);
5122 }
5123
5124 /* notify updates to the node itself */
5125 if (udf_node->i_flags & (IN_ACCESS | IN_MODIFY))
5126 udf_node->i_flags |= IN_ACCESSED;
5127 if (udf_node->i_flags & (IN_UPDATE | IN_CHANGE))
5128 udf_node->i_flags |= IN_MODIFIED;
5129
5130 /* clear modification flags */
5131 udf_node->i_flags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY);
5132 }
5133
5134 /* --------------------------------------------------------------------- */
5135
5136 int
5137 udf_update(struct vnode *vp, struct timespec *acc,
5138 struct timespec *mod, int updflags)
5139 {
5140 struct udf_node *udf_node = VTOI(vp);
5141 struct udf_mount *ump = udf_node->ump;
5142 struct regid *impl_id;
5143 int mnt_async = (vp->v_mount->mnt_flag & MNT_ASYNC);
5144 int waitfor, flags;
5145
5146 #ifdef DEBUG
5147 char bits[128];
5148 DPRINTF(CALL, ("udf_update(node, %p, %p, %d)\n", acc, mod, updflags));
5149 bitmask_snprintf(udf_node->i_flags, IN_FLAGBITS, bits, sizeof(bits));
5150 DPRINTF(CALL, ("\tnode flags %s\n", bits));
5151 DPRINTF(CALL, ("\t\tmnt_async = %d\n", mnt_async));
5152 #endif
5153
5154 /* set our times */
5155 udf_itimes(udf_node, acc, mod, NULL);
5156
5157 /* set our implementation id */
5158 if (udf_node->fe) {
5159 impl_id = &udf_node->fe->imp_id;
5160 } else {
5161 impl_id = &udf_node->efe->imp_id;
5162 }
5163 udf_set_regid(impl_id, IMPL_NAME);
5164 udf_add_impl_regid(ump, impl_id);
5165
5166 /* if called when mounted readonly, never write back */
5167 if (vp->v_mount->mnt_flag & MNT_RDONLY)
5168 return 0;
5169
5170 /* check if the node is dirty 'enough' */
5171 if (updflags & UPDATE_CLOSE) {
5172 flags = udf_node->i_flags & (IN_MODIFIED | IN_ACCESSED);
5173 } else {
5174 flags = udf_node->i_flags & IN_MODIFIED;
5175 }
5176 if (flags == 0)
5177 return 0;
5178
5179 /* determine if we need to write sync or async */
5180 waitfor = 0;
5181 if ((flags & IN_MODIFIED) && (mnt_async == 0)) {
5182 /* sync mounted */
5183 waitfor = updflags & UPDATE_WAIT;
5184 if (updflags & UPDATE_DIROP)
5185 waitfor |= UPDATE_WAIT;
5186 }
5187 if (waitfor)
5188 return VOP_FSYNC(vp, FSCRED, FSYNC_WAIT, 0,0);
5189
5190 return 0;
5191 }
5192
5193
5194 /* --------------------------------------------------------------------- */
5195
5196 /*
5197  * Read one fid, process it into a dirent and advance to the next. (*fid)
5198  * has to be allocated one logical block in size, (*dirent) a struct dirent.
5199 */
5200
5201 int
5202 udf_read_fid_stream(struct vnode *vp, uint64_t *offset,
5203 struct fileid_desc *fid, struct dirent *dirent)
5204 {
5205 struct udf_node *dir_node = VTOI(vp);
5206 struct udf_mount *ump = dir_node->ump;
5207 struct file_entry *fe = dir_node->fe;
5208 struct extfile_entry *efe = dir_node->efe;
5209 uint32_t fid_size, lb_size;
5210 uint64_t file_size;
5211 char *fid_name;
5212 int enough, error;
5213
5214 assert(fid);
5215 assert(dirent);
5216 assert(dir_node);
5217 assert(offset);
5218 assert(*offset != 1);
5219
5220 DPRINTF(FIDS, ("read_fid_stream called at offset %"PRIu64"\n", *offset));
5221 /* check if we're past the end of the directory */
5222 if (fe) {
5223 file_size = udf_rw64(fe->inf_len);
5224 } else {
5225 assert(dir_node->efe);
5226 file_size = udf_rw64(efe->inf_len);
5227 }
5228 if (*offset >= file_size)
5229 return EINVAL;
5230
5231 /* get maximum length of FID descriptor */
5232 lb_size = udf_rw32(ump->logical_vol->lb_size);
5233
5234 /* initialise return values */
5235 fid_size = 0;
5236 memset(dirent, 0, sizeof(struct dirent));
5237 memset(fid, 0, lb_size);
5238
5239 enough = (file_size - (*offset) >= UDF_FID_SIZE);
5240 if (!enough) {
5241 /* short dir ... */
5242 return EIO;
5243 }
5244
5245 error = vn_rdwr(UIO_READ, vp,
5246 fid, MIN(file_size - (*offset), lb_size), *offset,
5247 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, FSCRED,
5248 NULL, NULL);
5249 if (error)
5250 return error;
5251
5252 DPRINTF(FIDS, ("\tfid piece read in fine\n"));
5253 /*
5254 * Check if we got a whole descriptor.
5255 * TODO Try to `resync' directory stream when something is very wrong.
5256 */
5257
5258 /* check if our FID header is OK */
5259 error = udf_check_tag(fid);
5260 if (error) {
5261 goto brokendir;
5262 }
5263 DPRINTF(FIDS, ("\ttag check ok\n"));
5264
5265 if (udf_rw16(fid->tag.id) != TAGID_FID) {
5266 error = EIO;
5267 goto brokendir;
5268 }
5269 DPRINTF(FIDS, ("\ttag checked ok: got TAGID_FID\n"));
5270
5271 /* check for length */
5272 fid_size = udf_fidsize(fid);
5273 enough = (file_size - (*offset) >= fid_size);
5274 if (!enough) {
5275 error = EIO;
5276 goto brokendir;
5277 }
5278 DPRINTF(FIDS, ("\tthe complete fid is read in\n"));
5279
5280 /* check FID contents */
5281 error = udf_check_tag_payload((union dscrptr *) fid, lb_size);
5282 brokendir:
5283 if (error) {
5284 /* note that this is sometimes a bit quick to report */
5285 printf("BROKEN DIRECTORY ENTRY\n");
5286 /* RESYNC? */
5287 /* TODO: use udf_resync_fid_stream */
5288 return EIO;
5289 }
5290 DPRINTF(FIDS, ("\tpayload checked ok\n"));
5291
5292 /* we got a whole and valid descriptor! */
5293 DPRINTF(FIDS, ("\tinterpret FID\n"));
5294
5295 /* create resulting dirent structure */
5296 fid_name = (char *) fid->data + udf_rw16(fid->l_iu);
5297 udf_to_unix_name(dirent->d_name,
5298 fid_name, fid->l_fi, &ump->logical_vol->desc_charset);
5299
5300 /* '..' has no name, so provide one */
5301 if (fid->file_char & UDF_FILE_CHAR_PAR)
5302 strcpy(dirent->d_name, "..");
5303
5304 dirent->d_fileno = udf_calchash(&fid->icb); /* inode hash XXX */
5305 dirent->d_namlen = strlen(dirent->d_name);
5306 dirent->d_reclen = _DIRENT_SIZE(dirent);
5307
5308 /*
5309  * Note that it's not worth trying to go for the filetypes now... it's
5310  * simply too expensive.
5311 */
5312 dirent->d_type = DT_UNKNOWN;
5313
5314 /* initial guess for filetype we can make */
5315 if (fid->file_char & UDF_FILE_CHAR_DIR)
5316 dirent->d_type = DT_DIR;
5317
5318 /* advance */
5319 *offset += fid_size;
5320
5321 return error;
5322 }
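
/*
 * Illustrative sketch only: a minimal readdir-style walk over the fid stream
 * using the routine above. The helper name and the visit callback are
 * hypothetical; the real consumer is the readdir vnode operation.
 */
#if 0
static int
udf_example_walk_dir(struct vnode *vp, void (*visit)(struct dirent *))
{
	struct udf_node *dir_node = VTOI(vp);
	struct fileid_desc *fid;
	struct dirent *dirent;
	uint64_t file_size, offset;
	uint32_t lb_size;
	int error;

	if (dir_node->fe)
		file_size = udf_rw64(dir_node->fe->inf_len);
	else
		file_size = udf_rw64(dir_node->efe->inf_len);

	/* (*fid) must be one logical block in size, (*dirent) a struct dirent */
	lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
	fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
	dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);

	error = 0;
	for (offset = 0; offset < file_size; ) {
		/* reads one fid and advances offset past it */
		error = udf_read_fid_stream(vp, &offset, fid, dirent);
		if (error)
			break;
		if ((fid->file_char & UDF_FILE_CHAR_DEL) == 0)
			visit(dirent);
	}

	free(fid, M_UDFTEMP);
	free(dirent, M_UDFTEMP);
	return error;
}
#endif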
5323
5324
5325 /* --------------------------------------------------------------------- */
5326
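/*
 * The sync passes driven by udf_do_sync() below: pass 1 issues a data-only
 * VOP_FSYNC on every node, pass 2 issues a full VOP_FSYNC on nodes that no
 * longer have writes outstanding, and pass 3 only counts what is still dirty
 * so the caller can wait and retry.
 */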
5327 static void
5328 udf_sync_pass(struct udf_mount *ump, kauth_cred_t cred, int waitfor,
5329 int pass, int *ndirty)
5330 {
5331 struct udf_node *udf_node, *n_udf_node;
5332 struct vnode *vp;
5333 int vdirty, error;
5334 int on_type, on_flags, on_vnode;
5335
5336 derailed:
5337 KASSERT(mutex_owned(&mntvnode_lock));
5338
5339 DPRINTF(SYNC, ("sync_pass %d\n", pass));
5340 udf_node = LIST_FIRST(&ump->sorted_udf_nodes);
5341 for (;udf_node; udf_node = n_udf_node) {
5342 DPRINTF(SYNC, ("."));
5343
5344 udf_node->i_flags &= ~IN_SYNCED;
5345 vp = udf_node->vnode;
5346
5347 mutex_enter(&vp->v_interlock);
5348 n_udf_node = LIST_NEXT(udf_node, sortchain);
5349 if (n_udf_node)
5350 n_udf_node->i_flags |= IN_SYNCED;
5351
5352 /* system nodes are not synced this way */
5353 if (vp->v_vflag & VV_SYSTEM) {
5354 mutex_exit(&vp->v_interlock);
5355 continue;
5356 }
5357
5358 /* check if it's dirty enough to even try */
5359 on_type = (waitfor == MNT_LAZY || vp->v_type == VNON);
5360 on_flags = ((udf_node->i_flags &
5361 (IN_ACCESSED | IN_UPDATE | IN_MODIFIED)) == 0);
5362 on_vnode = LIST_EMPTY(&vp->v_dirtyblkhd)
5363 && UVM_OBJ_IS_CLEAN(&vp->v_uobj);
5364 if (on_type || (on_flags || on_vnode)) { /* XXX */
5365 /* not dirty (enough?) */
5366 mutex_exit(&vp->v_interlock);
5367 continue;
5368 }
5369
5370 mutex_exit(&mntvnode_lock);
5371 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
5372 if (error) {
5373 mutex_enter(&mntvnode_lock);
5374 if (error == ENOENT)
5375 goto derailed;
5376 *ndirty += 1;
5377 continue;
5378 }
5379
5380 switch (pass) {
5381 case 1:
5382 VOP_FSYNC(vp, cred, 0 | FSYNC_DATAONLY,0,0);
5383 break;
5384 case 2:
5385 vdirty = vp->v_numoutput;
5386 if (vp->v_tag == VT_UDF)
5387 vdirty += udf_node->outstanding_bufs +
5388 udf_node->outstanding_nodedscr;
5389 if (vdirty == 0)
5390 VOP_FSYNC(vp, cred, 0,0,0);
5391 *ndirty += vdirty;
5392 break;
5393 case 3:
5394 vdirty = vp->v_numoutput;
5395 if (vp->v_tag == VT_UDF)
5396 vdirty += udf_node->outstanding_bufs +
5397 udf_node->outstanding_nodedscr;
5398 *ndirty += vdirty;
5399 break;
5400 }
5401
5402 vput(vp);
5403 mutex_enter(&mntvnode_lock);
5404 }
5405 DPRINTF(SYNC, ("END sync_pass %d\n", pass));
5406 }
5407
5408
5409 void
5410 udf_do_sync(struct udf_mount *ump, kauth_cred_t cred, int waitfor)
5411 {
5412 int dummy, ndirty;
5413
5414 mutex_enter(&mntvnode_lock);
5415 recount:
5416 dummy = 0;
5417 DPRINTF(CALL, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
5418 DPRINTF(SYNC, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
5419 udf_sync_pass(ump, cred, waitfor, 1, &dummy);
5420
5421 DPRINTF(CALL, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
5422 DPRINTF(SYNC, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
5423 udf_sync_pass(ump, cred, waitfor, 2, &dummy);
5424
5425 if (waitfor == MNT_WAIT) {
5426 ndirty = ump->devvp->v_numoutput;
5427 DPRINTF(NODE, ("counting pending blocks: on devvp %d\n",
5428 ndirty));
5429 udf_sync_pass(ump, cred, waitfor, 3, &ndirty);
5430 DPRINTF(NODE, ("counted num dirty pending blocks %d\n",
5431 ndirty));
5432
5433 if (ndirty) {
5434 /* 1/4 second wait */
5435 cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
5436 hz/4);
5437 goto recount;
5438 }
5439 }
5440
5441 mutex_exit(&mntvnode_lock);
5442 }
5443
5444 /* --------------------------------------------------------------------- */
5445
5446 /*
5447  * Read and write a file extent into/from the buffer.
5448  *
5449  * The split-up of the extent into separate request buffers is to minimise
5450 * copying around as much as possible.
5451 *
5452 * block based file reading and writing
5453 */
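
/*
 * Illustrative sketch only: the run-length coalescing performed on the
 * translation table below, so adjacent logical blocks that map to adjacent
 * device sectors end up in one nested buffer. This mirrors the loop in
 * udf_read_filebuf(); the write path additionally treats equal mappings as
 * contiguous. The helper name is hypothetical.
 */
#if 0
static int
udf_example_run_length(uint64_t *mapping, int start, int num)
{
	int run_length = 1;

	/* extend the run while the next mapping is physically contiguous */
	while (start < num - 1 && mapping[start + 1] == mapping[start] + 1) {
		run_length++;
		start++;
	}
	return run_length;
}
#endif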
5454
5455 static int
5456 udf_read_internal(struct udf_node *node, uint8_t *blob)
5457 {
5458 struct udf_mount *ump;
5459 struct file_entry *fe = node->fe;
5460 struct extfile_entry *efe = node->efe;
5461 uint64_t inflen;
5462 uint32_t sector_size;
5463 uint8_t *pos;
5464 int icbflags, addr_type;
5465
5466 /* get extent and do some paranoia checks */
5467 ump = node->ump;
5468 sector_size = ump->discinfo.sector_size;
5469
5470 if (fe) {
5471 inflen = udf_rw64(fe->inf_len);
5472 pos = &fe->data[0] + udf_rw32(fe->l_ea);
5473 icbflags = udf_rw16(fe->icbtag.flags);
5474 } else {
5475 assert(node->efe);
5476 inflen = udf_rw64(efe->inf_len);
5477 pos = &efe->data[0] + udf_rw32(efe->l_ea);
5478 icbflags = udf_rw16(efe->icbtag.flags);
5479 }
5480 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
5481
5482 assert(addr_type == UDF_ICB_INTERN_ALLOC);
5483 assert(inflen < sector_size);
5484
5485 /* copy out info */
5486 memset(blob, 0, sector_size);
5487 memcpy(blob, pos, inflen);
5488
5489 return 0;
5490 }
5491
5492
5493 static int
5494 udf_write_internal(struct udf_node *node, uint8_t *blob)
5495 {
5496 struct udf_mount *ump;
5497 struct file_entry *fe = node->fe;
5498 struct extfile_entry *efe = node->efe;
5499 uint64_t inflen;
5500 uint32_t sector_size;
5501 uint8_t *pos;
5502 int icbflags, addr_type;
5503
5504 /* get extent and do some paranoia checks */
5505 ump = node->ump;
5506 sector_size = ump->discinfo.sector_size;
5507
5508 if (fe) {
5509 inflen = udf_rw64(fe->inf_len);
5510 pos = &fe->data[0] + udf_rw32(fe->l_ea);
5511 icbflags = udf_rw16(fe->icbtag.flags);
5512 } else {
5513 assert(node->efe);
5514 inflen = udf_rw64(efe->inf_len);
5515 pos = &efe->data[0] + udf_rw32(efe->l_ea);
5516 icbflags = udf_rw16(efe->icbtag.flags);
5517 }
5518 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
5519
5520 assert(addr_type == UDF_ICB_INTERN_ALLOC);
5521 assert(inflen < sector_size);
5522
5523 /* copy in blob */
5524 /* memset(pos, 0, inflen); */
5525 memcpy(pos, blob, inflen);
5526
5527 return 0;
5528 }
5529
5530
5531 void
5532 udf_read_filebuf(struct udf_node *udf_node, struct buf *buf)
5533 {
5534 struct buf *nestbuf;
5535 struct udf_mount *ump = udf_node->ump;
5536 uint64_t *mapping;
5537 uint64_t run_start;
5538 uint32_t sector_size;
5539 uint32_t buf_offset, sector, rbuflen, rblk;
5540 uint32_t from, lblkno;
5541 uint32_t sectors;
5542 uint8_t *buf_pos;
5543 int error, run_length, isdir, what;
5544
5545 sector_size = udf_node->ump->discinfo.sector_size;
5546
5547 from = buf->b_blkno;
5548 sectors = buf->b_bcount / sector_size;
5549
5550 isdir = (udf_node->vnode->v_type == VDIR);
5551 what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
5552
5553 /* assure we have enough translation slots */
5554 KASSERT(buf->b_bcount / sector_size <= UDF_MAX_MAPPINGS);
5555 KASSERT(MAXPHYS / sector_size <= UDF_MAX_MAPPINGS);
5556
5557 if (sectors > UDF_MAX_MAPPINGS) {
5558 printf("udf_read_filebuf: implementation limit on bufsize\n");
5559 buf->b_error = EIO;
5560 biodone(buf);
5561 return;
5562 }
5563
5564 mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
5565
5566 error = 0;
5567 DPRINTF(READ, ("\ttranslate %d-%d\n", from, sectors));
5568 error = udf_translate_file_extent(udf_node, from, sectors, mapping);
5569 if (error) {
5570 buf->b_error = error;
5571 biodone(buf);
5572 goto out;
5573 }
5574 DPRINTF(READ, ("\ttranslate extent went OK\n"));
5575
5576 /* pre-check if it's an internal allocation */
5577 if (*mapping == UDF_TRANS_INTERN) {
5578 error = udf_read_internal(udf_node, (uint8_t *) buf->b_data);
5579 if (error)
5580 buf->b_error = error;
5581 biodone(buf);
5582 goto out;
5583 }
5584 DPRINTF(READ, ("\tnot intern\n"));
5585
5586 #ifdef DEBUG
5587 if (udf_verbose & UDF_DEBUG_TRANSLATE) {
5588 printf("Returned translation table:\n");
5589 for (sector = 0; sector < sectors; sector++) {
5590 printf("%d : %"PRIu64"\n", sector, mapping[sector]);
5591 }
5592 }
5593 #endif
5594
5595 /* request read-in of data from disc scheduler */
5596 buf->b_resid = buf->b_bcount;
5597 for (sector = 0; sector < sectors; sector++) {
5598 buf_offset = sector * sector_size;
5599 buf_pos = (uint8_t *) buf->b_data + buf_offset;
5600 DPRINTF(READ, ("\tprocessing rel sector %d\n", sector));
5601
5602 /* check if it's zero or unmapped to stop reading */
5603 switch (mapping[sector]) {
5604 case UDF_TRANS_UNMAPPED:
5605 case UDF_TRANS_ZERO:
5606 /* copy zero sector TODO runlength like below */
5607 memset(buf_pos, 0, sector_size);
5608 DPRINTF(READ, ("\treturning zero sector\n"));
5609 nestiobuf_done(buf, sector_size, 0);
5610 break;
5611 default :
5612 DPRINTF(READ, ("\tread sector "
5613 "%"PRIu64"\n", mapping[sector]));
5614
5615 lblkno = from + sector;
5616 run_start = mapping[sector];
5617 run_length = 1;
5618 while (sector < sectors-1) {
5619 if (mapping[sector+1] != mapping[sector]+1)
5620 break;
5621 run_length++;
5622 sector++;
5623 }
5624
5625 /*
5626 * nest an iobuf and mark it for async reading. Since
5627 * we're using nested buffers, they can't be cached by
5628 * design.
5629 */
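			/*
			 * run_start counts logical sectors; the disc strategy
			 * works in DEV_BSIZE device blocks, hence the
			 * sector_size/DEV_BSIZE scaling below.
			 */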
5630 rbuflen = run_length * sector_size;
5631 rblk = run_start * (sector_size/DEV_BSIZE);
5632
5633 nestbuf = getiobuf(NULL, true);
5634 nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
5635 /* nestbuf is B_ASYNC */
5636
5637 /* identify this nestbuf */
5638 nestbuf->b_lblkno = lblkno;
5639 assert(nestbuf->b_vp == udf_node->vnode);
5640
5641 /* CD schedules on raw blkno */
5642 nestbuf->b_blkno = rblk;
5643 nestbuf->b_proc = NULL;
5644 nestbuf->b_rawblkno = rblk;
5645 nestbuf->b_udf_c_type = what;
5646
5647 udf_discstrat_queuebuf(ump, nestbuf);
5648 }
5649 }
5650 out:
5651 /* if we're synchronously reading, wait for the completion */
5652 if ((buf->b_flags & B_ASYNC) == 0)
5653 biowait(buf);
5654
5655 DPRINTF(READ, ("\tend of read_filebuf\n"));
5656 free(mapping, M_TEMP);
5657 return;
5658 }
5659
5660
5661 void
5662 udf_write_filebuf(struct udf_node *udf_node, struct buf *buf)
5663 {
5664 struct buf *nestbuf;
5665 struct udf_mount *ump = udf_node->ump;
5666 uint64_t *mapping;
5667 uint64_t run_start;
5668 uint32_t lb_size;
5669 uint32_t buf_offset, lb_num, rbuflen, rblk;
5670 uint32_t from, lblkno;
5671 uint32_t num_lb;
5672 uint8_t *buf_pos;
5673 int error, run_length, isdir, what, s;
5674
5675 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
5676
5677 from = buf->b_blkno;
5678 num_lb = buf->b_bcount / lb_size;
5679
5680 isdir = (udf_node->vnode->v_type == VDIR);
5681 what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
5682
5683 /* assure we have enough translation slots */
5684 KASSERT(buf->b_bcount / lb_size <= UDF_MAX_MAPPINGS);
5685 KASSERT(MAXPHYS / lb_size <= UDF_MAX_MAPPINGS);
5686
5687 if (num_lb > UDF_MAX_MAPPINGS) {
5688 printf("udf_write_filebuf: implementation limit on bufsize\n");
5689 buf->b_error = EIO;
5690 biodone(buf);
5691 return;
5692 }
5693
5694 mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
5695
5696 error = 0;
5697 DPRINTF(WRITE, ("\ttranslate %d-%d\n", from, num_lb));
5698 error = udf_translate_file_extent(udf_node, from, num_lb, mapping);
5699 if (error) {
5700 buf->b_error = error;
5701 biodone(buf);
5702 goto out;
5703 }
5704 DPRINTF(WRITE, ("\ttranslate extent went OK\n"));
5705
5706 /* if it's internally mapped, we can write it in the descriptor itself */
5707 if (*mapping == UDF_TRANS_INTERN) {
5708 /* TODO paranoia check if we ARE going to have enough space */
5709 error = udf_write_internal(udf_node, (uint8_t *) buf->b_data);
5710 if (error)
5711 buf->b_error = error;
5712 biodone(buf);
5713 goto out;
5714 }
5715 DPRINTF(WRITE, ("\tnot intern\n"));
5716
5717 /* request write out of data to disc scheduler */
5718 buf->b_resid = buf->b_bcount;
5719 for (lb_num = 0; lb_num < num_lb; lb_num++) {
5720 buf_offset = lb_num * lb_size;
5721 buf_pos = (uint8_t *) buf->b_data + buf_offset;
5722 DPRINTF(WRITE, ("\tprocessing rel lb_num %d\n", lb_num));
5723
5724 /*
5725 * Mappings are not that important here. Just before we write
5726 * the lb_num we late-allocate them when needed and update the
5727 * mapping in the udf_node.
5728 */
5729
5730 /* XXX why not ignore the mapping altogether ? */
5731 /* TODO estimate here how much will be late-allocated */
5732 DPRINTF(WRITE, ("\twrite lb_num "
5733 "%"PRIu64, mapping[lb_num]));
5734
5735 lblkno = from + lb_num;
5736 run_start = mapping[lb_num];
5737 run_length = 1;
5738 while (lb_num < num_lb-1) {
5739 if (mapping[lb_num+1] != mapping[lb_num]+1)
5740 if (mapping[lb_num+1] != mapping[lb_num])
5741 break;
5742 run_length++;
5743 lb_num++;
5744 }
5745 DPRINTF(WRITE, ("+ %d\n", run_length));
5746
5747 /* nest an iobuf on the master buffer for the extent */
5748 rbuflen = run_length * lb_size;
5749 rblk = run_start * (lb_size/DEV_BSIZE);
5750
5751 #if 0
5752 /* if its zero or unmapped, our blknr gets -1 for unmapped */
5753 switch (mapping[lb_num]) {
5754 case UDF_TRANS_UNMAPPED:
5755 case UDF_TRANS_ZERO:
5756 rblk = -1;
5757 break;
5758 default:
5759 rblk = run_start * (lb_size/DEV_BSIZE);
5760 break;
5761 }
5762 #endif
5763
5764 nestbuf = getiobuf(NULL, true);
5765 nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
5766 /* nestbuf is B_ASYNC */
5767
5768 /* identify this nestbuf */
5769 nestbuf->b_lblkno = lblkno;
5770 KASSERT(nestbuf->b_vp == udf_node->vnode);
5771
5772 /* CD schedules on raw blkno */
5773 nestbuf->b_blkno = rblk;
5774 nestbuf->b_proc = NULL;
5775 nestbuf->b_rawblkno = rblk;
5776 nestbuf->b_udf_c_type = what;
5777
5778 /* increment our outstanding bufs counter */
5779 s = splbio();
5780 udf_node->outstanding_bufs++;
5781 splx(s);
5782
5783 udf_discstrat_queuebuf(ump, nestbuf);
5784 }
5785 out:
5786 /* if we're synchronously writing, wait for the completion */
5787 if ((buf->b_flags & B_ASYNC) == 0)
5788 biowait(buf);
5789
5790 DPRINTF(WRITE, ("\tend of write_filebuf\n"));
5791 free(mapping, M_TEMP);
5792 return;
5793 }
5794
5795 /* --------------------------------------------------------------------- */
5796
5797
5798