1 /*	$NetBSD: udf_subr.c,v 1.66 2008/07/22 19:06:55 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29
30 #include <sys/cdefs.h>
31 #ifndef lint
32 __KERNEL_RCSID(0, "$NetBSD: udf_subr.c,v 1.66 2008/07/22 19:06:55 reinoud Exp $");
33 #endif /* not lint */
34
35
36 #if defined(_KERNEL_OPT)
37 #include "opt_quota.h"
38 #include "opt_compat_netbsd.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <fs/unicode.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
76
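/*
 * Mark a vnode as a system vnode and unlock it. Note (assumption): the
 * vref() compensates for the reference that vput() drops when unlocking,
 * so the caller's hold on the vnode is preserved.
 */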
77 #define UDF_SET_SYSTEMFILE(vp) \
78 /* XXXAD Is the vnode locked? */ \
79 (vp)->v_vflag |= VV_SYSTEM; \
80 vref(vp); \
81 vput(vp); \
82
83 extern int syncer_maxdelay; /* maximum delay time */
84 extern int (**udf_vnodeop_p)(void *);
85
86 /* --------------------------------------------------------------------- */
87
88 //#ifdef DEBUG
89 #if 1
90
91 #if 0
92 static void
93 udf_dumpblob(uint8_t *blob, uint32_t dlen)
94 {
95 int i, j;
96
97 printf("blob = %p\n", blob);
98 printf("dump of %d bytes\n", dlen);
99
100 for (i = 0; i < dlen; i += 16) {
101 printf("%04x ", i);
102 for (j = 0; j < 16; j++) {
103 if (i+j < dlen) {
104 printf("%02x ", blob[i+j]);
105 } else {
106 printf(" ");
107 }
108 }
109 for (j = 0; j < 16; j++) {
110 if (i+j < dlen) {
111 if (blob[i+j] > 32 && blob[i+j] != 127) {
112 printf("%c", blob[i+j]);
113 } else {
114 printf(".");
115 }
116 }
117 }
118 printf("\n");
119 }
120 printf("\n");
121 Debugger();
122 }
123 #endif
124
125 static void
126 udf_dump_discinfo(struct udf_mount *ump)
127 {
128 char bits[128];
129 struct mmc_discinfo *di = &ump->discinfo;
130
131 if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
132 return;
133
134 printf("Device/media info :\n");
135 printf("\tMMC profile 0x%02x\n", di->mmc_profile);
136 printf("\tderived class %d\n", di->mmc_class);
137 printf("\tsector size %d\n", di->sector_size);
138 printf("\tdisc state %d\n", di->disc_state);
139 printf("\tlast ses state %d\n", di->last_session_state);
140 printf("\tbg format state %d\n", di->bg_format_state);
141 printf("\tfrst track %d\n", di->first_track);
142 printf("\tfst on last ses %d\n", di->first_track_last_session);
143 printf("\tlst on last ses %d\n", di->last_track_last_session);
144 printf("\tlink block penalty %d\n", di->link_block_penalty);
145 bitmask_snprintf(di->disc_flags, MMC_DFLAGS_FLAGBITS, bits,
146 sizeof(bits));
147 printf("\tdisc flags %s\n", bits);
148 printf("\tdisc id %x\n", di->disc_id);
149 printf("\tdisc barcode %"PRIx64"\n", di->disc_barcode);
150
151 printf("\tnum sessions %d\n", di->num_sessions);
152 printf("\tnum tracks %d\n", di->num_tracks);
153
154 bitmask_snprintf(di->mmc_cur, MMC_CAP_FLAGBITS, bits, sizeof(bits));
155 printf("\tcapabilities cur %s\n", bits);
156 bitmask_snprintf(di->mmc_cap, MMC_CAP_FLAGBITS, bits, sizeof(bits));
157 printf("\tcapabilities cap %s\n", bits);
158 }
159 #else
160 #define udf_dump_discinfo(a)
161 #endif
162
163
164 /* --------------------------------------------------------------------- */
165
166 /* not called often */
167 int
168 udf_update_discinfo(struct udf_mount *ump)
169 {
170 struct vnode *devvp = ump->devvp;
171 struct partinfo dpart;
172 struct mmc_discinfo *di;
173 int error;
174
175 DPRINTF(VOLUMES, ("read/update disc info\n"));
176 di = &ump->discinfo;
177 memset(di, 0, sizeof(struct mmc_discinfo));
178
179 /* check if we're on an MMC-capable device, i.e. CD/DVD */
180 error = VOP_IOCTL(devvp, MMCGETDISCINFO, di, FKIOCTL, NOCRED);
181 if (error == 0) {
182 udf_dump_discinfo(ump);
183 return 0;
184 }
185
186 /* disc partition support */
187 error = VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED);
188 if (error)
189 return ENODEV;
190
191 /* set up a disc info profile for partitions */
192 di->mmc_profile = 0x01; /* disc type: non-removable rewritable disk */
193 di->mmc_class = MMC_CLASS_DISC;
194 di->disc_state = MMC_STATE_CLOSED;
195 di->last_session_state = MMC_STATE_CLOSED;
196 di->bg_format_state = MMC_BGFSTATE_COMPLETED;
197 di->link_block_penalty = 0;
198
199 di->mmc_cur = MMC_CAP_RECORDABLE | MMC_CAP_REWRITABLE |
200 MMC_CAP_ZEROLINKBLK | MMC_CAP_HW_DEFECTFREE;
201 di->mmc_cap = di->mmc_cur;
202 di->disc_flags = MMC_DFLAGS_UNRESTRICTED;
203
204 /* TODO problem with last_possible_lba on resizable VND; request */
205 di->last_possible_lba = dpart.part->p_size;
206 di->sector_size = dpart.disklab->d_secsize;
207
208 di->num_sessions = 1;
209 di->num_tracks = 1;
210
211 di->first_track = 1;
212 di->first_track_last_session = di->last_track_last_session = 1;
213
214 udf_dump_discinfo(ump);
215 return 0;
216 }
217
218
219 int
220 udf_update_trackinfo(struct udf_mount *ump, struct mmc_trackinfo *ti)
221 {
222 struct vnode *devvp = ump->devvp;
223 struct mmc_discinfo *di = &ump->discinfo;
224 int error, class;
225
226 DPRINTF(VOLUMES, ("read track info\n"));
227
228 class = di->mmc_class;
229 if (class != MMC_CLASS_DISC) {
230 /* tracknr specified in struct ti */
231 error = VOP_IOCTL(devvp, MMCGETTRACKINFO, ti, FKIOCTL, NOCRED);
232 return error;
233 }
234
235 /* disc partition support */
236 if (ti->tracknr != 1)
237 return EIO;
238
239 /* create fake ti (TODO check for resized vnds) */
240 ti->sessionnr = 1;
241
242 ti->track_mode = 0; /* XXX */
243 ti->data_mode = 0; /* XXX */
244 ti->flags = MMC_TRACKINFO_LRA_VALID | MMC_TRACKINFO_NWA_VALID;
245
246 ti->track_start = 0;
247 ti->packet_size = 1;
248
249 /* TODO support for resizable vnd */
250 ti->track_size = di->last_possible_lba;
251 ti->next_writable = di->last_possible_lba;
252 ti->last_recorded = ti->next_writable;
253 ti->free_blocks = 0;
254
255 return 0;
256 }
257
258
259 int
260 udf_setup_writeparams(struct udf_mount *ump)
261 {
262 struct mmc_writeparams mmc_writeparams;
263 int error;
264
265 if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
266 return 0;
267
268 /*
269 * only CD burning normally needs setting up, but other disc types
270 * might need other settings to be made. The MMC framework will set up
271 * the necessary recording parameters according to the disc
272 * characteristics read in. Modifications can be made in the discinfo
273 * structure passed to change the nature of the disc.
274 */
275
276 memset(&mmc_writeparams, 0, sizeof(struct mmc_writeparams));
277 mmc_writeparams.mmc_class = ump->discinfo.mmc_class;
278 mmc_writeparams.mmc_cur = ump->discinfo.mmc_cur;
279
280 /*
281 * UDF dictates first track to determine track mode for the whole
282 * disc. [UDF 1.50/6.10.1.1, UDF 1.50/6.10.2.1]
283 * To prevent problems with a `reserved' track in front we start with
284 * the 2nd track and if that is not valid, go for the 1st.
285 */
286 mmc_writeparams.tracknr = 2;
287 mmc_writeparams.data_mode = MMC_DATAMODE_DEFAULT; /* XA disc */
288 mmc_writeparams.track_mode = MMC_TRACKMODE_DEFAULT; /* data */
289
290 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS, &mmc_writeparams,
291 FKIOCTL, NOCRED);
292 if (error) {
293 mmc_writeparams.tracknr = 1;
294 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS,
295 &mmc_writeparams, FKIOCTL, NOCRED);
296 }
297 return error;
298 }
299
300
301 int
302 udf_synchronise_caches(struct udf_mount *ump)
303 {
304 struct mmc_op mmc_op;
305
306 DPRINTF(CALL, ("udf_synchronise_caches()\n"));
307
308 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
309 return 0;
310
311 /* disc-like devices need no explicit MMC cache synchronisation */
312 if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
313 return 0;
314
315 bzero(&mmc_op, sizeof(struct mmc_op));
316 mmc_op.operation = MMC_OP_SYNCHRONISECACHE;
317
318 /* ignore return code */
319 (void) VOP_IOCTL(ump->devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED);
320
321 return 0;
322 }
323
324 /* --------------------------------------------------------------------- */
325
326 /* track/session searching for mounting */
327 int
328 udf_search_tracks(struct udf_mount *ump, struct udf_args *args,
329 int *first_tracknr, int *last_tracknr)
330 {
331 struct mmc_trackinfo trackinfo;
332 uint32_t tracknr, start_track, num_tracks;
333 int error;
334
335 /* if negative, sessionnr is relative to last session */
336 if (args->sessionnr < 0) {
337 args->sessionnr += ump->discinfo.num_sessions;
338 }
339
340 /* sanity */
341 if (args->sessionnr < 0)
342 args->sessionnr = 0;
343 if (args->sessionnr > ump->discinfo.num_sessions)
344 args->sessionnr = ump->discinfo.num_sessions;
345
346 /* search the tracks for this session, zero session nr indicates last */
347 if (args->sessionnr == 0)
348 args->sessionnr = ump->discinfo.num_sessions;
349 if (ump->discinfo.last_session_state == MMC_STATE_EMPTY)
350 args->sessionnr--;
351
352 /* sanity again */
353 if (args->sessionnr < 0)
354 args->sessionnr = 0;
355
356 /* search the first and last track of the specified session */
357 num_tracks = ump->discinfo.num_tracks;
358 start_track = ump->discinfo.first_track;
359
360 /* search for first track of this session */
361 for (tracknr = start_track; tracknr <= num_tracks; tracknr++) {
362 /* get track info */
363 trackinfo.tracknr = tracknr;
364 error = udf_update_trackinfo(ump, &trackinfo);
365 if (error)
366 return error;
367
368 if (trackinfo.sessionnr == args->sessionnr)
369 break;
370 }
371 *first_tracknr = tracknr;
372
373 /* search for last track of this session */
374 for (;tracknr <= num_tracks; tracknr++) {
375 /* get track info */
376 trackinfo.tracknr = tracknr;
377 error = udf_update_trackinfo(ump, &trackinfo);
378 if (error || (trackinfo.sessionnr != args->sessionnr)) {
379 tracknr--;
380 break;
381 }
382 }
383 if (tracknr > num_tracks)
384 tracknr--;
385
386 *last_tracknr = tracknr;
387
388 if (*last_tracknr < *first_tracknr) {
389 printf( "udf_search_tracks: sanity check on drive+disc failed, "
390 "drive returned garbage\n");
391 return EINVAL;
392 }
393
394 assert(*last_tracknr >= *first_tracknr);
395 return 0;
396 }
397
398
399 /*
400 * NOTE: this is the only routine in this file that directly peeks into the
401 * metadata file, but since it's at a larval state of the mount it can't hurt.
402 *
403 * XXX candidate for udf_allocation.c
404 * XXX clean me up!, change to new node reading code.
405 */
406
407 static void
408 udf_check_track_metadata_overlap(struct udf_mount *ump,
409 struct mmc_trackinfo *trackinfo)
410 {
411 struct part_desc *part;
412 struct file_entry *fe;
413 struct extfile_entry *efe;
414 struct short_ad *s_ad;
415 struct long_ad *l_ad;
416 uint32_t track_start, track_end;
417 uint32_t phys_part_start, phys_part_end, part_start, part_end;
418 uint32_t sector_size, len, alloclen, plb_num;
419 uint8_t *pos;
420 int addr_type, icblen, icbflags, flags;
421
422 /* get our track extents */
423 track_start = trackinfo->track_start;
424 track_end = track_start + trackinfo->track_size;
425
426 /* get our base partition extent */
427 part = ump->partitions[ump->metadata_part];
428 phys_part_start = udf_rw32(part->start_loc);
429 phys_part_end = phys_part_start + udf_rw32(part->part_len);
430
431 /* no use if it's outside the physical partition */
432 if ((phys_part_start >= track_end) || (phys_part_end < track_start))
433 return;
434
435 /*
436 * now follow all extents in the fe/efe to see if they refer to this
437 * track
438 */
439
440 sector_size = ump->discinfo.sector_size;
441
442 /* XXX should we claim exclusive access to the metafile ? */
443 /* TODO: move to new node read code */
444 fe = ump->metadata_node->fe;
445 efe = ump->metadata_node->efe;
446 if (fe) {
447 alloclen = udf_rw32(fe->l_ad);
448 pos = &fe->data[0] + udf_rw32(fe->l_ea);
449 icbflags = udf_rw16(fe->icbtag.flags);
450 } else {
451 assert(efe);
452 alloclen = udf_rw32(efe->l_ad);
453 pos = &efe->data[0] + udf_rw32(efe->l_ea);
454 icbflags = udf_rw16(efe->icbtag.flags);
455 }
456 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
457
458 while (alloclen) {
459 if (addr_type == UDF_ICB_SHORT_ALLOC) {
460 icblen = sizeof(struct short_ad);
461 s_ad = (struct short_ad *) pos;
462 len = udf_rw32(s_ad->len);
463 plb_num = udf_rw32(s_ad->lb_num);
464 } else {
465 /* should not be present, but why not */
466 icblen = sizeof(struct long_ad);
467 l_ad = (struct long_ad *) pos;
468 len = udf_rw32(l_ad->len);
469 plb_num = udf_rw32(l_ad->loc.lb_num);
470 /* pvpart_num = udf_rw16(l_ad->loc.part_num); */
471 }
472 /* process extent */
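		/* the top two bits of the length word encode the extent type */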
473 flags = UDF_EXT_FLAGS(len);
474 len = UDF_EXT_LEN(len);
475
476 part_start = phys_part_start + plb_num;
477 part_end = part_start + (len / sector_size);
478
479 if ((part_start >= track_start) && (part_end <= track_end)) {
480 /* extent is enclosed within this track */
481 ump->metadata_track = *trackinfo;
482 return;
483 }
484
485 pos += icblen;
486 alloclen -= icblen;
487 }
488 }
489
490
491 int
492 udf_search_writing_tracks(struct udf_mount *ump)
493 {
494 struct mmc_trackinfo trackinfo;
495 struct part_desc *part;
496 uint32_t tracknr, start_track, num_tracks;
497 uint32_t track_start, track_end, part_start, part_end;
498 int error;
499
500 /*
501 * in the CD/(HD)DVD/BD recordable device model a few tracks within
502 * the last session might be open but in the UDF device model at most
503 * three tracks can be open: a reserved track for delayed ISO VRS
504 * writing, a data track and a metadata track. We search here for the
505 * data track and the metadata track. Note that the reserved track is
506 * troublesome but can be detected by its small size of < 512 sectors.
507 */
508
509 num_tracks = ump->discinfo.num_tracks;
510 start_track = ump->discinfo.first_track;
511
512 /* fetch info on first and possibly only track */
513 trackinfo.tracknr = start_track;
514 error = udf_update_trackinfo(ump, &trackinfo);
515 if (error)
516 return error;
517
518 /* copy results to our mount point */
519 ump->data_track = trackinfo;
520 ump->metadata_track = trackinfo;
521
522 /* if not sequential, we're done */
523 if (num_tracks == 1)
524 return 0;
525
526 for (tracknr = start_track;tracknr <= num_tracks; tracknr++) {
527 /* get track info */
528 trackinfo.tracknr = tracknr;
529 error = udf_update_trackinfo(ump, &trackinfo);
530 if (error)
531 return error;
532
533 if ((trackinfo.flags & MMC_TRACKINFO_NWA_VALID) == 0)
534 continue;
535
536 track_start = trackinfo.track_start;
537 track_end = track_start + trackinfo.track_size;
538
539 /* check for overlap on data partition */
540 part = ump->partitions[ump->data_part];
541 part_start = udf_rw32(part->start_loc);
542 part_end = part_start + udf_rw32(part->part_len);
543 if ((part_start < track_end) && (part_end > track_start)) {
544 ump->data_track = trackinfo;
545 /* TODO check if UDF partition data_part is writable */
546 }
547
548 /* check for overlap on metadata partition */
549 if ((ump->meta_alloc == UDF_ALLOC_METASEQUENTIAL) ||
550 (ump->meta_alloc == UDF_ALLOC_METABITMAP)) {
551 udf_check_track_metadata_overlap(ump, &trackinfo);
552 } else {
553 ump->metadata_track = trackinfo;
554 }
555 }
556
557 if ((ump->data_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
558 return EROFS;
559
560 if ((ump->metadata_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
561 return EROFS;
562
563 return 0;
564 }
565
566 /* --------------------------------------------------------------------- */
567
568 /*
569 * Check if the blob starts with a good UDF tag. Tags are protected by a
570 * checksum over the header except one byte at position 4 that is the checksum
571 * itself.
572 */
573
574 int
575 udf_check_tag(void *blob)
576 {
577 struct desc_tag *tag = blob;
578 uint8_t *pos, sum, cnt;
579
580 /* check TAG header checksum */
581 pos = (uint8_t *) tag;
582 sum = 0;
583
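	/* the descriptor tag is 16 bytes; byte 4 is the checksum itself and is skipped */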
584 for(cnt = 0; cnt < 16; cnt++) {
585 if (cnt != 4)
586 sum += *pos;
587 pos++;
588 }
589 if (sum != tag->cksum) {
590 /* bad tag header checksum; this is not a valid tag */
591 return EINVAL;
592 }
593
594 return 0;
595 }
596
597
598 /*
599 * udf_check_tag_payload() checks the descriptor CRC as specified.
600 * It returns EIO if the descriptor is too long, EINVAL if the CRC doesn't match.
601 */
602
603 int
604 udf_check_tag_payload(void *blob, uint32_t max_length)
605 {
606 struct desc_tag *tag = blob;
607 uint16_t crc, crc_len;
608
609 crc_len = udf_rw16(tag->desc_crc_len);
610
611 /* check payload CRC if applicable */
612 if (crc_len == 0)
613 return 0;
614
615 if (crc_len > max_length)
616 return EIO;
617
618 crc = udf_cksum(((uint8_t *) tag) + UDF_DESC_TAG_LENGTH, crc_len);
619 if (crc != udf_rw16(tag->desc_crc)) {
620 /* bad payload CRC; this is a broken tag */
621 return EINVAL;
622 }
623
624 return 0;
625 }
626
627
628 void
629 udf_validate_tag_sum(void *blob)
630 {
631 struct desc_tag *tag = blob;
632 uint8_t *pos, sum, cnt;
633
634 /* calculate TAG header checksum */
635 pos = (uint8_t *) tag;
636 sum = 0;
637
638 for(cnt = 0; cnt < 16; cnt++) {
639 if (cnt != 4) sum += *pos;
640 pos++;
641 }
642 tag->cksum = sum; /* 8 bit */
643 }
644
645
646 /* assumes sector number of descriptor to be saved already present */
647 void
648 udf_validate_tag_and_crc_sums(void *blob)
649 {
650 struct desc_tag *tag = blob;
651 uint8_t *btag = (uint8_t *) tag;
652 uint16_t crc, crc_len;
653
654 crc_len = udf_rw16(tag->desc_crc_len);
655
656 /* check payload CRC if applicable */
657 if (crc_len > 0) {
658 crc = udf_cksum(btag + UDF_DESC_TAG_LENGTH, crc_len);
659 tag->desc_crc = udf_rw16(crc);
660 }
661
662 /* calculate TAG header checksum */
663 udf_validate_tag_sum(blob);
664 }
665
666 /* --------------------------------------------------------------------- */
667
668 /*
669 * XXX note the different semantics from udfclient: for FIDs it still rounds
670 * up to sectors. Use udf_fidsize() for a correct length.
671 */
672
673 int
674 udf_tagsize(union dscrptr *dscr, uint32_t lb_size)
675 {
676 uint32_t size, tag_id, num_lb, elmsz;
677
678 tag_id = udf_rw16(dscr->tag.id);
679
680 switch (tag_id) {
681 case TAGID_LOGVOL :
682 size = sizeof(struct logvol_desc) - 1;
683 size += udf_rw32(dscr->lvd.mt_l);
684 break;
685 case TAGID_UNALLOC_SPACE :
686 elmsz = sizeof(struct extent_ad);
687 size = sizeof(struct unalloc_sp_desc) - elmsz;
688 size += udf_rw32(dscr->usd.alloc_desc_num) * elmsz;
689 break;
690 case TAGID_FID :
691 size = UDF_FID_SIZE + dscr->fid.l_fi + udf_rw16(dscr->fid.l_iu);
692 size = (size + 3) & ~3;
693 break;
694 case TAGID_LOGVOL_INTEGRITY :
695 size = sizeof(struct logvol_int_desc) - sizeof(uint32_t);
696 size += udf_rw32(dscr->lvid.l_iu);
697 size += (2 * udf_rw32(dscr->lvid.num_part) * sizeof(uint32_t));
698 break;
699 case TAGID_SPACE_BITMAP :
700 size = sizeof(struct space_bitmap_desc) - 1;
701 size += udf_rw32(dscr->sbd.num_bytes);
702 break;
703 case TAGID_SPARING_TABLE :
704 elmsz = sizeof(struct spare_map_entry);
705 size = sizeof(struct udf_sparing_table) - elmsz;
706 size += udf_rw16(dscr->spt.rt_l) * elmsz;
707 break;
708 case TAGID_FENTRY :
709 size = sizeof(struct file_entry);
710 size += udf_rw32(dscr->fe.l_ea) + udf_rw32(dscr->fe.l_ad)-1;
711 break;
712 case TAGID_EXTFENTRY :
713 size = sizeof(struct extfile_entry);
714 size += udf_rw32(dscr->efe.l_ea) + udf_rw32(dscr->efe.l_ad)-1;
715 break;
716 case TAGID_FSD :
717 size = sizeof(struct fileset_desc);
718 break;
719 default :
720 size = sizeof(union dscrptr);
721 break;
722 }
723
724 if ((size == 0) || (lb_size == 0)) return 0;
725
726 /* round up in sectors */
727 num_lb = (size + lb_size -1) / lb_size;
728 return num_lb * lb_size;
729 }
730
731
732 int
733 udf_fidsize(struct fileid_desc *fid)
734 {
735 uint32_t size;
736
737 if (udf_rw16(fid->tag.id) != TAGID_FID)
738 panic("got udf_fidsize on non FID\n");
739
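	/* fixed FID header plus identifier and impl-use areas, padded to a multiple of four bytes */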
740 size = UDF_FID_SIZE + fid->l_fi + udf_rw16(fid->l_iu);
741 size = (size + 3) & ~3;
742
743 return size;
744 }
745
746 /* --------------------------------------------------------------------- */
747
748 void
749 udf_lock_node(struct udf_node *udf_node, int flag, char const *fname, const int lineno)
750 {
751 int ret;
752
753 mutex_enter(&udf_node->node_mutex);
754 /* wait until free */
755 while (udf_node->i_flags & IN_LOCKED) {
756 ret = cv_timedwait(&udf_node->node_lock, &udf_node->node_mutex, hz/8);
757 /* TODO check if we should return error; abort */
758 if (ret == EWOULDBLOCK) {
759 DPRINTF(LOCKING, ( "udf_lock_node: udf_node %p would block "
760 "wanted at %s:%d, previously locked at %s:%d\n",
761 udf_node, fname, lineno,
762 udf_node->lock_fname, udf_node->lock_lineno));
763 }
764 }
765 /* grab */
766 udf_node->i_flags |= IN_LOCKED | flag;
767 /* debug */
768 udf_node->lock_fname = fname;
769 udf_node->lock_lineno = lineno;
770
771 mutex_exit(&udf_node->node_mutex);
772 }
773
774
775 void
776 udf_unlock_node(struct udf_node *udf_node, int flag)
777 {
778 mutex_enter(&udf_node->node_mutex);
779 udf_node->i_flags &= ~(IN_LOCKED | flag);
780 cv_broadcast(&udf_node->node_lock);
781 mutex_exit(&udf_node->node_mutex);
782 }
783
784
785 /* --------------------------------------------------------------------- */
786
787 static int
788 udf_read_anchor(struct udf_mount *ump, uint32_t sector, struct anchor_vdp **dst)
789 {
790 int error;
791
792 error = udf_read_phys_dscr(ump, sector, M_UDFVOLD,
793 (union dscrptr **) dst);
794 if (!error) {
795 /* blank terminator blocks are not allowed here */
796 if (*dst == NULL)
797 return ENOENT;
798 if (udf_rw16((*dst)->tag.id) != TAGID_ANCHOR) {
799 error = ENOENT;
800 free(*dst, M_UDFVOLD);
801 *dst = NULL;
802 DPRINTF(VOLUMES, ("Not an anchor\n"));
803 }
804 }
805
806 return error;
807 }
808
809
810 int
811 udf_read_anchors(struct udf_mount *ump)
812 {
813 struct udf_args *args = &ump->mount_args;
814 struct mmc_trackinfo first_track;
815 struct mmc_trackinfo second_track;
816 struct mmc_trackinfo last_track;
817 struct anchor_vdp **anchorsp;
818 uint32_t track_start;
819 uint32_t track_end;
820 uint32_t positions[4];
821 int first_tracknr, last_tracknr;
822 int error, anch, ok, first_anchor;
823
824 /* search the first and last track of the specified session */
825 error = udf_search_tracks(ump, args, &first_tracknr, &last_tracknr);
826 if (!error) {
827 first_track.tracknr = first_tracknr;
828 error = udf_update_trackinfo(ump, &first_track);
829 }
830 if (!error) {
831 last_track.tracknr = last_tracknr;
832 error = udf_update_trackinfo(ump, &last_track);
833 }
834 if ((!error) && (first_tracknr != last_tracknr)) {
835 second_track.tracknr = first_tracknr+1;
836 error = udf_update_trackinfo(ump, &second_track);
837 }
838 if (error) {
839 printf("UDF mount: reading disc geometry failed\n");
840 return 0;
841 }
842
843 track_start = first_track.track_start;
844
845 /* `end' is not as straightforward as start. */
846 track_end = last_track.track_start
847 + last_track.track_size - last_track.free_blocks - 1;
848
849 if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
850 /* end of track is not straightforward here */
851 if (last_track.flags & MMC_TRACKINFO_LRA_VALID)
852 track_end = last_track.last_recorded;
853 else if (last_track.flags & MMC_TRACKINFO_NWA_VALID)
854 track_end = last_track.next_writable
855 - ump->discinfo.link_block_penalty;
856 }
857
858 /* it's no use reading a blank track */
859 first_anchor = 0;
860 if (first_track.flags & MMC_TRACKINFO_BLANK)
861 first_anchor = 1;
862
863 /* get our packet size */
864 ump->packet_size = first_track.packet_size;
865 if (first_track.flags & MMC_TRACKINFO_BLANK)
866 ump->packet_size = second_track.packet_size;
867
868 if (ump->packet_size <= 1) {
869 /* take max, but not bigger than 64 */
870 ump->packet_size = MAXPHYS / ump->discinfo.sector_size;
871 ump->packet_size = MIN(ump->packet_size, 64);
872 }
873 KASSERT(ump->packet_size >= 1);
874
875 /* read anchors start+256, start+512, end-256, end */
876 positions[0] = track_start+256;
877 positions[1] = track_end-256;
878 positions[2] = track_end;
879 positions[3] = track_start+512; /* [UDF 2.60/6.11.2] */
880 /* XXX shouldn't +512 be preferred over +256 for compat with Roxio CD */
881
882 ok = 0;
883 anchorsp = ump->anchors;
884 for (anch = first_anchor; anch < 4; anch++) {
885 DPRINTF(VOLUMES, ("Read anchor %d at sector %d\n", anch,
886 positions[anch]));
887 error = udf_read_anchor(ump, positions[anch], anchorsp);
888 if (!error) {
889 anchorsp++;
890 ok++;
891 }
892 }
893
894 /* VATs are only recorded on sequential media, but initialise */
895 ump->first_possible_vat_location = track_start + 2;
896 ump->last_possible_vat_location = track_end + last_track.packet_size;
897
898 return ok;
899 }
900
901 /* --------------------------------------------------------------------- */
902
903 /* we don't try to be smart; we just record the parts */
904 #define UDF_UPDATE_DSCR(name, dscr) \
905 if (name) \
906 free(name, M_UDFVOLD); \
907 name = dscr;
908
909 static int
910 udf_process_vds_descriptor(struct udf_mount *ump, union dscrptr *dscr)
911 {
912 struct part_desc *part;
913 uint16_t phys_part, raw_phys_part;
914
915 DPRINTF(VOLUMES, ("\tprocessing VDS descr %d\n",
916 udf_rw16(dscr->tag.id)));
917 switch (udf_rw16(dscr->tag.id)) {
918 case TAGID_PRI_VOL : /* primary volume descriptor */
919 UDF_UPDATE_DSCR(ump->primary_vol, &dscr->pvd);
920 break;
921 case TAGID_LOGVOL : /* logical volume */
922 UDF_UPDATE_DSCR(ump->logical_vol, &dscr->lvd);
923 break;
924 case TAGID_UNALLOC_SPACE : /* unallocated space */
925 UDF_UPDATE_DSCR(ump->unallocated, &dscr->usd);
926 break;
927 case TAGID_IMP_VOL : /* implementation */
928 /* XXX do we care about multiple impl. descr ? */
929 UDF_UPDATE_DSCR(ump->implementation, &dscr->ivd);
930 break;
931 case TAGID_PARTITION : /* physical partition */
932 /* not much use if it's not allocated */
933 if ((udf_rw16(dscr->pd.flags) & UDF_PART_FLAG_ALLOCATED) == 0) {
934 free(dscr, M_UDFVOLD);
935 break;
936 }
937
938 /*
939 * BUGALERT: some rogue implementations use random physical
940 * partition numbers to break other implementations, so look up
941 * the number.
942 */
943 raw_phys_part = udf_rw16(dscr->pd.part_num);
944 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
945 part = ump->partitions[phys_part];
946 if (part == NULL)
947 break;
948 if (udf_rw16(part->part_num) == raw_phys_part)
949 break;
950 }
951 if (phys_part == UDF_PARTITIONS) {
952 free(dscr, M_UDFVOLD);
953 return EINVAL;
954 }
955
956 UDF_UPDATE_DSCR(ump->partitions[phys_part], &dscr->pd);
957 break;
958 case TAGID_VOL : /* volume space extender; rare */
959 DPRINTF(VOLUMES, ("VDS extender ignored\n"));
960 free(dscr, M_UDFVOLD);
961 break;
962 default :
963 DPRINTF(VOLUMES, ("Unhandled VDS type %d\n",
964 udf_rw16(dscr->tag.id)));
965 free(dscr, M_UDFVOLD);
966 }
967
968 return 0;
969 }
970 #undef UDF_UPDATE_DSCR
971
972 /* --------------------------------------------------------------------- */
973
974 static int
975 udf_read_vds_extent(struct udf_mount *ump, uint32_t loc, uint32_t len)
976 {
977 union dscrptr *dscr;
978 uint32_t sector_size, dscr_size;
979 int error;
980
981 sector_size = ump->discinfo.sector_size;
982
983 /* loc is sectornr, len is in bytes */
984 error = EIO;
985 while (len) {
986 error = udf_read_phys_dscr(ump, loc, M_UDFVOLD, &dscr);
987 if (error)
988 return error;
989
990 /* blank block is a terminator */
991 if (dscr == NULL)
992 return 0;
993
994 /* TERM descriptor is a terminator */
995 if (udf_rw16(dscr->tag.id) == TAGID_TERM) {
996 free(dscr, M_UDFVOLD);
997 return 0;
998 }
999
1000 /* process all others */
1001 dscr_size = udf_tagsize(dscr, sector_size);
1002 error = udf_process_vds_descriptor(ump, dscr);
1003 if (error) {
1004 free(dscr, M_UDFVOLD);
1005 break;
1006 }
1007 assert((dscr_size % sector_size) == 0);
1008
1009 len -= dscr_size;
1010 loc += dscr_size / sector_size;
1011 }
1012
1013 return error;
1014 }
1015
1016
1017 int
1018 udf_read_vds_space(struct udf_mount *ump)
1019 {
1020 /* struct udf_args *args = &ump->mount_args; */
1021 struct anchor_vdp *anchor, *anchor2;
1022 size_t size;
1023 uint32_t main_loc, main_len;
1024 uint32_t reserve_loc, reserve_len;
1025 int error;
1026
1027 /*
1028 * read in VDS space provided by the anchors; if one descriptor read
1029 * fails, try the mirror sector.
1030 *
1031 * check if 2nd anchor is different from 1st; if so, go for 2nd. This
1032 * avoids the `compatibility features' of DirectCD that may confuse
1033 * stuff completely.
1034 */
1035
1036 anchor = ump->anchors[0];
1037 anchor2 = ump->anchors[1];
1038 assert(anchor);
1039
1040 if (anchor2) {
1041 size = sizeof(struct extent_ad);
1042 if (memcmp(&anchor->main_vds_ex, &anchor2->main_vds_ex, size))
1043 anchor = anchor2;
1044 /* reserve is specified to be a literal copy of main */
1045 }
1046
1047 main_loc = udf_rw32(anchor->main_vds_ex.loc);
1048 main_len = udf_rw32(anchor->main_vds_ex.len);
1049
1050 reserve_loc = udf_rw32(anchor->reserve_vds_ex.loc);
1051 reserve_len = udf_rw32(anchor->reserve_vds_ex.len);
1052
1053 error = udf_read_vds_extent(ump, main_loc, main_len);
1054 if (error) {
1055 printf("UDF mount: reading in reserve VDS extent\n");
1056 error = udf_read_vds_extent(ump, reserve_loc, reserve_len);
1057 }
1058
1059 return error;
1060 }
1061
1062 /* --------------------------------------------------------------------- */
1063
1064 /*
1065 * Read in the logical volume integrity sequence pointed to by our logical
1066 * volume descriptor. It's a sequence that can be extended using fields in the
1067 * integrity descriptor itself. On sequential media only one is found, on
1068 * rewritable media a sequence of descriptors can be found as a form of
1069 * history keeping and on non sequential write-once media the chain is vital
1070 * to allow more and more descriptors to be written. The last descriptor
1071 * written in an extent needs to claim space for a new extent.
1072 */
1073
1074 static int
1075 udf_retrieve_lvint(struct udf_mount *ump)
1076 {
1077 union dscrptr *dscr;
1078 struct logvol_int_desc *lvint;
1079 struct udf_lvintq *trace;
1080 uint32_t lb_size, lbnum, len;
1081 int dscr_type, error, trace_len;
1082
1083 lb_size = udf_rw32(ump->logical_vol->lb_size);
1084 len = udf_rw32(ump->logical_vol->integrity_seq_loc.len);
1085 lbnum = udf_rw32(ump->logical_vol->integrity_seq_loc.loc);
1086
1087 /* clean trace */
1088 memset(ump->lvint_trace, 0,
1089 UDF_LVDINT_SEGMENTS * sizeof(struct udf_lvintq));
1090
1091 trace_len = 0;
1092 trace = ump->lvint_trace;
1093 trace->start = lbnum;
1094 trace->end = lbnum + len/lb_size;
1095 trace->pos = 0;
1096 trace->wpos = 0;
1097
1098 lvint = NULL;
1099 dscr = NULL;
1100 error = 0;
1101 while (len) {
1102 trace->pos = lbnum - trace->start;
1103 trace->wpos = trace->pos + 1;
1104
1105 /* read in our integrity descriptor */
1106 error = udf_read_phys_dscr(ump, lbnum, M_UDFVOLD, &dscr);
1107 if (!error) {
1108 if (dscr == NULL) {
1109 trace->wpos = trace->pos;
1110 break; /* empty terminates */
1111 }
1112 dscr_type = udf_rw16(dscr->tag.id);
1113 if (dscr_type == TAGID_TERM) {
1114 trace->wpos = trace->pos;
1115 break; /* clean terminator */
1116 }
1117 if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
1118 /* fatal... corrupt disc */
1119 error = ENOENT;
1120 break;
1121 }
1122 if (lvint)
1123 free(lvint, M_UDFVOLD);
1124 lvint = &dscr->lvid;
1125 dscr = NULL;
1126 } /* else hope for the best... maybe the next is ok */
1127
1128 DPRINTFIF(VOLUMES, lvint, ("logvol integrity read, state %s\n",
1129 udf_rw32(lvint->integrity_type) ? "CLOSED" : "OPEN"));
1130
1131 /* proceed sequential */
1132 lbnum += 1;
1133 len -= lb_size;
1134
1135 /* are we linking to a new piece? */
1136 if (dscr && lvint->next_extent.len) {
1137 len = udf_rw32(lvint->next_extent.len);
1138 lbnum = udf_rw32(lvint->next_extent.loc);
1139
1140 if (trace_len >= UDF_LVDINT_SEGMENTS-1) {
1141 /* IEK! segment link full... */
1142 DPRINTF(VOLUMES, ("lvdint segments full\n"));
1143 error = EINVAL;
1144 } else {
1145 trace++;
1146 trace_len++;
1147
1148 trace->start = lbnum;
1149 trace->end = lbnum + len/lb_size;
1150 trace->pos = 0;
1151 trace->wpos = 0;
1152 }
1153 }
1154 }
1155
1156 /* clean up the mess, esp. when there is an error */
1157 if (dscr)
1158 free(dscr, M_UDFVOLD);
1159
1160 if (error && lvint) {
1161 free(lvint, M_UDFVOLD);
1162 lvint = NULL;
1163 }
1164
1165 if (!lvint)
1166 error = ENOENT;
1167
1168 ump->logvol_integrity = lvint;
1169 return error;
1170 }
1171
1172
1173 static int
1174 udf_loose_lvint_history(struct udf_mount *ump)
1175 {
1176 union dscrptr **bufs, *dscr, *last_dscr;
1177 struct udf_lvintq *trace, *in_trace, *out_trace;
1178 struct logvol_int_desc *lvint;
1179 uint32_t in_ext, in_pos, in_len;
1180 uint32_t out_ext, out_wpos, out_len;
1181 uint32_t lb_size, packet_size, lb_num;
1182 uint32_t len, start;
1183 int ext, minext, extlen, cnt, cpy_len, dscr_type;
1184 int losing;
1185 int error;
1186
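	/*
	 * Drop the oldest `losing' descriptors and copy the remaining ones
	 * forward over the recorded extents, so new integrity descriptors
	 * can be appended again.
	 */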
1187 DPRINTF(VOLUMES, ("need to lose some lvint history\n"));
1188
1189 lb_size = udf_rw32(ump->logical_vol->lb_size);
1190 packet_size = ump->data_track.packet_size; /* XXX data track */
1191
1192 /* search smallest extent */
1193 trace = &ump->lvint_trace[0];
1194 minext = trace->end - trace->start;
1195 for (ext = 1; ext < UDF_LVDINT_SEGMENTS; ext++) {
1196 trace = &ump->lvint_trace[ext];
1197 extlen = trace->end - trace->start;
1198 if (extlen == 0)
1199 break;
1200 minext = MIN(minext, extlen);
1201 }
1202 losing = MIN(minext, UDF_LVINT_LOSSAGE);
1203 /* no sense wiping all */
1204 if (losing == minext)
1205 losing--;
1206
1207 DPRINTF(VOLUMES, ("\tlosing %d entries\n", losing));
1208
1209 /* get buffer for pieces */
1210 bufs = malloc(UDF_LVDINT_SEGMENTS * sizeof(void *), M_TEMP, M_WAITOK);
1211
1212 in_ext = 0;
1213 in_pos = losing;
1214 in_trace = &ump->lvint_trace[in_ext];
1215 in_len = in_trace->end - in_trace->start;
1216 out_ext = 0;
1217 out_wpos = 0;
1218 out_trace = &ump->lvint_trace[out_ext];
1219 out_len = out_trace->end - out_trace->start;
1220
1221 last_dscr = NULL;
1222 for(;;) {
1223 out_trace->pos = out_wpos;
1224 out_trace->wpos = out_trace->pos;
1225 if (in_pos >= in_len) {
1226 in_ext++;
1227 in_pos = 0;
1228 in_trace = &ump->lvint_trace[in_ext];
1229 in_len = in_trace->end - in_trace->start;
1230 }
1231 if (out_wpos >= out_len) {
1232 out_ext++;
1233 out_wpos = 0;
1234 out_trace = &ump->lvint_trace[out_ext];
1235 out_len = out_trace->end - out_trace->start;
1236 }
1237 /* copy overlap contents */
1238 cpy_len = MIN(in_len - in_pos, out_len - out_wpos);
1239 cpy_len = MIN(cpy_len, in_len - in_trace->pos);
1240 if (cpy_len == 0)
1241 break;
1242
1243 /* copy */
1244 DPRINTF(VOLUMES, ("\treading %d lvid descriptors\n", cpy_len));
1245 for (cnt = 0; cnt < cpy_len; cnt++) {
1246 /* read in our integrity descriptor */
1247 lb_num = in_trace->start + in_pos + cnt;
1248 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD,
1249 &dscr);
1250 if (error) {
1251 /* copy last one */
1252 dscr = last_dscr;
1253 }
1254 bufs[cnt] = dscr;
1255 if (!error) {
1256 if (dscr == NULL) {
1257 out_trace->pos = out_wpos + cnt;
1258 out_trace->wpos = out_trace->pos;
1259 break; /* empty terminates */
1260 }
1261 dscr_type = udf_rw16(dscr->tag.id);
1262 if (dscr_type == TAGID_TERM) {
1263 out_trace->pos = out_wpos + cnt;
1264 out_trace->wpos = out_trace->pos;
1265 break; /* clean terminator */
1266 }
1267 if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
1268 panic( "UDF integrity sequence "
1269 "corrupted while mounted!\n");
1270 }
1271 last_dscr = dscr;
1272 }
1273 }
1274
1275 /* patch up if first entry was on error */
1276 if (bufs[0] == NULL) {
1277 for (cnt = 0; cnt < cpy_len; cnt++)
1278 if (bufs[cnt] != NULL)
1279 break;
1280 last_dscr = bufs[cnt];
1281 for (; cnt > 0; cnt--) {
1282 bufs[cnt] = last_dscr;
1283 }
1284 }
1285
1286 /* glue + write out */
1287 DPRINTF(VOLUMES, ("\twriting %d lvid descriptors\n", cpy_len));
1288 for (cnt = 0; cnt < cpy_len; cnt++) {
1289 lb_num = out_trace->start + out_wpos + cnt;
1290 lvint = &bufs[cnt]->lvid;
1291
1292 /* set continuation */
1293 len = 0;
1294 start = 0;
1295 if (out_wpos + cnt == out_len) {
1296 /* get continuation */
1297 trace = &ump->lvint_trace[out_ext+1];
1298 len = trace->end - trace->start;
1299 start = trace->start;
1300 }
1301 lvint->next_extent.len = udf_rw32(len);
1302 lvint->next_extent.loc = udf_rw32(start);
1303
1304 lb_num = trace->start + trace->wpos;
1305 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1306 bufs[cnt], lb_num, lb_num);
1307 DPRINTFIF(VOLUMES, error,
1308 ("error writing lvint lb_num\n"));
1309 }
1310
1311 /* free non repeating descriptors */
1312 last_dscr = NULL;
1313 for (cnt = 0; cnt < cpy_len; cnt++) {
1314 if (bufs[cnt] != last_dscr)
1315 free(bufs[cnt], M_UDFVOLD);
1316 last_dscr = bufs[cnt];
1317 }
1318
1319 /* advance */
1320 in_pos += cpy_len;
1321 out_wpos += cpy_len;
1322 }
1323
1324 free(bufs, M_TEMP);
1325
1326 return 0;
1327 }
1328
1329
1330 static int
1331 udf_writeout_lvint(struct udf_mount *ump, int lvflag)
1332 {
1333 struct udf_lvintq *trace;
1334 struct timeval now_v;
1335 struct timespec now_s;
1336 uint32_t sector;
1337 int logvol_integrity;
1338 int space, error;
1339
1340 DPRINTF(VOLUMES, ("writing out logvol integrity descriptor\n"));
1341
1342 again:
1343 /* get free space in last chunk */
1344 trace = ump->lvint_trace;
1345 while (trace->wpos > (trace->end - trace->start)) {
1346 DPRINTF(VOLUMES, ("skip : start = %d, end = %d, pos = %d, "
1347 "wpos = %d\n", trace->start, trace->end,
1348 trace->pos, trace->wpos));
1349 trace++;
1350 }
1351
1352 /* check if there is space to append */
1353 space = (trace->end - trace->start) - trace->wpos;
1354 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
1355 "space = %d\n", trace->start, trace->end, trace->pos,
1356 trace->wpos, space));
1357
1358 /* get state */
1359 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
1360 if (logvol_integrity == UDF_INTEGRITY_CLOSED) {
1361 if ((space < 3) && (lvflag & UDF_APPENDONLY_LVINT)) {
1362 /* don't allow this logvol to be opened */
1363 /* TODO extent LVINT space if possible */
1364 return EROFS;
1365 }
1366 }
1367
1368 if (space < 1) {
1369 if (lvflag & UDF_APPENDONLY_LVINT)
1370 return EROFS;
1371 /* lose history by re-writing extents */
1372 error = udf_loose_lvint_history(ump);
1373 if (error)
1374 return error;
1375 goto again;
1376 }
1377
1378 /* update our integrity descriptor to identify us and timestamp it */
1379 DPRINTF(VOLUMES, ("updating integrity descriptor\n"));
1380 microtime(&now_v);
1381 TIMEVAL_TO_TIMESPEC(&now_v, &now_s);
1382 udf_timespec_to_timestamp(&now_s, &ump->logvol_integrity->time);
1383 udf_set_regid(&ump->logvol_info->impl_id, IMPL_NAME);
1384 udf_add_impl_regid(ump, &ump->logvol_info->impl_id);
1385
1386 /* writeout integrity descriptor */
1387 sector = trace->start + trace->wpos;
1388 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1389 (union dscrptr *) ump->logvol_integrity,
1390 sector, sector);
1391 DPRINTF(VOLUMES, ("writeout lvint : error = %d\n", error));
1392 if (error)
1393 return error;
1394
1395 /* advance write position */
1396 trace->wpos++; space--;
1397 if (space >= 1) {
1398 /* append terminator */
1399 sector = trace->start + trace->wpos;
1400 error = udf_write_terminator(ump, sector);
1401
1402 DPRINTF(VOLUMES, ("write terminator : error = %d\n", error));
1403 }
1404
1405 space = (trace->end - trace->start) - trace->wpos;
1406 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
1407 "space = %d\n", trace->start, trace->end, trace->pos,
1408 trace->wpos, space));
1409 DPRINTF(VOLUMES, ("finished writing out logvol integrity descriptor "
1410 "successfull\n"));
1411
1412 return error;
1413 }
1414
1415 /* --------------------------------------------------------------------- */
1416
1417 static int
1418 udf_read_partition_spacetables(struct udf_mount *ump)
1419 {
1420 union dscrptr *dscr;
1421 /* struct udf_args *args = &ump->mount_args; */
1422 struct part_desc *partd;
1423 struct part_hdr_desc *parthdr;
1424 struct udf_bitmap *bitmap;
1425 uint32_t phys_part;
1426 uint32_t lb_num, len;
1427 int error, dscr_type;
1428
1429 /* unallocated space map */
1430 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1431 partd = ump->partitions[phys_part];
1432 if (partd == NULL)
1433 continue;
1434 parthdr = &partd->_impl_use.part_hdr;
1435
1436 lb_num = udf_rw32(partd->start_loc);
1437 lb_num += udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
1438 len = udf_rw32(parthdr->unalloc_space_bitmap.len);
1439 if (len == 0)
1440 continue;
1441
1442 DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num));
1443 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
1444 if (!error && dscr) {
1445 /* analyse */
1446 dscr_type = udf_rw16(dscr->tag.id);
1447 if (dscr_type == TAGID_SPACE_BITMAP) {
1448 DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
1449 ump->part_unalloc_dscr[phys_part] = &dscr->sbd;
1450
1451 /* fill in ump->part_unalloc_bits */
1452 bitmap = &ump->part_unalloc_bits[phys_part];
1453 bitmap->blob = (uint8_t *) dscr;
1454 bitmap->bits = dscr->sbd.data;
1455 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1456 bitmap->pages = NULL; /* TODO */
1457 bitmap->data_pos = 0;
1458 bitmap->metadata_pos = 0;
1459 } else {
1460 free(dscr, M_UDFVOLD);
1461
1462 printf( "UDF mount: error reading unallocated "
1463 "space bitmap\n");
1464 return EROFS;
1465 }
1466 } else {
1467 /* blank not allowed */
1468 printf("UDF mount: blank unallocated space bitmap\n");
1469 return EROFS;
1470 }
1471 }
1472
1473 /* unallocated space table (not supported) */
1474 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1475 partd = ump->partitions[phys_part];
1476 if (partd == NULL)
1477 continue;
1478 parthdr = &partd->_impl_use.part_hdr;
1479
1480 len = udf_rw32(parthdr->unalloc_space_table.len);
1481 if (len) {
1482 printf("UDF mount: space tables not supported\n");
1483 return EROFS;
1484 }
1485 }
1486
1487 /* freed space map */
1488 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1489 partd = ump->partitions[phys_part];
1490 if (partd == NULL)
1491 continue;
1492 parthdr = &partd->_impl_use.part_hdr;
1493
1494 /* freed space map */
1495 lb_num = udf_rw32(partd->start_loc);
1496 lb_num += udf_rw32(parthdr->freed_space_bitmap.lb_num);
1497 len = udf_rw32(parthdr->freed_space_bitmap.len);
1498 if (len == 0)
1499 continue;
1500
1501 DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num));
1502 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
1503 if (!error && dscr) {
1504 /* analyse */
1505 dscr_type = udf_rw16(dscr->tag.id);
1506 if (dscr_type == TAGID_SPACE_BITMAP) {
1507 DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
1508 ump->part_freed_dscr[phys_part] = &dscr->sbd;
1509
1510 /* fill in ump->part_freed_bits */
1511 bitmap = &ump->part_unalloc_bits[phys_part];
1512 bitmap->blob = (uint8_t *) dscr;
1513 bitmap->bits = dscr->sbd.data;
1514 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1515 bitmap->pages = NULL; /* TODO */
1516 bitmap->data_pos = 0;
1517 bitmap->metadata_pos = 0;
1518 } else {
1519 free(dscr, M_UDFVOLD);
1520
1521 printf( "UDF mount: error reading freed "
1522 "space bitmap\n");
1523 return EROFS;
1524 }
1525 } else {
1526 /* blank not allowed */
1527 printf("UDF mount: blank freed space bitmap\n");
1528 return EROFS;
1529 }
1530 }
1531
1532 /* freed space table (not supported) */
1533 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1534 partd = ump->partitions[phys_part];
1535 if (partd == NULL)
1536 continue;
1537 parthdr = &partd->_impl_use.part_hdr;
1538
1539 len = udf_rw32(parthdr->freed_space_table.len);
1540 if (len) {
1541 printf("UDF mount: space tables not supported\n");
1542 return EROFS;
1543 }
1544 }
1545
1546 return 0;
1547 }
1548
1549
1550 /* TODO implement async writeout */
1551 int
1552 udf_write_partition_spacetables(struct udf_mount *ump, int waitfor)
1553 {
1554 union dscrptr *dscr;
1555 /* struct udf_args *args = &ump->mount_args; */
1556 struct part_desc *partd;
1557 struct part_hdr_desc *parthdr;
1558 uint32_t phys_part;
1559 uint32_t lb_num, len, ptov;
1560 int error_all, error;
1561
1562 error_all = 0;
1563 /* unallocated space map */
1564 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1565 partd = ump->partitions[phys_part];
1566 if (partd == NULL)
1567 continue;
1568 parthdr = &partd->_impl_use.part_hdr;
1569
1570 ptov = udf_rw32(partd->start_loc);
1571 lb_num = udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
1572 len = udf_rw32(parthdr->unalloc_space_bitmap.len);
1573 if (len == 0)
1574 continue;
1575
1576 DPRINTF(VOLUMES, ("Write unalloc. space bitmap %d\n",
1577 lb_num + ptov));
1578 dscr = (union dscrptr *) ump->part_unalloc_dscr[phys_part];
1579 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1580 (union dscrptr *) dscr,
1581 ptov + lb_num, lb_num);
1582 if (error) {
1583 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
1584 error_all = error;
1585 }
1586 }
1587
1588 /* freed space map */
1589 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1590 partd = ump->partitions[phys_part];
1591 if (partd == NULL)
1592 continue;
1593 parthdr = &partd->_impl_use.part_hdr;
1594
1595 /* freed space map */
1596 ptov = udf_rw32(partd->start_loc);
1597 lb_num = udf_rw32(parthdr->freed_space_bitmap.lb_num);
1598 len = udf_rw32(parthdr->freed_space_bitmap.len);
1599 if (len == 0)
1600 continue;
1601
1602 DPRINTF(VOLUMES, ("Write freed space bitmap %d\n",
1603 lb_num + ptov));
1604 dscr = (union dscrptr *) ump->part_freed_dscr[phys_part];
1605 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1606 (union dscrptr *) dscr,
1607 ptov + lb_num, lb_num);
1608 if (error) {
1609 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
1610 error_all = error;
1611 }
1612 }
1613
1614 return error_all;
1615 }
1616
1617 /*
1618 * Check whether ump's VDS information is correct and complete
1619 */
1620
1621 int
1622 udf_process_vds(struct udf_mount *ump) {
1623 union udf_pmap *mapping;
1624 /* struct udf_args *args = &ump->mount_args; */
1625 struct logvol_int_desc *lvint;
1626 struct udf_logvol_info *lvinfo;
1627 struct part_desc *part;
1628 uint32_t n_pm, mt_l;
1629 uint8_t *pmap_pos;
1630 char *domain_name, *map_name;
1631 const char *check_name;
1632 char bits[128];
1633 int pmap_stype, pmap_size;
1634 int pmap_type, log_part, phys_part, raw_phys_part;
1635 int n_phys, n_virt, n_spar, n_meta;
1636 int len, error;
1637
1638 if (ump == NULL)
1639 return ENOENT;
1640
1641 /* we need at least an anchor (trivial, but for safety) */
1642 if (ump->anchors[0] == NULL)
1643 return EINVAL;
1644
1645 /* we need at least one primary and one logical volume descriptor */
1646 if ((ump->primary_vol == NULL) || (ump->logical_vol == NULL))
1647 return EINVAL;
1648
1649 /* we need at least one partition descriptor */
1650 if (ump->partitions[0] == NULL)
1651 return EINVAL;
1652
1653 /* check logical volume sector size versus device sector size */
1654 if (udf_rw32(ump->logical_vol->lb_size) != ump->discinfo.sector_size) {
1655 printf("UDF mount: format violation, lb_size != sector size\n");
1656 return EINVAL;
1657 }
1658
1659 /* check domain name */
1660 domain_name = ump->logical_vol->domain_id.id;
1661 if (strncmp(domain_name, "*OSTA UDF Compliant", 20)) {
1662 printf("mount_udf: disc not OSTA UDF Compliant, aborting\n");
1663 return EINVAL;
1664 }
1665
1666 /* retrieve logical volume integrity sequence */
1667 error = udf_retrieve_lvint(ump);
1668
1669 /*
1670 * We need at least one logvol integrity descriptor recorded. Note
1671 * that it's OK to have an open logical volume integrity here. The VAT
1672 * will close/update the integrity.
1673 */
1674 if (ump->logvol_integrity == NULL)
1675 return EINVAL;
1676
1677 /* read in and check unallocated and free space info if writing */
1678 if ((ump->vfs_mountp->mnt_flag & MNT_RDONLY) == 0) {
1679 error = udf_read_partition_spacetables(ump);
1680 if (error)
1681 return error;
1682 }
1683
1684 /* process derived structures */
1685 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
1686 lvint = ump->logvol_integrity;
1687 lvinfo = (struct udf_logvol_info *) (&lvint->tables[2 * n_pm]);
1688 ump->logvol_info = lvinfo;
1689
1690 /* TODO check udf versions? */
1691
1692 /*
1693 * check logvol mappings: effective virt->log partmap translation
1694 * check and recording of the mapping results. Saves expensive
1695 * strncmp() in tight places.
1696 */
1697 DPRINTF(VOLUMES, ("checking logvol mappings\n"));
1698 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
1699 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */
1700 pmap_pos = ump->logical_vol->maps;
1701
1702 if (n_pm > UDF_PMAPS) {
1703 printf("UDF mount: too many mappings\n");
1704 return EINVAL;
1705 }
1706
1707 ump->data_part = ump->metadata_part = 0;
1708 n_phys = n_virt = n_spar = n_meta = 0;
1709 for (log_part = 0; log_part < n_pm; log_part++) {
1710 mapping = (union udf_pmap *) pmap_pos;
1711 pmap_stype = pmap_pos[0];
1712 pmap_size = pmap_pos[1];
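		/* type 1 maps directly to a physical partition; type 2 maps
		 * are identified by their partition type id string below */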
1713 switch (pmap_stype) {
1714 case 1: /* physical mapping */
1715 /* volseq = udf_rw16(mapping->pm1.vol_seq_num); */
1716 raw_phys_part = udf_rw16(mapping->pm1.part_num);
1717 pmap_type = UDF_VTOP_TYPE_PHYS;
1718 n_phys++;
1719 ump->data_part = log_part;
1720 ump->metadata_part = log_part;
1721 break;
1722 case 2: /* virtual/sparable/meta mapping */
1723 map_name = mapping->pm2.part_id.id;
1724 /* volseq = udf_rw16(mapping->pm2.vol_seq_num); */
1725 raw_phys_part = udf_rw16(mapping->pm2.part_num);
1726 pmap_type = UDF_VTOP_TYPE_UNKNOWN;
1727 len = UDF_REGID_ID_SIZE;
1728
1729 check_name = "*UDF Virtual Partition";
1730 if (strncmp(map_name, check_name, len) == 0) {
1731 pmap_type = UDF_VTOP_TYPE_VIRT;
1732 n_virt++;
1733 ump->metadata_part = log_part;
1734 break;
1735 }
1736 check_name = "*UDF Sparable Partition";
1737 if (strncmp(map_name, check_name, len) == 0) {
1738 pmap_type = UDF_VTOP_TYPE_SPARABLE;
1739 n_spar++;
1740 ump->data_part = log_part;
1741 ump->metadata_part = log_part;
1742 break;
1743 }
1744 check_name = "*UDF Metadata Partition";
1745 if (strncmp(map_name, check_name, len) == 0) {
1746 pmap_type = UDF_VTOP_TYPE_META;
1747 n_meta++;
1748 ump->metadata_part = log_part;
1749 break;
1750 }
1751 break;
1752 default:
1753 return EINVAL;
1754 }
1755
1756 /*
1757 * BUGALERT: some rogue implementations use random physical
1758 * partition numbers to break other implementations, so look up
1759 * the number.
1760 */
1761 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1762 part = ump->partitions[phys_part];
1763 if (part == NULL)
1764 continue;
1765 if (udf_rw16(part->part_num) == raw_phys_part)
1766 break;
1767 }
1768
1769 DPRINTF(VOLUMES, ("\t%d -> %d(%d) type %d\n", log_part,
1770 raw_phys_part, phys_part, pmap_type));
1771
1772 if (phys_part == UDF_PARTITIONS)
1773 return EINVAL;
1774 if (pmap_type == UDF_VTOP_TYPE_UNKNOWN)
1775 return EINVAL;
1776
1777 ump->vtop [log_part] = phys_part;
1778 ump->vtop_tp[log_part] = pmap_type;
1779
1780 pmap_pos += pmap_size;
1781 }
1782 /* not winning the beauty contest */
1783 ump->vtop_tp[UDF_VTOP_RAWPART] = UDF_VTOP_TYPE_RAW;
1784
1785 /* test some basic UDF assertions/requirements */
1786 if ((n_virt > 1) || (n_spar > 1) || (n_meta > 1))
1787 return EINVAL;
1788
1789 if (n_virt) {
1790 if ((n_phys == 0) || n_spar || n_meta)
1791 return EINVAL;
1792 }
1793 if (n_spar + n_phys == 0)
1794 return EINVAL;
1795
1796 /* determine allocation schemes based on disc format */
1797 /* VATs can only be used on sequential media */
1798 ump->data_alloc = UDF_ALLOC_SPACEMAP;
1799 if (n_virt)
1800 ump->data_alloc = UDF_ALLOC_SEQUENTIAL;
1801
1802 ump->meta_alloc = UDF_ALLOC_SPACEMAP;
1803 if (n_virt)
1804 ump->meta_alloc = UDF_ALLOC_VAT;
1805 if (n_meta)
1806 ump->meta_alloc = UDF_ALLOC_METABITMAP;
1807
1808 /* special cases for pseudo-overwrite */
1809 if (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE) {
1810 ump->data_alloc = UDF_ALLOC_SEQUENTIAL;
1811 if (n_meta) {
1812 ump->meta_alloc = UDF_ALLOC_METASEQUENTIAL;
1813 } else {
1814 ump->meta_alloc = UDF_ALLOC_RELAXEDSEQUENTIAL;
1815 }
1816 }
1817
1818 /* determine default allocation descriptors to use */
1819 ump->data_allocdscr = UDF_ICB_SHORT_ALLOC;
1820 ump->meta_allocdscr = UDF_ICB_SHORT_ALLOC;
1821 if (n_pm > 1) {
1822 ump->data_allocdscr = UDF_ICB_LONG_ALLOC;
1823 ump->meta_allocdscr = UDF_ICB_LONG_ALLOC;
1824 /* metadata partitions are forced to have short */
1825 if (n_meta)
1826 ump->meta_allocdscr = UDF_ICB_SHORT_ALLOC;
1827 }
1828
1829 /* determine logical volume open/closure actions */
1830 if (n_virt) {
1831 ump->lvopen = 0;
1832 if (ump->discinfo.last_session_state == MMC_STATE_CLOSED)
1833 ump->lvopen |= UDF_OPEN_SESSION ;
1834 ump->lvclose = UDF_WRITE_VAT;
1835 if (ump->mount_args.udfmflags & UDFMNT_CLOSESESSION)
1836 ump->lvclose |= UDF_CLOSE_SESSION;
1837 } else {
1838 /* `normal' rewritable or non sequential media */
1839 ump->lvopen = UDF_WRITE_LVINT;
1840 ump->lvclose = UDF_WRITE_LVINT;
1841 if ((ump->discinfo.mmc_cur & MMC_CAP_REWRITABLE) == 0)
1842 ump->lvopen |= UDF_APPENDONLY_LVINT;
1843 }
1844
1845 /*
1846 * Determine scheduler error behaviour. For virtual partitions, update
1847 * the trackinfo; for sparable partitions replace a whole block on the
1848 * sparable table. Always requeue.
1849 */
1850 ump->lvreadwrite = 0;
1851 if (n_virt)
1852 ump->lvreadwrite = UDF_UPDATE_TRACKINFO;
1853 if (n_spar)
1854 ump->lvreadwrite = UDF_REMAP_BLOCK;
1855
1856 /*
1857 * Select our scheduler
1858 */
1859 ump->strategy = &udf_strat_rmw;
1860 if (n_virt || (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE))
1861 ump->strategy = &udf_strat_sequential;
1862 if ((ump->discinfo.mmc_class == MMC_CLASS_DISC) ||
1863 (ump->discinfo.mmc_class == MMC_CLASS_UNKN))
1864 ump->strategy = &udf_strat_direct;
1865 if (n_spar)
1866 ump->strategy = &udf_strat_rmw;
1867
1868 /* print results */
1869 DPRINTF(VOLUMES, ("\tdata alloc scheme %d, meta alloc scheme %d\n",
1870 ump->data_alloc, ump->meta_alloc));
1871 DPRINTF(VOLUMES, ("\tdata partition %d, metadata partition %d\n",
1872 ump->data_part, ump->metadata_part));
1873
1874 bitmask_snprintf(ump->lvopen, UDFLOGVOL_BITS, bits, sizeof(bits));
1875 DPRINTF(VOLUMES, ("\tactions on logvol open %s\n", bits));
1876 bitmask_snprintf(ump->lvclose, UDFLOGVOL_BITS, bits, sizeof(bits));
1877 DPRINTF(VOLUMES, ("\tactions on logvol close %s\n", bits));
1878 bitmask_snprintf(ump->lvreadwrite, UDFONERROR_BITS, bits, sizeof(bits));
1879 DPRINTF(VOLUMES, ("\tactions on logvol errors %s\n", bits));
1880
1881 DPRINTF(VOLUMES, ("\tselected sheduler `%s`\n",
1882 (ump->strategy == &udf_strat_direct) ? "Direct" :
1883 (ump->strategy == &udf_strat_sequential) ? "Sequential" :
1884 (ump->strategy == &udf_strat_rmw) ? "RMW" : "UNKNOWN!"));
1885
1886 /* signal its OK for now */
1887 return 0;
1888 }
1889
1890 /* --------------------------------------------------------------------- */
1891
1892 /*
1893 * Update logical volume name in all structures that keep a record of it. We
1894 * use memmove since each of them might be specified as a source.
1895 *
1896 * Note that it doesn't update the VAT structure!
1897 */
1898
1899 static void
1900 udf_update_logvolname(struct udf_mount *ump, char *logvol_id)
1901 {
1902 struct logvol_desc *lvd = NULL;
1903 struct fileset_desc *fsd = NULL;
1904 struct udf_lv_info *lvi = NULL;
1905
1906 DPRINTF(VOLUMES, ("Updating logical volume name\n"));
1907 lvd = ump->logical_vol;
1908 fsd = ump->fileset_desc;
1909 if (ump->implementation)
1910 lvi = &ump->implementation->_impl_use.lv_info;
1911
1912 	/* logvol's id might be specified as the original so use memmove here */
1913 memmove(lvd->logvol_id, logvol_id, 128);
1914 if (fsd)
1915 memmove(fsd->logvol_id, logvol_id, 128);
1916 if (lvi)
1917 memmove(lvi->logvol_id, logvol_id, 128);
1918 }
1919
1920 /* --------------------------------------------------------------------- */
1921
1922 void
1923 udf_inittag(struct udf_mount *ump, struct desc_tag *tag, int tagid,
1924 uint32_t sector)
1925 {
1926 assert(ump->logical_vol);
1927
1928 tag->id = udf_rw16(tagid);
1929 tag->descriptor_ver = ump->logical_vol->tag.descriptor_ver;
1930 tag->cksum = 0;
1931 tag->reserved = 0;
1932 tag->serial_num = ump->logical_vol->tag.serial_num;
1933 tag->tag_loc = udf_rw32(sector);
1934 }
1935
1936
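/*
 * Hand out the next unique file ID for this logical volume and advance the
 * counter kept in the logical volume integrity descriptor. As far as I can
 * tell the values below 0x10 are reserved by UDF (the root directory gets
 * 0), so we clamp the counter to 0x10 to stay out of that range.
 */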
1937 uint64_t
1938 udf_advance_uniqueid(struct udf_mount *ump)
1939 {
1940 uint64_t unique_id;
1941
1942 mutex_enter(&ump->logvol_mutex);
1943 unique_id = udf_rw64(ump->logvol_integrity->lvint_next_unique_id);
1944 if (unique_id < 0x10)
1945 unique_id = 0x10;
1946 ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id + 1);
1947 mutex_exit(&ump->logvol_mutex);
1948
1949 return unique_id;
1950 }
1951
1952
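/*
 * Adjust the file or directory counter kept in the logical volume info by
 * `sign' (presumably +1 on creation, -1 on removal), picking the counter
 * according to the node's ICB file type. The counters are shared mount-wide,
 * hence the allocate_mutex.
 */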
1953 static void
1954 udf_adjust_filecount(struct udf_node *udf_node, int sign)
1955 {
1956 struct udf_mount *ump = udf_node->ump;
1957 uint32_t num_dirs, num_files;
1958 int udf_file_type;
1959
1960 /* get file type */
1961 if (udf_node->fe) {
1962 udf_file_type = udf_node->fe->icbtag.file_type;
1963 } else {
1964 udf_file_type = udf_node->efe->icbtag.file_type;
1965 }
1966
1967 /* adjust file count */
1968 mutex_enter(&ump->allocate_mutex);
1969 if (udf_file_type == UDF_ICB_FILETYPE_DIRECTORY) {
1970 num_dirs = udf_rw32(ump->logvol_info->num_directories);
1971 ump->logvol_info->num_directories =
1972 udf_rw32((num_dirs + sign));
1973 } else {
1974 num_files = udf_rw32(ump->logvol_info->num_files);
1975 ump->logvol_info->num_files =
1976 udf_rw32((num_files + sign));
1977 }
1978 mutex_exit(&ump->allocate_mutex);
1979 }
1980
1981
1982 void
1983 udf_osta_charset(struct charspec *charspec)
1984 {
1985 bzero(charspec, sizeof(struct charspec));
1986 charspec->type = 0;
1987 strcpy((char *) charspec->inf, "OSTA Compressed Unicode");
1988 }
1989
1990
1991 /* call udf_set_regid first and then one of the suffix setters below */
1992 void
1993 udf_set_regid(struct regid *regid, char const *name)
1994 {
1995 bzero(regid, sizeof(struct regid));
1996 regid->flags = 0; /* not dirty and not protected */
1997 strcpy((char *) regid->id, name);
1998 }
1999
2000
2001 void
2002 udf_add_domain_regid(struct udf_mount *ump, struct regid *regid)
2003 {
2004 uint16_t *ver;
2005
2006 ver = (uint16_t *) regid->id_suffix;
2007 *ver = ump->logvol_info->min_udf_readver;
2008 }
2009
2010
2011 void
2012 udf_add_udf_regid(struct udf_mount *ump, struct regid *regid)
2013 {
2014 uint16_t *ver;
2015
2016 ver = (uint16_t *) regid->id_suffix;
2017 *ver = ump->logvol_info->min_udf_readver;
2018
2019 regid->id_suffix[2] = 4; /* unix */
2020 regid->id_suffix[3] = 8; /* NetBSD */
2021 }
2022
2023
2024 void
2025 udf_add_impl_regid(struct udf_mount *ump, struct regid *regid)
2026 {
2027 regid->id_suffix[0] = 4; /* unix */
2028 regid->id_suffix[1] = 8; /* NetBSD */
2029 }
2030
2031
2032 void
2033 udf_add_app_regid(struct udf_mount *ump, struct regid *regid)
2034 {
2035 regid->id_suffix[0] = APP_VERSION_MAIN;
2036 regid->id_suffix[1] = APP_VERSION_SUB;
2037 }
2038
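/*
 * Build what amounts to the `..' entry for a directory: a FID with the
 * `parent' characteristic set, whose ICB points at the parent directory.
 * The returned size is the 38 byte empty FID rounded up to a multiple of
 * 4, i.e. 40 bytes.
 */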
2039 static int
2040 udf_create_parentfid(struct udf_mount *ump, struct fileid_desc *fid,
2041 struct long_ad *parent, uint64_t unique_id)
2042 {
2043 /* the size of an empty FID is 38 but needs to be a multiple of 4 */
2044 int fidsize = 40;
2045
2046 udf_inittag(ump, &fid->tag, TAGID_FID, udf_rw32(parent->loc.lb_num));
2047 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
2048 fid->file_char = UDF_FILE_CHAR_DIR | UDF_FILE_CHAR_PAR;
2049 fid->icb = *parent;
2050 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
2051 	fid->tag.desc_crc_len = udf_rw16(fidsize - UDF_DESC_TAG_LENGTH);
2052 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
2053
2054 return fidsize;
2055 }
2056
2057 /* --------------------------------------------------------------------- */
2058
2059 /*
2060 * Extended attribute support. UDF knows of 3 places for extended attributes:
2061 *
2062 * (a) inside the file's (e)fe in the length of the extended attribute area
2063 * before the allocation descriptors/filedata
2064 *
2065 * (b) in a file referenced by (e)fe->ext_attr_icb and
2066 *
2067  * (c) in the (e)fe's associated stream directory that can hold various
2068 * sub-files. In the stream directory a few fixed named subfiles are reserved
2069 * for NT/Unix ACL's and OS/2 attributes.
2070 *
2071  * NOTE: Extended attributes are read randomly but always written
2072  * *atomically*. For ACL's this interface is probably different but not known
2073 * to me yet.
2074 *
2075  * Order of extended attributes in a space:
2076 * ECMA 167 EAs
2077 * Non block aligned Implementation Use EAs
2078 * Block aligned Implementation Use EAs
2079 * Application Use EAs
2080 */
2081
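/*
 * Rough sketch of the in-(e)fe layout that the search and insert code below
 * assumes (offsets relative to the start of the EA space of length l_ea):
 *
 *	extattrhdr_desc			(records impl_attr_loc, appl_attr_loc)
 *	ECMA 167 EAs			[sizeof(extattrhdr_desc) .. impl_attr_loc>
 *	Implementation Use EAs		[impl_attr_loc .. appl_attr_loc>
 *	Application Use EAs		[appl_attr_loc .. l_ea>
 *
 * A location equal to l_ea, or the `not present' marker, means that class
 * of attributes is absent.
 */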
2082 static int
2083 udf_impl_extattr_check(struct impl_extattr_entry *implext)
2084 {
2085 uint16_t *spos;
2086
2087 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
2088 /* checksum valid? */
2089 DPRINTF(EXTATTR, ("checking UDF impl. attr checksum\n"));
2090 spos = (uint16_t *) implext->data;
2091 if (udf_rw16(*spos) != udf_ea_cksum((uint8_t *) implext))
2092 return EINVAL;
2093 }
2094 return 0;
2095 }
2096
2097 static void
2098 udf_calc_impl_extattr_checksum(struct impl_extattr_entry *implext)
2099 {
2100 uint16_t *spos;
2101
2102 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
2103 /* set checksum */
2104 spos = (uint16_t *) implext->data;
2105 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
2106 }
2107 }
2108
2109
2110 int
2111 udf_extattr_search_intern(struct udf_node *node,
2112 uint32_t sattr, char const *sattrname,
2113 uint32_t *offsetp, uint32_t *lengthp)
2114 {
2115 struct extattrhdr_desc *eahdr;
2116 struct extattr_entry *attrhdr;
2117 struct impl_extattr_entry *implext;
2118 uint32_t offset, a_l, sector_size;
2119 int32_t l_ea;
2120 uint8_t *pos;
2121 int error;
2122
2123 	/* get sector size from the mountpoint */
2124 sector_size = node->ump->discinfo.sector_size;
2125
2126 /* get information from fe/efe */
2127 if (node->fe) {
2128 l_ea = udf_rw32(node->fe->l_ea);
2129 eahdr = (struct extattrhdr_desc *) node->fe->data;
2130 } else {
2131 assert(node->efe);
2132 l_ea = udf_rw32(node->efe->l_ea);
2133 eahdr = (struct extattrhdr_desc *) node->efe->data;
2134 }
2135
2136 /* something recorded here? */
2137 if (l_ea == 0)
2138 return ENOENT;
2139
2140 /* check extended attribute tag; what to do if it fails? */
2141 error = udf_check_tag(eahdr);
2142 if (error)
2143 return EINVAL;
2144 if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
2145 return EINVAL;
2146 error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
2147 if (error)
2148 return EINVAL;
2149
2150 DPRINTF(EXTATTR, ("Found %d bytes of extended attributes\n", l_ea));
2151
2152 /* looking for Ecma-167 attributes? */
2153 offset = sizeof(struct extattrhdr_desc);
2154
2155 	/* looking for either implementation use or application use */
2156 if (sattr == 2048) { /* [4/48.10.8] */
2157 offset = udf_rw32(eahdr->impl_attr_loc);
2158 if (offset == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2159 return ENOENT;
2160 }
2161 if (sattr == 65536) { /* [4/48.10.9] */
2162 offset = udf_rw32(eahdr->appl_attr_loc);
2163 if (offset == UDF_APPL_ATTR_LOC_NOT_PRESENT)
2164 return ENOENT;
2165 }
2166
2167 /* paranoia check offset and l_ea */
2168 if (l_ea + offset >= sector_size - sizeof(struct extattr_entry))
2169 return EINVAL;
2170
2171 DPRINTF(EXTATTR, ("Starting at offset %d\n", offset));
2172
2173 /* find our extended attribute */
2174 l_ea -= offset;
2175 pos = (uint8_t *) eahdr + offset;
2176
2177 while (l_ea >= sizeof(struct extattr_entry)) {
2178 DPRINTF(EXTATTR, ("%d extended attr bytes left\n", l_ea));
2179 attrhdr = (struct extattr_entry *) pos;
2180 implext = (struct impl_extattr_entry *) pos;
2181
2182 		/* get complete attribute length and check for rogue values */
2183 a_l = udf_rw32(attrhdr->a_l);
2184 DPRINTF(EXTATTR, ("attribute %d:%d, len %d/%d\n",
2185 udf_rw32(attrhdr->type),
2186 attrhdr->subtype, a_l, l_ea));
2187 if ((a_l == 0) || (a_l > l_ea))
2188 return EINVAL;
2189
2190 if (attrhdr->type != sattr)
2191 goto next_attribute;
2192
2193 /* we might have found it! */
2194 if (attrhdr->type < 2048) { /* Ecma-167 attribute */
2195 *offsetp = offset;
2196 *lengthp = a_l;
2197 return 0; /* success */
2198 }
2199
2200 /*
2201 * Implementation use and application use extended attributes
2202 		 * have a name to identify them. They share the same structure;
2203 		 * only UDF implementation use extended attributes have a
2204 		 * checksum we need to check.
2205 */
2206
2207 DPRINTF(EXTATTR, ("named attribute %s\n", implext->imp_id.id));
2208 if (strcmp(implext->imp_id.id, sattrname) == 0) {
2209 /* we have found our appl/implementation attribute */
2210 *offsetp = offset;
2211 *lengthp = a_l;
2212 return 0; /* success */
2213 }
2214
2215 next_attribute:
2216 /* next attribute */
2217 pos += a_l;
2218 l_ea -= a_l;
2219 offset += a_l;
2220 }
2221 /* not found */
2222 return ENOENT;
2223 }
2224
2225
2226 static void
2227 udf_extattr_insert_internal(struct udf_mount *ump, union dscrptr *dscr,
2228 struct extattr_entry *extattr)
2229 {
2230 struct file_entry *fe;
2231 struct extfile_entry *efe;
2232 struct extattrhdr_desc *extattrhdr;
2233 struct impl_extattr_entry *implext;
2234 uint32_t impl_attr_loc, appl_attr_loc, l_ea, a_l, exthdr_len;
2235 uint32_t *l_eap, l_ad;
2236 uint16_t *spos;
2237 uint8_t *bpos, *data;
2238
2239 if (udf_rw16(dscr->tag.id) == TAGID_FENTRY) {
2240 fe = &dscr->fe;
2241 data = fe->data;
2242 l_eap = &fe->l_ea;
2243 l_ad = udf_rw32(fe->l_ad);
2244 } else if (udf_rw16(dscr->tag.id) == TAGID_EXTFENTRY) {
2245 efe = &dscr->efe;
2246 data = efe->data;
2247 l_eap = &efe->l_ea;
2248 l_ad = udf_rw32(efe->l_ad);
2249 } else {
2250 panic("Bad tag passed to udf_extattr_insert_internal");
2251 }
2252
2253 	/* appending to file descriptors that already have data written is not supported yet */
2254 assert(l_ad == 0);
2255
2256 /* should have a header! */
2257 extattrhdr = (struct extattrhdr_desc *) data;
2258 l_ea = udf_rw32(*l_eap);
2259 if (l_ea == 0) {
2260 /* create empty extended attribute header */
2261 exthdr_len = sizeof(struct extattrhdr_desc);
2262
2263 udf_inittag(ump, &extattrhdr->tag, TAGID_EXTATTR_HDR,
2264 /* loc */ 0);
2265 extattrhdr->impl_attr_loc = udf_rw32(exthdr_len);
2266 extattrhdr->appl_attr_loc = udf_rw32(exthdr_len);
2267 extattrhdr->tag.desc_crc_len = udf_rw16(8);
2268
2269 /* record extended attribute header length */
2270 l_ea = exthdr_len;
2271 *l_eap = udf_rw32(l_ea);
2272 }
2273
2274 /* extract locations */
2275 impl_attr_loc = udf_rw32(extattrhdr->impl_attr_loc);
2276 appl_attr_loc = udf_rw32(extattrhdr->appl_attr_loc);
2277 if (impl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2278 impl_attr_loc = l_ea;
2279 if (appl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2280 appl_attr_loc = l_ea;
2281
2282 /* Ecma 167 EAs */
2283 if (udf_rw32(extattr->type) < 2048) {
2284 assert(impl_attr_loc == l_ea);
2285 assert(appl_attr_loc == l_ea);
2286 }
2287
2288 /* implementation use extended attributes */
2289 if (udf_rw32(extattr->type) == 2048) {
2290 assert(appl_attr_loc == l_ea);
2291
2292 /* calculate and write extended attribute header checksum */
2293 implext = (struct impl_extattr_entry *) extattr;
2294 assert(udf_rw32(implext->iu_l) == 4); /* [UDF 3.3.4.5] */
2295 spos = (uint16_t *) implext->data;
2296 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
2297 }
2298
2299 /* application use extended attributes */
2300 assert(udf_rw32(extattr->type) != 65536);
2301 assert(appl_attr_loc == l_ea);
2302
2303 /* append the attribute at the end of the current space */
2304 bpos = data + udf_rw32(*l_eap);
2305 a_l = udf_rw32(extattr->a_l);
2306
2307 /* update impl. attribute locations */
2308 if (udf_rw32(extattr->type) < 2048) {
2309 impl_attr_loc = l_ea + a_l;
2310 appl_attr_loc = l_ea + a_l;
2311 }
2312 if (udf_rw32(extattr->type) == 2048) {
2313 appl_attr_loc = l_ea + a_l;
2314 }
2315
2316 /* copy and advance */
2317 memcpy(bpos, extattr, a_l);
2318 l_ea += a_l;
2319 *l_eap = udf_rw32(l_ea);
2320
2321 /* do the `dance` again backwards */
2322 if (udf_rw16(ump->logical_vol->tag.descriptor_ver) != 2) {
2323 if (impl_attr_loc == l_ea)
2324 impl_attr_loc = UDF_IMPL_ATTR_LOC_NOT_PRESENT;
2325 if (appl_attr_loc == l_ea)
2326 appl_attr_loc = UDF_APPL_ATTR_LOC_NOT_PRESENT;
2327 }
2328
2329 /* store offsets */
2330 extattrhdr->impl_attr_loc = udf_rw32(impl_attr_loc);
2331 extattrhdr->appl_attr_loc = udf_rw32(appl_attr_loc);
2332 }
2333
2334
2335 /* --------------------------------------------------------------------- */
2336
2337 static int
2338 udf_update_lvid_from_vat_extattr(struct udf_node *vat_node)
2339 {
2340 struct udf_mount *ump;
2341 struct udf_logvol_info *lvinfo;
2342 struct impl_extattr_entry *implext;
2343 struct vatlvext_extattr_entry lvext;
2344 const char *extstr = "*UDF VAT LVExtension";
2345 uint64_t vat_uniqueid;
2346 uint32_t offset, a_l;
2347 uint8_t *ea_start, *lvextpos;
2348 int error;
2349
2350 /* get mountpoint and lvinfo */
2351 ump = vat_node->ump;
2352 lvinfo = ump->logvol_info;
2353
2354 /* get information from fe/efe */
2355 if (vat_node->fe) {
2356 vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
2357 ea_start = vat_node->fe->data;
2358 } else {
2359 vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
2360 ea_start = vat_node->efe->data;
2361 }
2362
2363 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
2364 if (error)
2365 return error;
2366
2367 implext = (struct impl_extattr_entry *) (ea_start + offset);
2368 error = udf_impl_extattr_check(implext);
2369 if (error)
2370 return error;
2371
2372 /* paranoia */
2373 if (a_l != sizeof(*implext) -1 + udf_rw32(implext->iu_l) + sizeof(lvext)) {
2374 DPRINTF(VOLUMES, ("VAT LVExtension size doesn't compute\n"));
2375 return EINVAL;
2376 }
2377
2378 /*
2379 	 * we have found our "VAT LVExtension" attribute. BUT due to a
2380 * bug in the specification it might not be word aligned so
2381 * copy first to avoid panics on some machines (!!)
2382 */
2383 DPRINTF(VOLUMES, ("Found VAT LVExtension attr\n"));
2384 lvextpos = implext->data + udf_rw32(implext->iu_l);
2385 memcpy(&lvext, lvextpos, sizeof(lvext));
2386
2387 /* check if it was updated the last time */
2388 if (udf_rw64(lvext.unique_id_chk) == vat_uniqueid) {
2389 lvinfo->num_files = lvext.num_files;
2390 lvinfo->num_directories = lvext.num_directories;
2391 udf_update_logvolname(ump, lvext.logvol_id);
2392 } else {
2393 DPRINTF(VOLUMES, ("VAT LVExtension out of date\n"));
2394 /* replace VAT LVExt by free space EA */
2395 memset(implext->imp_id.id, 0, UDF_REGID_ID_SIZE);
2396 strcpy(implext->imp_id.id, "*UDF FreeEASpace");
2397 udf_calc_impl_extattr_checksum(implext);
2398 }
2399
2400 return 0;
2401 }
2402
2403
2404 static int
2405 udf_update_vat_extattr_from_lvid(struct udf_node *vat_node)
2406 {
2407 struct udf_mount *ump;
2408 struct udf_logvol_info *lvinfo;
2409 struct impl_extattr_entry *implext;
2410 struct vatlvext_extattr_entry lvext;
2411 const char *extstr = "*UDF VAT LVExtension";
2412 uint64_t vat_uniqueid;
2413 uint32_t offset, a_l;
2414 uint8_t *ea_start, *lvextpos;
2415 int error;
2416
2417 /* get mountpoint and lvinfo */
2418 ump = vat_node->ump;
2419 lvinfo = ump->logvol_info;
2420
2421 /* get information from fe/efe */
2422 if (vat_node->fe) {
2423 vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
2424 ea_start = vat_node->fe->data;
2425 } else {
2426 vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
2427 ea_start = vat_node->efe->data;
2428 }
2429
2430 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
2431 if (error)
2432 return error;
2433 /* found, it existed */
2434
2435 /* paranoia */
2436 implext = (struct impl_extattr_entry *) (ea_start + offset);
2437 error = udf_impl_extattr_check(implext);
2438 if (error) {
2439 DPRINTF(VOLUMES, ("VAT LVExtension bad on update\n"));
2440 return error;
2441 }
2442 /* it is correct */
2443
2444 /*
2445 	 * we have found our "VAT LVExtension" attribute. BUT due to a
2446 * bug in the specification it might not be word aligned so
2447 * copy first to avoid panics on some machines (!!)
2448 */
2449 DPRINTF(VOLUMES, ("Updating VAT LVExtension attr\n"));
2450 lvextpos = implext->data + udf_rw32(implext->iu_l);
2451
2452 lvext.unique_id_chk = vat_uniqueid;
2453 lvext.num_files = lvinfo->num_files;
2454 lvext.num_directories = lvinfo->num_directories;
2455 memmove(lvext.logvol_id, ump->logical_vol->logvol_id, 128);
2456
2457 memcpy(lvextpos, &lvext, sizeof(lvext));
2458
2459 return 0;
2460 }
2461
2462 /* --------------------------------------------------------------------- */
2463
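/*
 * The VAT (Virtual Allocation Table) is kept in memory as ump->vat_table;
 * after a header of ump->vat_offset bytes it holds one 32 bit entry per
 * virtual block mapping it onto a block in the underlying partition. A
 * minimal translation sketch, assuming that layout (the real lookup lives
 * elsewhere in the vtop code), would be something like
 *
 *	entry   = ump->vat_table + ump->vat_offset + virt_lb * 4;
 *	phys_lb = udf_rw32(*(uint32_t *) entry);
 *
 * The read/write helpers below only shuttle bytes in and out of this
 * in-core copy; nothing hits the disc until udf_writeout_vat().
 */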
2464 int
2465 udf_vat_read(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
2466 {
2467 struct udf_mount *ump = vat_node->ump;
2468
2469 if (offset + size > ump->vat_offset + ump->vat_entries * 4)
2470 return EINVAL;
2471
2472 memcpy(blob, ump->vat_table + offset, size);
2473 return 0;
2474 }
2475
2476 int
2477 udf_vat_write(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
2478 {
2479 struct udf_mount *ump = vat_node->ump;
2480 uint32_t offset_high;
2481 uint8_t *new_vat_table;
2482
2483 	/* extend the VAT allocation if needed */
2484 offset_high = offset + size;
2485 if (offset_high >= ump->vat_table_alloc_len) {
2486 /* realloc */
2487 new_vat_table = realloc(ump->vat_table,
2488 ump->vat_table_alloc_len + UDF_VAT_CHUNKSIZE,
2489 M_UDFVOLD, M_WAITOK | M_CANFAIL);
2490 if (!new_vat_table) {
2491 			printf("udf_vat_write: can't extend VAT, out of mem\n");
2492 return ENOMEM;
2493 }
2494 ump->vat_table = new_vat_table;
2495 ump->vat_table_alloc_len += UDF_VAT_CHUNKSIZE;
2496 }
2497 ump->vat_table_len = MAX(ump->vat_table_len, offset_high);
2498
2499 memcpy(ump->vat_table + offset, blob, size);
2500 return 0;
2501 }
2502
2503 /* --------------------------------------------------------------------- */
2504
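/*
 * The VAT file comes in two flavours: the old UDF 1.50 style where the
 * table is followed by a small tail (struct udf_oldvat_tail) identified by
 * the "*UDF Virtual Alloc Tbl" regid, and the newer VAT2 style where a
 * struct udf_vat header precedes the table. Which one we write is derived
 * from the ICB file type of the VAT node: 0 means old style,
 * UDF_ICB_FILETYPE_VAT means VAT2.
 */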
2505 /* TODO support previous VAT location writeout */
2506 static int
2507 udf_update_vat_descriptor(struct udf_mount *ump)
2508 {
2509 struct udf_node *vat_node = ump->vat_node;
2510 struct udf_logvol_info *lvinfo = ump->logvol_info;
2511 struct icb_tag *icbtag;
2512 struct udf_oldvat_tail *oldvat_tl;
2513 struct udf_vat *vat;
2514 uint64_t unique_id;
2515 uint32_t lb_size;
2516 uint8_t *raw_vat;
2517 int filetype, error;
2518
2519 KASSERT(vat_node);
2520 KASSERT(lvinfo);
2521 lb_size = udf_rw32(ump->logical_vol->lb_size);
2522
2523 /* get our new unique_id */
2524 unique_id = udf_advance_uniqueid(ump);
2525
2526 /* get information from fe/efe */
2527 if (vat_node->fe) {
2528 icbtag = &vat_node->fe->icbtag;
2529 vat_node->fe->unique_id = udf_rw64(unique_id);
2530 } else {
2531 icbtag = &vat_node->efe->icbtag;
2532 vat_node->efe->unique_id = udf_rw64(unique_id);
2533 }
2534
2535 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
2536 filetype = icbtag->file_type;
2537 KASSERT((filetype == 0) || (filetype == UDF_ICB_FILETYPE_VAT));
2538
2539 /* allocate piece to process head or tail of VAT file */
2540 raw_vat = malloc(lb_size, M_TEMP, M_WAITOK);
2541
2542 if (filetype == 0) {
2543 /*
2544 * Update "*UDF VAT LVExtension" extended attribute from the
2545 * lvint if present.
2546 */
2547 udf_update_vat_extattr_from_lvid(vat_node);
2548
2549 /* setup identifying regid */
2550 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2551 memset(oldvat_tl, 0, sizeof(struct udf_oldvat_tail));
2552
2553 udf_set_regid(&oldvat_tl->id, "*UDF Virtual Alloc Tbl");
2554 udf_add_udf_regid(ump, &oldvat_tl->id);
2555 oldvat_tl->prev_vat = udf_rw32(0xffffffff);
2556
2557 /* write out new tail of virtual allocation table file */
2558 error = udf_vat_write(vat_node, raw_vat,
2559 sizeof(struct udf_oldvat_tail), ump->vat_entries * 4);
2560 } else {
2561 /* compose the VAT2 header */
2562 vat = (struct udf_vat *) raw_vat;
2563 memset(vat, 0, sizeof(struct udf_vat));
2564
2565 vat->header_len = udf_rw16(152); /* as per spec */
2566 vat->impl_use_len = udf_rw16(0);
2567 memmove(vat->logvol_id, ump->logical_vol->logvol_id, 128);
2568 vat->prev_vat = udf_rw32(0xffffffff);
2569 vat->num_files = lvinfo->num_files;
2570 vat->num_directories = lvinfo->num_directories;
2571 vat->min_udf_readver = lvinfo->min_udf_readver;
2572 vat->min_udf_writever = lvinfo->min_udf_writever;
2573 vat->max_udf_writever = lvinfo->max_udf_writever;
2574
2575 error = udf_vat_write(vat_node, raw_vat,
2576 sizeof(struct udf_vat), 0);
2577 }
2578 free(raw_vat, M_TEMP);
2579
2580 	return error;	/* 0 on success */
2581 }
2582
2583
2584 int
2585 udf_writeout_vat(struct udf_mount *ump)
2586 {
2587 struct udf_node *vat_node = ump->vat_node;
2588 uint32_t vat_length;
2589 int error;
2590
2591 KASSERT(vat_node);
2592
2593 DPRINTF(CALL, ("udf_writeout_vat\n"));
2594
2595 mutex_enter(&ump->allocate_mutex);
2596 udf_update_vat_descriptor(ump);
2597
2598 	/* write out the VAT contents; TODO intelligent writing */
2599 vat_length = ump->vat_table_len;
2600 error = vn_rdwr(UIO_WRITE, vat_node->vnode,
2601 ump->vat_table, ump->vat_table_len, 0,
2602 UIO_SYSSPACE, IO_NODELOCKED, FSCRED, NULL, NULL);
2603 	mutex_exit(&ump->allocate_mutex);
2604 
2605 	if (error) {
2606 		printf("udf_writeout_vat: failed to write out VAT contents\n");
2607 		goto out;
2608 	}
2609
2610 vflushbuf(ump->vat_node->vnode, 1 /* sync */);
2611 error = VOP_FSYNC(ump->vat_node->vnode,
2612 FSCRED, FSYNC_WAIT, 0, 0);
2613 if (error)
2614 printf("udf_writeout_vat: error writing VAT node!\n");
2615 out:
2616
2617 return error;
2618 }
2619
2620 /* --------------------------------------------------------------------- */
2621
2622 /*
2623  * Read in relevant pieces of VAT file and check if it's indeed a VAT file
2624 * descriptor. If OK, read in complete VAT file.
2625 */
2626
2627 static int
2628 udf_check_for_vat(struct udf_node *vat_node)
2629 {
2630 struct udf_mount *ump;
2631 struct icb_tag *icbtag;
2632 struct timestamp *mtime;
2633 struct udf_vat *vat;
2634 struct udf_oldvat_tail *oldvat_tl;
2635 struct udf_logvol_info *lvinfo;
2636 uint64_t unique_id;
2637 uint32_t vat_length;
2638 uint32_t vat_offset, vat_entries, vat_table_alloc_len;
2639 uint32_t sector_size;
2640 uint32_t *raw_vat;
2641 uint8_t *vat_table;
2642 char *regid_name;
2643 int filetype;
2644 int error;
2645
2646 	/* vat_length is recorded in a 64 bit field though a VAT that large is impossible */
2647
2648 DPRINTF(VOLUMES, ("Checking for VAT\n"));
2649 if (!vat_node)
2650 return ENOENT;
2651
2652 /* get mount info */
2653 ump = vat_node->ump;
2654 sector_size = udf_rw32(ump->logical_vol->lb_size);
2655
2656 /* check assertions */
2657 assert(vat_node->fe || vat_node->efe);
2658 assert(ump->logvol_integrity);
2659
2660 /* set vnode type to regular file or we can't read from it! */
2661 vat_node->vnode->v_type = VREG;
2662
2663 /* get information from fe/efe */
2664 if (vat_node->fe) {
2665 vat_length = udf_rw64(vat_node->fe->inf_len);
2666 icbtag = &vat_node->fe->icbtag;
2667 mtime = &vat_node->fe->mtime;
2668 unique_id = udf_rw64(vat_node->fe->unique_id);
2669 } else {
2670 vat_length = udf_rw64(vat_node->efe->inf_len);
2671 icbtag = &vat_node->efe->icbtag;
2672 mtime = &vat_node->efe->mtime;
2673 unique_id = udf_rw64(vat_node->efe->unique_id);
2674 }
2675
2676 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
2677 filetype = icbtag->file_type;
2678 if ((filetype != 0) && (filetype != UDF_ICB_FILETYPE_VAT))
2679 return ENOENT;
2680
2681 DPRINTF(VOLUMES, ("\tPossible VAT length %d\n", vat_length));
2682
2683 vat_table_alloc_len =
2684 ((vat_length + UDF_VAT_CHUNKSIZE-1) / UDF_VAT_CHUNKSIZE)
2685 * UDF_VAT_CHUNKSIZE;
2686
2687 vat_table = malloc(vat_table_alloc_len, M_UDFVOLD,
2688 M_CANFAIL | M_WAITOK);
2689 if (vat_table == NULL) {
2690 printf("allocation of %d bytes failed for VAT\n",
2691 vat_table_alloc_len);
2692 return ENOMEM;
2693 }
2694
2695 /* allocate piece to read in head or tail of VAT file */
2696 raw_vat = malloc(sector_size, M_TEMP, M_WAITOK);
2697
2698 /*
2699 	 * check contents of the file if it's the old 1.50 VAT table format.
2700 	 * It's notoriously broken and although some implementations support an
2701 	 * extension as defined in the UDF 1.50 errata document, it's doubtful
2702 	 * to be usable since a lot of implementations don't maintain it.
2703 */
2704 lvinfo = ump->logvol_info;
2705
2706 if (filetype == 0) {
2707 /* definition */
2708 vat_offset = 0;
2709 vat_entries = (vat_length-36)/4;
2710
2711 /* read in tail of virtual allocation table file */
2712 error = vn_rdwr(UIO_READ, vat_node->vnode,
2713 (uint8_t *) raw_vat,
2714 sizeof(struct udf_oldvat_tail),
2715 vat_entries * 4,
2716 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2717 NULL, NULL);
2718 if (error)
2719 goto out;
2720
2721 /* check 1.50 VAT */
2722 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2723 regid_name = (char *) oldvat_tl->id.id;
2724 error = strncmp(regid_name, "*UDF Virtual Alloc Tbl", 22);
2725 if (error) {
2726 DPRINTF(VOLUMES, ("VAT format 1.50 rejected\n"));
2727 error = ENOENT;
2728 goto out;
2729 }
2730
2731 /*
2732 * update LVID from "*UDF VAT LVExtension" extended attribute
2733 * if present.
2734 */
2735 udf_update_lvid_from_vat_extattr(vat_node);
2736 } else {
2737 /* read in head of virtual allocation table file */
2738 error = vn_rdwr(UIO_READ, vat_node->vnode,
2739 (uint8_t *) raw_vat,
2740 sizeof(struct udf_vat), 0,
2741 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2742 NULL, NULL);
2743 if (error)
2744 goto out;
2745
2746 /* definition */
2747 vat = (struct udf_vat *) raw_vat;
2748 vat_offset = vat->header_len;
2749 vat_entries = (vat_length - vat_offset)/4;
2750
2751 assert(lvinfo);
2752 lvinfo->num_files = vat->num_files;
2753 lvinfo->num_directories = vat->num_directories;
2754 lvinfo->min_udf_readver = vat->min_udf_readver;
2755 lvinfo->min_udf_writever = vat->min_udf_writever;
2756 lvinfo->max_udf_writever = vat->max_udf_writever;
2757
2758 udf_update_logvolname(ump, vat->logvol_id);
2759 }
2760
2761 /* read in complete VAT file */
2762 error = vn_rdwr(UIO_READ, vat_node->vnode,
2763 vat_table,
2764 vat_length, 0,
2765 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2766 NULL, NULL);
2767 if (error)
2768 printf("read in of complete VAT file failed (error %d)\n",
2769 error);
2770 if (error)
2771 goto out;
2772
2773 DPRINTF(VOLUMES, ("VAT format accepted, marking it closed\n"));
2774 	ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id);
2775 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
2776 ump->logvol_integrity->time = *mtime;
2777
2778 ump->vat_table_len = vat_length;
2779 ump->vat_table_alloc_len = vat_table_alloc_len;
2780 ump->vat_table = vat_table;
2781 ump->vat_offset = vat_offset;
2782 ump->vat_entries = vat_entries;
2783 ump->vat_last_free_lb = 0; /* start at beginning */
2784
2785 out:
2786 if (error) {
2787 if (vat_table)
2788 free(vat_table, M_UDFVOLD);
2789 }
2790 free(raw_vat, M_TEMP);
2791
2792 return error;
2793 }
2794
2795 /* --------------------------------------------------------------------- */
2796
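/*
 * Search for the VAT by walking backwards from the last possible VAT
 * location down to (at most) 256 sectors earlier, clipped to the first
 * possible location. The first node in that window that passes
 * udf_check_for_vat() is kept as ump->vat_node and marked as a system
 * file.
 */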
2797 static int
2798 udf_search_vat(struct udf_mount *ump, union udf_pmap *mapping)
2799 {
2800 struct udf_node *vat_node;
2801 struct long_ad icb_loc;
2802 uint32_t early_vat_loc, late_vat_loc, vat_loc;
2803 int error;
2804
2805 /* mapping info not needed */
2806 mapping = mapping;
2807
2808 vat_loc = ump->last_possible_vat_location;
2809 early_vat_loc = vat_loc - 256; /* 8 blocks of 32 sectors */
2810
2811 DPRINTF(VOLUMES, ("1) last possible %d, early_vat_loc %d \n",
2812 vat_loc, early_vat_loc));
2813 early_vat_loc = MAX(early_vat_loc, ump->first_possible_vat_location);
2814 late_vat_loc = vat_loc + 1024;
2815
2816 DPRINTF(VOLUMES, ("2) last possible %d, early_vat_loc %d \n",
2817 vat_loc, early_vat_loc));
2818
2819 /* start looking from the end of the range */
2820 do {
2821 DPRINTF(VOLUMES, ("Checking for VAT at sector %d\n", vat_loc));
2822 icb_loc.loc.part_num = udf_rw16(UDF_VTOP_RAWPART);
2823 icb_loc.loc.lb_num = udf_rw32(vat_loc);
2824
2825 error = udf_get_node(ump, &icb_loc, &vat_node);
2826 if (!error) {
2827 error = udf_check_for_vat(vat_node);
2828 DPRINTFIF(VOLUMES, !error,
2829 ("VAT accepted at %d\n", vat_loc));
2830 if (!error)
2831 break;
2832 }
2833 if (vat_node) {
2834 vput(vat_node->vnode);
2835 vat_node = NULL;
2836 }
2837 vat_loc--; /* walk backwards */
2838 } while (vat_loc >= early_vat_loc);
2839
2840 /* keep our VAT node around */
2841 if (vat_node) {
2842 UDF_SET_SYSTEMFILE(vat_node->vnode);
2843 ump->vat_node = vat_node;
2844 }
2845
2846 return error;
2847 }
2848
2849 /* --------------------------------------------------------------------- */
2850
2851 static int
2852 udf_read_sparables(struct udf_mount *ump, union udf_pmap *mapping)
2853 {
2854 union dscrptr *dscr;
2855 struct part_map_spare *pms = &mapping->pms;
2856 uint32_t lb_num;
2857 int spar, error;
2858
2859 /*
2860 	 * The partition mapping passed on to us specifies the information we
2861 	 * need to locate and initialise the sparing information for this
2862 	 * sparable partition.
2863 */
2864
2865 DPRINTF(VOLUMES, ("Read sparable table\n"));
2866 ump->sparable_packet_size = udf_rw16(pms->packet_len);
2867 KASSERT(ump->sparable_packet_size >= ump->packet_size); /* XXX */
2868
2869 for (spar = 0; spar < pms->n_st; spar++) {
2870 lb_num = pms->st_loc[spar];
2871 DPRINTF(VOLUMES, ("Checking for sparing table %d\n", lb_num));
2872 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
2873 if (!error && dscr) {
2874 if (udf_rw16(dscr->tag.id) == TAGID_SPARING_TABLE) {
2875 if (ump->sparing_table)
2876 free(ump->sparing_table, M_UDFVOLD);
2877 ump->sparing_table = &dscr->spt;
2878 dscr = NULL;
2879 DPRINTF(VOLUMES,
2880 ("Sparing table accepted (%d entries)\n",
2881 udf_rw16(ump->sparing_table->rt_l)));
2882 break; /* we're done */
2883 }
2884 }
2885 if (dscr)
2886 free(dscr, M_UDFVOLD);
2887 }
2888
2889 if (ump->sparing_table)
2890 return 0;
2891
2892 return ENOENT;
2893 }
2894
2895 /* --------------------------------------------------------------------- */
2896
2897 static int
2898 udf_read_metadata_nodes(struct udf_mount *ump, union udf_pmap *mapping)
2899 {
2900 struct part_map_meta *pmm = &mapping->pmm;
2901 struct long_ad icb_loc;
2902 struct vnode *vp;
2903 int error;
2904
2905 DPRINTF(VOLUMES, ("Reading in Metadata files\n"));
2906 icb_loc.loc.part_num = pmm->part_num;
2907 icb_loc.loc.lb_num = pmm->meta_file_lbn;
2908 DPRINTF(VOLUMES, ("Metadata file\n"));
2909 error = udf_get_node(ump, &icb_loc, &ump->metadata_node);
2910 if (ump->metadata_node) {
2911 vp = ump->metadata_node->vnode;
2912 UDF_SET_SYSTEMFILE(vp);
2913 }
2914
2915 icb_loc.loc.lb_num = pmm->meta_mirror_file_lbn;
2916 if (icb_loc.loc.lb_num != -1) {
2917 DPRINTF(VOLUMES, ("Metadata copy file\n"));
2918 error = udf_get_node(ump, &icb_loc, &ump->metadatamirror_node);
2919 if (ump->metadatamirror_node) {
2920 vp = ump->metadatamirror_node->vnode;
2921 UDF_SET_SYSTEMFILE(vp);
2922 }
2923 }
2924
2925 icb_loc.loc.lb_num = pmm->meta_bitmap_file_lbn;
2926 if (icb_loc.loc.lb_num != -1) {
2927 DPRINTF(VOLUMES, ("Metadata bitmap file\n"));
2928 error = udf_get_node(ump, &icb_loc, &ump->metadatabitmap_node);
2929 if (ump->metadatabitmap_node) {
2930 vp = ump->metadatabitmap_node->vnode;
2931 UDF_SET_SYSTEMFILE(vp);
2932 }
2933 }
2934
2935 /* if we're mounting read-only we relax the requirements */
2936 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) {
2937 error = EFAULT;
2938 if (ump->metadata_node)
2939 error = 0;
2940 if ((ump->metadata_node == NULL) && (ump->metadatamirror_node)) {
2941 printf( "udf mount: Metadata file not readable, "
2942 "substituting Metadata copy file\n");
2943 ump->metadata_node = ump->metadatamirror_node;
2944 ump->metadatamirror_node = NULL;
2945 error = 0;
2946 }
2947 } else {
2948 /* mounting read/write */
2949 /* if (error) */
2950 error = EROFS;
2951 }
2952 DPRINTFIF(VOLUMES, error, ("udf mount: failed to read "
2953 "metadata files\n"));
2954 return error;
2955 }
2956
2957 /* --------------------------------------------------------------------- */
2958
2959 int
2960 udf_read_vds_tables(struct udf_mount *ump)
2961 {
2962 union udf_pmap *mapping;
2963 /* struct udf_args *args = &ump->mount_args; */
2964 uint32_t n_pm, mt_l;
2965 uint32_t log_part;
2966 uint8_t *pmap_pos;
2967 int pmap_size;
2968 int error;
2969
2970 /* Iterate again over the part mappings for locations */
2971 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
2972 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */
2973 pmap_pos = ump->logical_vol->maps;
2974
2975 for (log_part = 0; log_part < n_pm; log_part++) {
2976 mapping = (union udf_pmap *) pmap_pos;
2977 switch (ump->vtop_tp[log_part]) {
2978 case UDF_VTOP_TYPE_PHYS :
2979 /* nothing */
2980 break;
2981 case UDF_VTOP_TYPE_VIRT :
2982 /* search and load VAT */
2983 error = udf_search_vat(ump, mapping);
2984 if (error)
2985 return ENOENT;
2986 break;
2987 case UDF_VTOP_TYPE_SPARABLE :
2988 /* load one of the sparable tables */
2989 error = udf_read_sparables(ump, mapping);
2990 if (error)
2991 return ENOENT;
2992 break;
2993 case UDF_VTOP_TYPE_META :
2994 /* load the associated file descriptors */
2995 error = udf_read_metadata_nodes(ump, mapping);
2996 if (error)
2997 return ENOENT;
2998 break;
2999 default:
3000 break;
3001 }
3002 pmap_size = pmap_pos[1];
3003 pmap_pos += pmap_size;
3004 }
3005
3006 return 0;
3007 }
3008
3009 /* --------------------------------------------------------------------- */
3010
3011 int
3012 udf_read_rootdirs(struct udf_mount *ump)
3013 {
3014 union dscrptr *dscr;
3015 /* struct udf_args *args = &ump->mount_args; */
3016 struct udf_node *rootdir_node, *streamdir_node;
3017 struct long_ad fsd_loc, *dir_loc;
3018 uint32_t lb_num, dummy;
3019 uint32_t fsd_len;
3020 int dscr_type;
3021 int error;
3022
3023 /* TODO implement FSD reading in separate function like integrity? */
3024 /* get fileset descriptor sequence */
3025 fsd_loc = ump->logical_vol->lv_fsd_loc;
3026 fsd_len = udf_rw32(fsd_loc.len);
3027
3028 dscr = NULL;
3029 error = 0;
3030 while (fsd_len || error) {
3031 DPRINTF(VOLUMES, ("fsd_len = %d\n", fsd_len));
3032 /* translate fsd_loc to lb_num */
3033 error = udf_translate_vtop(ump, &fsd_loc, &lb_num, &dummy);
3034 if (error)
3035 break;
3036 DPRINTF(VOLUMES, ("Reading FSD at lb %d\n", lb_num));
3037 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
3038 /* end markers */
3039 if (error || (dscr == NULL))
3040 break;
3041
3042 /* analyse */
3043 dscr_type = udf_rw16(dscr->tag.id);
3044 if (dscr_type == TAGID_TERM)
3045 break;
3046 if (dscr_type != TAGID_FSD) {
3047 free(dscr, M_UDFVOLD);
3048 return ENOENT;
3049 }
3050
3051 /*
3052 		 * TODO check for multiple fileset descriptors; it's only
3053 * picking the last now. Also check for FSD
3054 * correctness/interpretability
3055 */
3056
3057 /* update */
3058 if (ump->fileset_desc) {
3059 free(ump->fileset_desc, M_UDFVOLD);
3060 }
3061 ump->fileset_desc = &dscr->fsd;
3062 dscr = NULL;
3063
3064 /* continue to the next fsd */
3065 fsd_len -= ump->discinfo.sector_size;
3066 fsd_loc.loc.lb_num = udf_rw32(udf_rw32(fsd_loc.loc.lb_num)+1);
3067
3068 		/* follow up to fsd->next_ex (long_ad) if it's not null */
3069 if (udf_rw32(ump->fileset_desc->next_ex.len)) {
3070 DPRINTF(VOLUMES, ("follow up FSD extent\n"));
3071 fsd_loc = ump->fileset_desc->next_ex;
3072 fsd_len = udf_rw32(ump->fileset_desc->next_ex.len);
3073 }
3074 }
3075 if (dscr)
3076 free(dscr, M_UDFVOLD);
3077
3078 /* there has to be one */
3079 if (ump->fileset_desc == NULL)
3080 return ENOENT;
3081
3082 DPRINTF(VOLUMES, ("FSD read in fine\n"));
3083 DPRINTF(VOLUMES, ("Updating fsd logical volume id\n"));
3084 udf_update_logvolname(ump, ump->logical_vol->logvol_id);
3085
3086 /*
3087 * Now the FSD is known, read in the rootdirectory and if one exists,
3088 * the system stream dir. Some files in the system streamdir are not
3089 * wanted in this implementation since they are not maintained. If
3090 * writing is enabled we'll delete these files if they exist.
3091 */
3092
3093 rootdir_node = streamdir_node = NULL;
3094 dir_loc = NULL;
3095
3096 /* try to read in the rootdir */
3097 dir_loc = &ump->fileset_desc->rootdir_icb;
3098 error = udf_get_node(ump, dir_loc, &rootdir_node);
3099 if (error)
3100 return ENOENT;
3101
3102 	/* apparently it read in fine */
3103
3104 /*
3105 * Try the system stream directory; not very likely in the ones we
3106 * test, but for completeness.
3107 */
3108 dir_loc = &ump->fileset_desc->streamdir_icb;
3109 if (udf_rw32(dir_loc->len)) {
3110 printf("udf_read_rootdirs: streamdir defined ");
3111 error = udf_get_node(ump, dir_loc, &streamdir_node);
3112 if (error) {
3113 printf("but error in streamdir reading\n");
3114 } else {
3115 printf("but ignored\n");
3116 /*
3117 			 * TODO process streamdir `baddies' i.e. files we don't
3118 * want if R/W
3119 */
3120 }
3121 }
3122
3123 DPRINTF(VOLUMES, ("Rootdir(s) read in fine\n"));
3124
3125 /* release the vnodes again; they'll be auto-recycled later */
3126 if (streamdir_node) {
3127 vput(streamdir_node->vnode);
3128 }
3129 if (rootdir_node) {
3130 vput(rootdir_node->vnode);
3131 }
3132
3133 return 0;
3134 }
3135
3136 /* --------------------------------------------------------------------- */
3137
3138 /* To make absolutely sure we are NOT returning zero, add one :) */
3139
3140 long
3141 udf_calchash(struct long_ad *icbptr)
3142 {
3143 /* ought to be enough since each mountpoint has its own chain */
3144 return udf_rw32(icbptr->loc.lb_num) + 1;
3145 }
3146
3147
3148 static struct udf_node *
3149 udf_hash_lookup(struct udf_mount *ump, struct long_ad *icbptr)
3150 {
3151 struct udf_node *node;
3152 struct vnode *vp;
3153 uint32_t hashline;
3154
3155 loop:
3156 mutex_enter(&ump->ihash_lock);
3157
3158 hashline = udf_calchash(icbptr) & UDF_INODE_HASHMASK;
3159 LIST_FOREACH(node, &ump->udf_nodes[hashline], hashchain) {
3160 assert(node);
3161 if (node->loc.loc.lb_num == icbptr->loc.lb_num &&
3162 node->loc.loc.part_num == icbptr->loc.part_num) {
3163 vp = node->vnode;
3164 assert(vp);
3165 mutex_enter(&vp->v_interlock);
3166 mutex_exit(&ump->ihash_lock);
3167 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
3168 goto loop;
3169 return node;
3170 }
3171 }
3172 mutex_exit(&ump->ihash_lock);
3173
3174 return NULL;
3175 }
3176
3177
3178 static void
3179 udf_sorted_list_insert(struct udf_node *node)
3180 {
3181 struct udf_mount *ump;
3182 struct udf_node *s_node, *last_node;
3183 uint32_t loc, s_loc;
3184
3185 ump = node->ump;
3186 last_node = NULL; /* XXX gcc */
3187
3188 if (LIST_EMPTY(&ump->sorted_udf_nodes)) {
3189 LIST_INSERT_HEAD(&ump->sorted_udf_nodes, node, sortchain);
3190 return;
3191 }
3192
3193 /*
3194 	 * We sort on logical block number here and not on physical block
3195 	 * number. Ideally we should go for the physical block nr to get
3196 	 * better sync performance though this sort will ensure that packets
3197 	 * won't get split up unnecessarily.
3198 */
3199
3200 loc = udf_rw32(node->loc.loc.lb_num);
3201 LIST_FOREACH(s_node, &ump->sorted_udf_nodes, sortchain) {
3202 s_loc = udf_rw32(s_node->loc.loc.lb_num);
3203 if (s_loc > loc) {
3204 LIST_INSERT_BEFORE(s_node, node, sortchain);
3205 return;
3206 }
3207 last_node = s_node;
3208 }
3209 LIST_INSERT_AFTER(last_node, node, sortchain);
3210 }
3211
3212
3213 static void
3214 udf_register_node(struct udf_node *node)
3215 {
3216 struct udf_mount *ump;
3217 struct udf_node *chk;
3218 uint32_t hashline;
3219
3220 ump = node->ump;
3221 mutex_enter(&ump->ihash_lock);
3222
3223 /* add to our hash table */
3224 hashline = udf_calchash(&node->loc) & UDF_INODE_HASHMASK;
3225 #ifdef DEBUG
3226 LIST_FOREACH(chk, &ump->udf_nodes[hashline], hashchain) {
3227 assert(chk);
3228 if (chk->loc.loc.lb_num == node->loc.loc.lb_num &&
3229 chk->loc.loc.part_num == node->loc.loc.part_num)
3230 panic("Double node entered\n");
3231 }
3232 #else
3233 chk = NULL;
3234 #endif
3235 LIST_INSERT_HEAD(&ump->udf_nodes[hashline], node, hashchain);
3236
3237 /* add to our sorted list */
3238 udf_sorted_list_insert(node);
3239
3240 mutex_exit(&ump->ihash_lock);
3241 }
3242
3243
3244 static void
3245 udf_deregister_node(struct udf_node *node)
3246 {
3247 struct udf_mount *ump;
3248
3249 ump = node->ump;
3250 mutex_enter(&ump->ihash_lock);
3251
3252 /* from hash and sorted list */
3253 LIST_REMOVE(node, hashchain);
3254 LIST_REMOVE(node, sortchain);
3255
3256 mutex_exit(&ump->ihash_lock);
3257 }
3258
3259 /* --------------------------------------------------------------------- */
3260
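/*
 * Logical volume open/close. A volume counts as `open' when its logical
 * volume integrity descriptor is marked UDF_INTEGRITY_OPEN. Opening sets
 * up the write parameters and writing tracks and, depending on
 * ump->lvopen, writes the lvint out; closing writes the VAT, partition
 * bitmaps and/or lvint as dictated by ump->lvclose and marks the volume
 * closed again.
 */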
3261 int
3262 udf_open_logvol(struct udf_mount *ump)
3263 {
3264 int logvol_integrity;
3265 int error;
3266
3267 /* already/still open? */
3268 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
3269 if (logvol_integrity == UDF_INTEGRITY_OPEN)
3270 return 0;
3271
3272 /* can we open it ? */
3273 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
3274 return EROFS;
3275
3276 /* setup write parameters */
3277 DPRINTF(VOLUMES, ("Setting up write parameters\n"));
3278 if ((error = udf_setup_writeparams(ump)) != 0)
3279 return error;
3280
3281 /* determine data and metadata tracks (most likely same) */
3282 error = udf_search_writing_tracks(ump);
3283 if (error) {
3284 /* most likely lack of space */
3285 printf("udf_open_logvol: error searching writing tracks\n");
3286 return EROFS;
3287 }
3288
3289 /* writeout/update lvint on disc or only in memory */
3290 DPRINTF(VOLUMES, ("Opening logical volume\n"));
3291 if (ump->lvopen & UDF_OPEN_SESSION) {
3292 /* TODO implement writeout of VRS + VDS */
3293 printf( "udf_open_logvol:Opening a closed session not yet "
3294 "implemented\n");
3295 return EROFS;
3296
3297 /* determine data and metadata tracks again */
3298 error = udf_search_writing_tracks(ump);
3299 }
3300
3301 /* mark it open */
3302 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_OPEN);
3303
3304 /* do we need to write it out? */
3305 if (ump->lvopen & UDF_WRITE_LVINT) {
3306 error = udf_writeout_lvint(ump, ump->lvopen);
3307 /* if we couldn't write it mark it closed again */
3308 if (error) {
3309 ump->logvol_integrity->integrity_type =
3310 udf_rw32(UDF_INTEGRITY_CLOSED);
3311 return error;
3312 }
3313 }
3314
3315 return 0;
3316 }
3317
3318
3319 int
3320 udf_close_logvol(struct udf_mount *ump, int mntflags)
3321 {
3322 int logvol_integrity;
3323 int error = 0;
3324 int n;
3325
3326 /* already/still closed? */
3327 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
3328 if (logvol_integrity == UDF_INTEGRITY_CLOSED)
3329 return 0;
3330
3331 /* writeout/update lvint or write out VAT */
3332 DPRINTF(VOLUMES, ("Closing logical volume\n"));
3333 if (ump->lvclose & UDF_WRITE_VAT) {
3334 DPRINTF(VOLUMES, ("lvclose & UDF_WRITE_VAT\n"));
3335
3336 		/* preprocess the VAT node; it's modified on every writeout */
3337 DPRINTF(VOLUMES, ("writeout vat_node\n"));
3338 udf_update_vat_descriptor(ump->vat_node->ump);
3339
3340 /* write out the VAT node */
3341 vflushbuf(ump->vat_node->vnode, 1 /* sync */);
3342 for (n = 0; n < 16; n++) {
3343 ump->vat_node->i_flags |= IN_MODIFIED;
3344 error = VOP_FSYNC(ump->vat_node->vnode,
3345 FSCRED, FSYNC_WAIT, 0, 0);
3346 }
3347 if (error) {
3348 printf("udf_close_logvol: writeout of VAT failed\n");
3349 return error;
3350 }
3351 }
3352
3353 if (ump->lvclose & UDF_WRITE_PART_BITMAPS) {
3354 error = udf_write_partition_spacetables(ump, 1 /* waitfor */);
3355 if (error) {
3356 printf( "udf_close_logvol: writeout of space tables "
3357 "failed\n");
3358 return error;
3359 }
3360 ump->lvclose &= ~UDF_WRITE_PART_BITMAPS;
3361 }
3362
3363 if (ump->lvclose & UDF_CLOSE_SESSION) {
3364 printf("TODO: Closing a session is not yet implemented\n");
3365 return EROFS;
3366 ump->lvopen |= UDF_OPEN_SESSION;
3367 }
3368
3369 /* mark it closed */
3370 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
3371
3372 /* do we need to write out the logical volume integrity */
3373 if (ump->lvclose & UDF_WRITE_LVINT)
3374 error = udf_writeout_lvint(ump, ump->lvopen);
3375 if (error) {
3376 /* HELP now what? mark it open again for now */
3377 ump->logvol_integrity->integrity_type =
3378 udf_rw32(UDF_INTEGRITY_OPEN);
3379 return error;
3380 }
3381
3382 (void) udf_synchronise_caches(ump);
3383
3384 return 0;
3385 }
3386
3387 /* --------------------------------------------------------------------- */
3388
3389 /*
3390 * Genfs interfacing
3391 *
3392 * static const struct genfs_ops udf_genfsops = {
3393 * .gop_size = genfs_size,
3394 * size of transfers
3395 * .gop_alloc = udf_gop_alloc,
3396 * allocate len bytes at offset
3397 * .gop_write = genfs_gop_write,
3398 * putpages interface code
3399 * .gop_markupdate = udf_gop_markupdate,
3400 * set update/modify flags etc.
3401 * }
3402 */
3403
3404 /*
3405 * Genfs interface. These four functions are the only ones defined though not
3406 * documented... great....
3407 */
3408
3409 /*
3410 * Callback from genfs to allocate len bytes at offset off; only called when
3411 * filling up gaps in the allocation.
3412 */
3413 /* XXX should we check if there is space enough in udf_gop_alloc? */
3414 static int
3415 udf_gop_alloc(struct vnode *vp, off_t off,
3416 off_t len, int flags, kauth_cred_t cred)
3417 {
3418 #if 0
3419 struct udf_node *udf_node = VTOI(vp);
3420 struct udf_mount *ump = udf_node->ump;
3421 uint32_t lb_size, num_lb;
3422 #endif
3423
3424 DPRINTF(NOTIMPL, ("udf_gop_alloc not implemented\n"));
3425 DPRINTF(ALLOC, ("udf_gop_alloc called for %"PRIu64" bytes\n", len));
3426
3427 return 0;
3428 }
3429
3430
3431 /*
3432 * callback from genfs to update our flags
3433 */
3434 static void
3435 udf_gop_markupdate(struct vnode *vp, int flags)
3436 {
3437 struct udf_node *udf_node = VTOI(vp);
3438 u_long mask = 0;
3439
3440 if ((flags & GOP_UPDATE_ACCESSED) != 0) {
3441 mask = IN_ACCESS;
3442 }
3443 if ((flags & GOP_UPDATE_MODIFIED) != 0) {
3444 if (vp->v_type == VREG) {
3445 mask |= IN_CHANGE | IN_UPDATE;
3446 } else {
3447 mask |= IN_MODIFY;
3448 }
3449 }
3450 if (mask) {
3451 udf_node->i_flags |= mask;
3452 }
3453 }
3454
3455
3456 static const struct genfs_ops udf_genfsops = {
3457 .gop_size = genfs_size,
3458 .gop_alloc = udf_gop_alloc,
3459 .gop_write = genfs_gop_write_rwmap,
3460 .gop_markupdate = udf_gop_markupdate,
3461 };
3462
3463
3464 /* --------------------------------------------------------------------- */
3465
3466 int
3467 udf_write_terminator(struct udf_mount *ump, uint32_t sector)
3468 {
3469 union dscrptr *dscr;
3470 int error;
3471
3472 dscr = malloc(ump->discinfo.sector_size, M_TEMP, M_WAITOK);
3473 bzero(dscr, ump->discinfo.sector_size);
3474 udf_inittag(ump, &dscr->tag, TAGID_TERM, sector);
3475
3476 /* CRC length for an anchor is 512 - tag length; defined in Ecma 167 */
3477 dscr->tag.desc_crc_len = udf_rw16(512-UDF_DESC_TAG_LENGTH);
3478 (void) udf_validate_tag_and_crc_sums(dscr);
3479
3480 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
3481 dscr, sector, sector);
3482
3483 free(dscr, M_TEMP);
3484
3485 return error;
3486 }
3487
3488
3489 /* --------------------------------------------------------------------- */
3490
3491 /* UDF<->unix converters */
3492
3493 /* --------------------------------------------------------------------- */
3494
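/*
 * UDF file permissions use five bits per class (read, write, execute plus
 * change-attribute and delete), packed other/group/owner from low to high,
 * whereas unix uses three. Hence the conversions below shift the group and
 * owner rwx bits by 2 and 4 respectively; the unix->UDF direction
 * additionally maps unix write permission onto the corresponding UDF
 * `delete' bit. The remaining UDF bits are lost in translation.
 */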
3495 static mode_t
3496 udf_perm_to_unix_mode(uint32_t perm)
3497 {
3498 mode_t mode;
3499
3500 mode = ((perm & UDF_FENTRY_PERM_USER_MASK) );
3501 mode |= ((perm & UDF_FENTRY_PERM_GRP_MASK ) >> 2);
3502 mode |= ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4);
3503
3504 return mode;
3505 }
3506
3507 /* --------------------------------------------------------------------- */
3508
3509 static uint32_t
3510 unix_mode_to_udf_perm(mode_t mode)
3511 {
3512 uint32_t perm;
3513
3514 perm = ((mode & S_IRWXO) );
3515 perm |= ((mode & S_IRWXG) << 2);
3516 perm |= ((mode & S_IRWXU) << 4);
3517 perm |= ((mode & S_IWOTH) << 3);
3518 perm |= ((mode & S_IWGRP) << 5);
3519 perm |= ((mode & S_IWUSR) << 7);
3520
3521 return perm;
3522 }
3523
3524 /* --------------------------------------------------------------------- */
3525
3526 static uint32_t
3527 udf_icb_to_unix_filetype(uint32_t icbftype)
3528 {
3529 switch (icbftype) {
3530 case UDF_ICB_FILETYPE_DIRECTORY :
3531 case UDF_ICB_FILETYPE_STREAMDIR :
3532 return S_IFDIR;
3533 case UDF_ICB_FILETYPE_FIFO :
3534 return S_IFIFO;
3535 case UDF_ICB_FILETYPE_CHARDEVICE :
3536 return S_IFCHR;
3537 case UDF_ICB_FILETYPE_BLOCKDEVICE :
3538 return S_IFBLK;
3539 case UDF_ICB_FILETYPE_RANDOMACCESS :
3540 case UDF_ICB_FILETYPE_REALTIME :
3541 return S_IFREG;
3542 case UDF_ICB_FILETYPE_SYMLINK :
3543 return S_IFLNK;
3544 case UDF_ICB_FILETYPE_SOCKET :
3545 return S_IFSOCK;
3546 }
3547 /* no idea what this is */
3548 return 0;
3549 }
3550
3551 /* --------------------------------------------------------------------- */
3552
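/*
 * Name conversion. On-disc identifiers are normally in OSTA Compressed
 * Unicode (charspec type 0 carrying the "OSTA Compressed Unicode" info
 * string); anything else is treated as 8 bit latin-1 with a leading
 * compression-id byte. The OSTA route goes compressed d-string ->
 * unicode-16 -> UDFTransName() -> UTF-8 on reading, and the reverse in
 * unix_to_udf_name().
 */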
3553 void
3554 udf_to_unix_name(char *result, int result_len, char *id, int len,
3555 struct charspec *chsp)
3556 {
3557 uint16_t *raw_name, *unix_name;
3558 uint16_t *inchp, ch;
3559 uint8_t *outchp;
3560 const char *osta_id = "OSTA Compressed Unicode";
3561 int ucode_chars, nice_uchars, is_osta_typ0, nout;
3562
3563 raw_name = malloc(2048 * sizeof(uint16_t), M_UDFTEMP, M_WAITOK);
3564 unix_name = raw_name + 1024; /* split space in half */
3565 assert(sizeof(char) == sizeof(uint8_t));
3566 outchp = (uint8_t *) result;
3567
3568 is_osta_typ0 = (chsp->type == 0);
3569 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
3570 if (is_osta_typ0) {
3571 /* TODO clean up */
3572 *raw_name = *unix_name = 0;
3573 ucode_chars = udf_UncompressUnicode(len, (uint8_t *) id, raw_name);
3574 ucode_chars = MIN(ucode_chars, UnicodeLength((unicode_t *) raw_name));
3575 nice_uchars = UDFTransName(unix_name, raw_name, ucode_chars);
3576 /* output UTF8 */
3577 for (inchp = unix_name; nice_uchars>0; inchp++, nice_uchars--) {
3578 ch = *inchp;
3579 nout = wput_utf8(outchp, result_len, ch);
3580 outchp += nout; result_len -= nout;
3581 if (!ch) break;
3582 }
3583 *outchp++ = 0;
3584 } else {
3585 /* assume 8bit char length byte latin-1 */
3586 assert(*id == 8);
3587 assert(strlen((char *) (id+1)) <= MAXNAMLEN);
3588 strncpy((char *) result, (char *) (id+1), strlen((char *) (id+1)));
3589 }
3590 free(raw_name, M_UDFTEMP);
3591 }
3592
3593 /* --------------------------------------------------------------------- */
3594
3595 void
3596 unix_to_udf_name(char *result, uint8_t *result_len, char const *name, int name_len,
3597 struct charspec *chsp)
3598 {
3599 uint16_t *raw_name;
3600 uint16_t *outchp;
3601 const char *inchp;
3602 const char *osta_id = "OSTA Compressed Unicode";
3603 int udf_chars, is_osta_typ0, bits;
3604 size_t cnt;
3605
3606 /* allocate temporary unicode-16 buffer */
3607 raw_name = malloc(1024, M_UDFTEMP, M_WAITOK);
3608
3609 /* convert utf8 to unicode-16 */
3610 *raw_name = 0;
3611 inchp = name;
3612 outchp = raw_name;
3613 bits = 8;
3614 for (cnt = name_len, udf_chars = 0; cnt;) {
3615 		/* XXX [cc] warning: passing argument 2 of 'wget_utf8' from incompatible pointer type */
3616 *outchp = wget_utf8(&inchp, &cnt);
3617 if (*outchp > 0xff)
3618 bits=16;
3619 outchp++;
3620 udf_chars++;
3621 }
3622 /* null terminate just in case */
3623 *outchp++ = 0;
3624
3625 is_osta_typ0 = (chsp->type == 0);
3626 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
3627 if (is_osta_typ0) {
3628 udf_chars = udf_CompressUnicode(udf_chars, bits,
3629 (unicode_t *) raw_name,
3630 (byte *) result);
3631 } else {
3632 printf("unix to udf name: no CHSP0 ?\n");
3633 /* XXX assume 8bit char length byte latin-1 */
3634 *result++ = 8; udf_chars = 1;
3635 strncpy(result, name + 1, name_len);
3636 udf_chars += name_len;
3637 }
3638 *result_len = udf_chars;
3639 free(raw_name, M_UDFTEMP);
3640 }
3641
3642 /* --------------------------------------------------------------------- */
3643
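/*
 * Timestamp conversion. A UDF timestamp records a broken down date plus
 * separate centisecond, hundreds-of-microseconds and microsecond fields,
 * with the timezone (in minutes, 12 bit signed) and the interpretation
 * type packed together in type_tz. The conversions below are therefore
 * mostly bookkeeping, e.g.
 *
 *	tv_nsec = (usec + 100 * hund_usec + 10000 * centisec) * 1000;
 */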
3644 void
3645 udf_timestamp_to_timespec(struct udf_mount *ump,
3646 struct timestamp *timestamp,
3647 struct timespec *timespec)
3648 {
3649 struct clock_ymdhms ymdhms;
3650 uint32_t usecs, secs, nsecs;
3651 uint16_t tz;
3652
3653 /* fill in ymdhms structure from timestamp */
3654 memset(&ymdhms, 0, sizeof(ymdhms));
3655 ymdhms.dt_year = udf_rw16(timestamp->year);
3656 ymdhms.dt_mon = timestamp->month;
3657 ymdhms.dt_day = timestamp->day;
3658 ymdhms.dt_wday = 0; /* ? */
3659 ymdhms.dt_hour = timestamp->hour;
3660 ymdhms.dt_min = timestamp->minute;
3661 ymdhms.dt_sec = timestamp->second;
3662
3663 secs = clock_ymdhms_to_secs(&ymdhms);
3664 usecs = timestamp->usec +
3665 100*timestamp->hund_usec + 10000*timestamp->centisec;
3666 nsecs = usecs * 1000;
3667
3668 /*
3669 * Calculate the time zone. The timezone is 12 bit signed 2's
3670 	 * complement, so we have to do some extra magic to handle it right.
3671 */
3672 tz = udf_rw16(timestamp->type_tz);
3673 tz &= 0x0fff; /* only lower 12 bits are significant */
3674 	if (tz & 0x0800)		/* sign extension */
3675 tz |= 0xf000;
3676
3677 /* TODO check timezone conversion */
3678 	/* check if the timestamp specifies a timezone to convert from */
3679 if (udf_rw16(timestamp->type_tz) & 0x1000) {
3680 if ((int16_t) tz != -2047)
3681 secs -= (int16_t) tz * 60;
3682 } else {
3683 secs -= ump->mount_args.gmtoff;
3684 }
3685
3686 timespec->tv_sec = secs;
3687 timespec->tv_nsec = nsecs;
3688 }
3689
3690
3691 void
3692 udf_timespec_to_timestamp(struct timespec *timespec, struct timestamp *timestamp)
3693 {
3694 struct clock_ymdhms ymdhms;
3695 uint32_t husec, usec, csec;
3696
3697 (void) clock_secs_to_ymdhms(timespec->tv_sec, &ymdhms);
3698
3699 usec = timespec->tv_nsec / 1000;
3700 husec = usec / 100;
3701 usec -= husec * 100; /* only 0-99 in usec */
3702 csec = husec / 100; /* only 0-99 in csec */
3703 husec -= csec * 100; /* only 0-99 in husec */
3704
3705 /* set method 1 for CUT/GMT */
3706 timestamp->type_tz = udf_rw16((1<<12) + 0);
3707 timestamp->year = udf_rw16(ymdhms.dt_year);
3708 timestamp->month = ymdhms.dt_mon;
3709 timestamp->day = ymdhms.dt_day;
3710 timestamp->hour = ymdhms.dt_hour;
3711 timestamp->minute = ymdhms.dt_min;
3712 timestamp->second = ymdhms.dt_sec;
3713 timestamp->centisec = csec;
3714 timestamp->hund_usec = husec;
3715 timestamp->usec = usec;
3716 }
3717
3718 /* --------------------------------------------------------------------- */
3719
3720 /*
3721 * Attribute and filetypes converters with get/set pairs
3722 */
3723
3724 uint32_t
3725 udf_getaccessmode(struct udf_node *udf_node)
3726 {
3727 	struct file_entry *fe = udf_node->fe;
3728 struct extfile_entry *efe = udf_node->efe;
3729 uint32_t udf_perm, icbftype;
3730 uint32_t mode, ftype;
3731 uint16_t icbflags;
3732
3733 UDF_LOCK_NODE(udf_node, 0);
3734 if (fe) {
3735 udf_perm = udf_rw32(fe->perm);
3736 icbftype = fe->icbtag.file_type;
3737 icbflags = udf_rw16(fe->icbtag.flags);
3738 } else {
3739 assert(udf_node->efe);
3740 udf_perm = udf_rw32(efe->perm);
3741 icbftype = efe->icbtag.file_type;
3742 icbflags = udf_rw16(efe->icbtag.flags);
3743 }
3744
3745 mode = udf_perm_to_unix_mode(udf_perm);
3746 ftype = udf_icb_to_unix_filetype(icbftype);
3747
3748 /* set suid, sgid, sticky from flags in fe/efe */
3749 if (icbflags & UDF_ICB_TAG_FLAGS_SETUID)
3750 mode |= S_ISUID;
3751 if (icbflags & UDF_ICB_TAG_FLAGS_SETGID)
3752 mode |= S_ISGID;
3753 if (icbflags & UDF_ICB_TAG_FLAGS_STICKY)
3754 mode |= S_ISVTX;
3755
3756 UDF_UNLOCK_NODE(udf_node, 0);
3757
3758 return mode | ftype;
3759 }
3760
3761
3762 void
3763 udf_setaccessmode(struct udf_node *udf_node, mode_t mode)
3764 {
3765 struct file_entry *fe = udf_node->fe;
3766 struct extfile_entry *efe = udf_node->efe;
3767 uint32_t udf_perm;
3768 uint16_t icbflags;
3769
3770 UDF_LOCK_NODE(udf_node, 0);
3771 udf_perm = unix_mode_to_udf_perm(mode & ALLPERMS);
3772 if (fe) {
3773 icbflags = udf_rw16(fe->icbtag.flags);
3774 } else {
3775 icbflags = udf_rw16(efe->icbtag.flags);
3776 }
3777
3778 icbflags &= ~UDF_ICB_TAG_FLAGS_SETUID;
3779 icbflags &= ~UDF_ICB_TAG_FLAGS_SETGID;
3780 icbflags &= ~UDF_ICB_TAG_FLAGS_STICKY;
3781 if (mode & S_ISUID)
3782 icbflags |= UDF_ICB_TAG_FLAGS_SETUID;
3783 if (mode & S_ISGID)
3784 icbflags |= UDF_ICB_TAG_FLAGS_SETGID;
3785 if (mode & S_ISVTX)
3786 icbflags |= UDF_ICB_TAG_FLAGS_STICKY;
3787
3788 if (fe) {
3789 fe->perm = udf_rw32(udf_perm);
3790 fe->icbtag.flags = udf_rw16(icbflags);
3791 } else {
3792 efe->perm = udf_rw32(udf_perm);
3793 efe->icbtag.flags = udf_rw16(icbflags);
3794 }
3795
3796 UDF_UNLOCK_NODE(udf_node, 0);
3797 }
3798
3799
3800 void
3801 udf_getownership(struct udf_node *udf_node, uid_t *uidp, gid_t *gidp)
3802 {
3803 struct udf_mount *ump = udf_node->ump;
3804 struct file_entry *fe = udf_node->fe;
3805 struct extfile_entry *efe = udf_node->efe;
3806 uid_t uid;
3807 gid_t gid;
3808
3809 UDF_LOCK_NODE(udf_node, 0);
3810 if (fe) {
3811 uid = (uid_t)udf_rw32(fe->uid);
3812 gid = (gid_t)udf_rw32(fe->gid);
3813 } else {
3814 assert(udf_node->efe);
3815 uid = (uid_t)udf_rw32(efe->uid);
3816 gid = (gid_t)udf_rw32(efe->gid);
3817 }
3818
3819 /* do the uid/gid translation game */
3820 if ((uid == (uid_t) -1) && (gid == (gid_t) -1)) {
3821 uid = ump->mount_args.anon_uid;
3822 gid = ump->mount_args.anon_gid;
3823 }
3824 *uidp = uid;
3825 *gidp = gid;
3826
3827 UDF_UNLOCK_NODE(udf_node, 0);
3828 }
3829
3830
3831 void
3832 udf_setownership(struct udf_node *udf_node, uid_t uid, gid_t gid)
3833 {
3834 struct udf_mount *ump = udf_node->ump;
3835 struct file_entry *fe = udf_node->fe;
3836 struct extfile_entry *efe = udf_node->efe;
3837 uid_t nobody_uid;
3838 gid_t nobody_gid;
3839
3840 UDF_LOCK_NODE(udf_node, 0);
3841
3842 /* do the uid/gid translation game */
3843 nobody_uid = ump->mount_args.nobody_uid;
3844 nobody_gid = ump->mount_args.nobody_gid;
3845 if ((uid == nobody_uid) && (gid == nobody_gid)) {
3846 uid = (uid_t) -1;
3847 gid = (gid_t) -1;
3848 }
3849
3850 if (fe) {
3851 fe->uid = udf_rw32((uint32_t) uid);
3852 fe->gid = udf_rw32((uint32_t) gid);
3853 } else {
3854 efe->uid = udf_rw32((uint32_t) uid);
3855 efe->gid = udf_rw32((uint32_t) gid);
3856 }
3857
3858 UDF_UNLOCK_NODE(udf_node, 0);
3859 }
3860
3861
3862 /* --------------------------------------------------------------------- */
3863
3864 /*
3865 * UDF dirhash implementation
3866 */
3867
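/*
 * Name hashing below is the well-known djb2 string hash (hash = hash * 33 + c,
 * seeded with 5381). As an illustration, hashing the two byte name "ab" gives
 * ((5381 * 33 + 'a') * 33 + 'b') = 5863208, of which only the lower
 * UDF_DIRHASH_HASHMASK bits are used to select a hash line.
 */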
3868 static uint32_t
3869 udf_dirhash_hash(const char *str, int namelen)
3870 {
3871 uint32_t hash = 5381;
3872 int i, c;
3873
3874 for (i = 0; i < namelen; i++) {
3875 c = *str++;
3876 hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
3877 }
3878 return hash;
3879 }
3880
3881
3882 static void
3883 udf_dirhash_purge(struct udf_dirhash *dirh)
3884 {
3885 struct udf_dirhash_entry *dirh_e;
3886 uint32_t hashline;
3887
3888 if (dirh == NULL)
3889 return;
3890
3891 for (hashline = 0; hashline < UDF_DIRHASH_HASHSIZE; hashline++) {
3892 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
3893 while (dirh_e) {
3894 LIST_REMOVE(dirh_e, next);
3895 pool_put(&udf_dirhash_entry_pool, dirh_e);
3896 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
3897 }
3898 }
3899 dirh_e = LIST_FIRST(&dirh->free_entries);
3900
3901 while (dirh_e) {
3902 LIST_REMOVE(dirh_e, next);
3903 pool_put(&udf_dirhash_entry_pool, dirh_e);
3904 		dirh_e = LIST_FIRST(&dirh->free_entries);
3905 }
3906
3907 dirh->flags &= ~UDF_DIRH_COMPLETE;
3908 dirh->flags |= UDF_DIRH_PURGED;
3909
3910 udf_dirhashsize -= dirh->size;
3911 dirh->size = 0;
3912 }
3913
3914
3915 static void
3916 udf_dirhash_destroy(struct udf_dirhash **dirhp)
3917 {
3918 struct udf_dirhash *dirh = *dirhp;
3919
3920 if (dirh == NULL)
3921 return;
3922
3923 mutex_enter(&udf_dirhashmutex);
3924
3925 udf_dirhash_purge(dirh);
3926 TAILQ_REMOVE(&udf_dirhash_queue, dirh, next);
3927 pool_put(&udf_dirhash_pool, dirh);
3928
3929 *dirhp = NULL;
3930
3931 mutex_exit(&udf_dirhashmutex);
3932 }
3933
3934
3935 static void
3936 udf_dirhash_get(struct udf_dirhash **dirhp)
3937 {
3938 struct udf_dirhash *dirh;
3939 uint32_t hashline;
3940
3941 mutex_enter(&udf_dirhashmutex);
3942
3943 dirh = *dirhp;
3944 if (*dirhp == NULL) {
3945 dirh = pool_get(&udf_dirhash_pool, PR_WAITOK);
3946 *dirhp = dirh;
3947 memset(dirh, 0, sizeof(struct udf_dirhash));
3948 for (hashline = 0; hashline < UDF_DIRHASH_HASHSIZE; hashline++)
3949 LIST_INIT(&dirh->entries[hashline]);
3950 dirh->size = 0;
3951 dirh->refcnt = 0;
3952 dirh->flags = 0;
3953 } else {
3954 TAILQ_REMOVE(&udf_dirhash_queue, dirh, next);
3955 }
3956
3957 dirh->refcnt++;
3958 TAILQ_INSERT_HEAD(&udf_dirhash_queue, dirh, next);
3959
3960 mutex_exit(&udf_dirhashmutex);
3961 }
3962
3963
3964 static void
3965 udf_dirhash_put(struct udf_dirhash *dirh)
3966 {
3967 mutex_enter(&udf_dirhashmutex);
3968 dirh->refcnt--;
3969 mutex_exit(&udf_dirhashmutex);
3970 }
3971
3972
3973 static void
3974 udf_dirhash_enter(struct udf_node *dir_node, struct fileid_desc *fid,
3975 struct dirent *dirent, uint64_t offset, uint32_t fid_size, int new)
3976 {
3977 struct udf_dirhash *dirh, *del_dirh, *prev_dirh;
3978 struct udf_dirhash_entry *dirh_e;
3979 uint32_t hashvalue, hashline;
3980 int entrysize;
3981
3982 /* make sure we have a dirhash to work on */
3983 dirh = dir_node->dir_hash;
3984 KASSERT(dirh);
3985 KASSERT(dirh->refcnt > 0);
3986
3987 /* are we trying to re-enter an entry? */
3988 if (!new && (dirh->flags & UDF_DIRH_COMPLETE))
3989 return;
3990
3991 /* calculate our hash */
3992 hashvalue = udf_dirhash_hash(dirent->d_name, dirent->d_namlen);
3993 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
3994
3995 /* lookup and insert entry if not there yet */
3996 LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) {
3997 /* check for hash collision */
3998 if (dirh_e->hashvalue != hashvalue)
3999 continue;
4000 if (dirh_e->offset != offset)
4001 continue;
4002 /* got it already */
4003 KASSERT(dirh_e->d_namlen == dirent->d_namlen);
4004 KASSERT(dirh_e->fid_size == fid_size);
4005 return;
4006 }
4007
4008 DPRINTF(DIRHASH, ("dirhash enter %"PRIu64", %d, %d for `%*.*s`\n",
4009 offset, fid_size, dirent->d_namlen,
4010 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4011
4012 /* check if entry is in free space list */
4013 LIST_FOREACH(dirh_e, &dirh->free_entries, next) {
4014 if (dirh_e->offset == offset) {
4015 DPRINTF(DIRHASH, ("\tremoving free entry\n"));
4016 LIST_REMOVE(dirh_e, next);
4017 break;
4018 }
4019 }
4020
4021 	/* ensure we are not exceeding the dirhash limit */
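	/*
	 * Note: udf_dirhash_get() moves a dirhash to the head of
	 * udf_dirhash_queue on every reference, so the queue stays in MRU
	 * order and TAILQ_LAST() below yields the least recently used
	 * candidate to purge first.
	 */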
4022 entrysize = sizeof(struct udf_dirhash_entry);
4023 if (udf_dirhashsize + entrysize > udf_maxdirhashsize) {
4024 del_dirh = TAILQ_LAST(&udf_dirhash_queue, _udf_dirhash);
4025 KASSERT(del_dirh);
4026 while (udf_dirhashsize + entrysize > udf_maxdirhashsize) {
4027 /* no use trying to delete myself */
4028 if (del_dirh == dirh)
4029 break;
4030 prev_dirh = TAILQ_PREV(del_dirh, _udf_dirhash, next);
4031 if (del_dirh->refcnt == 0)
4032 udf_dirhash_purge(del_dirh);
4033 del_dirh = prev_dirh;
4034 }
4035 }
4036
4037 /* add to the hashline */
4038 dirh_e = pool_get(&udf_dirhash_entry_pool, PR_WAITOK);
4039 memset(dirh_e, 0, sizeof(struct udf_dirhash_entry));
4040
4041 dirh_e->hashvalue = hashvalue;
4042 dirh_e->offset = offset;
4043 dirh_e->d_namlen = dirent->d_namlen;
4044 dirh_e->fid_size = fid_size;
4045
4046 dirh->size += sizeof(struct udf_dirhash_entry);
4047 udf_dirhashsize += sizeof(struct udf_dirhash_entry);
4048 LIST_INSERT_HEAD(&dirh->entries[hashline], dirh_e, next);
4049 }
4050
4051
4052 static void
4053 udf_dirhash_enter_freed(struct udf_node *dir_node, uint64_t offset,
4054 uint32_t fid_size)
4055 {
4056 struct udf_dirhash *dirh;
4057 struct udf_dirhash_entry *dirh_e;
4058
4059 /* make sure we have a dirhash to work on */
4060 dirh = dir_node->dir_hash;
4061 KASSERT(dirh);
4062 KASSERT(dirh->refcnt > 0);
4063
4064 #ifdef DEBUG
4065 /* check for double entry of free space */
4066 LIST_FOREACH(dirh_e, &dirh->free_entries, next)
4067 KASSERT(dirh_e->offset != offset);
4068 #endif
4069
4070 DPRINTF(DIRHASH, ("dirhash enter FREED %"PRIu64", %d\n",
4071 offset, fid_size));
4072 dirh_e = pool_get(&udf_dirhash_entry_pool, PR_WAITOK);
4073 memset(dirh_e, 0, sizeof(struct udf_dirhash_entry));
4074
4075 dirh_e->hashvalue = 0; /* not relevant */
4076 dirh_e->offset = offset;
4077 dirh_e->d_namlen = 0; /* not relevant */
4078 dirh_e->fid_size = fid_size;
4079
4080 /* XXX it might be preferable to append them at the tail */
4081 LIST_INSERT_HEAD(&dirh->free_entries, dirh_e, next);
4082 dirh->size += sizeof(struct udf_dirhash_entry);
4083 udf_dirhashsize += sizeof(struct udf_dirhash_entry);
4084 }
4085
4086
4087 static void
4088 udf_dirhash_remove(struct udf_node *dir_node, struct dirent *dirent,
4089 uint64_t offset, uint32_t fid_size)
4090 {
4091 struct udf_dirhash *dirh;
4092 struct udf_dirhash_entry *dirh_e;
4093 uint32_t hashvalue, hashline;
4094
4095 DPRINTF(DIRHASH, ("dirhash remove %"PRIu64", %d for `%*.*s`\n",
4096 offset, fid_size,
4097 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4098
4099 /* make sure we have a dirhash to work on */
4100 dirh = dir_node->dir_hash;
4101 KASSERT(dirh);
4102 KASSERT(dirh->refcnt > 0);
4103
4104 /* calculate our hash */
4105 hashvalue = udf_dirhash_hash(dirent->d_name, dirent->d_namlen);
4106 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
4107
4108 /* lookup entry */
4109 LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) {
4110 /* check for hash collision */
4111 if (dirh_e->hashvalue != hashvalue)
4112 continue;
4113 if (dirh_e->offset != offset)
4114 continue;
4115
4116 /* got it! */
4117 KASSERT(dirh_e->d_namlen == dirent->d_namlen);
4118 KASSERT(dirh_e->fid_size == fid_size);
4119 LIST_REMOVE(dirh_e, next);
4120 dirh->size -= sizeof(struct udf_dirhash_entry);
4121 udf_dirhashsize -= sizeof(struct udf_dirhash_entry);
4122
4123 udf_dirhash_enter_freed(dir_node, offset, fid_size);
4124 return;
4125 }
4126
4127 /* not found! */
4128 panic("dirhash_remove couldn't find entry in hash table\n");
4129 }
4130
4131
4132 /* BUGALERT: don't use result longer than needed, never past the node lock */
4133 /* call with *result NULL initially; it returns nonzero while more candidates remain */
4134 static int
4135 udf_dirhash_lookup(struct udf_node *dir_node, const char *d_name, int d_namlen,
4136 struct udf_dirhash_entry **result)
4137 {
4138 struct udf_dirhash *dirh;
4139 struct udf_dirhash_entry *dirh_e;
4140 uint32_t hashvalue, hashline;
4141
4142 KASSERT(VOP_ISLOCKED(dir_node->vnode));
4143
4144 /* make sure we have a dirhash to work on */
4145 dirh = dir_node->dir_hash;
4146 KASSERT(dirh);
4147 KASSERT(dirh->refcnt > 0);
4148
4149 /* start where we were */
4150 if (*result) {
4151 KASSERT(dir_node->dir_hash);
4152 dirh_e = *result;
4153
4154 /* retrieve information to avoid recalculation and advance */
4155 hashvalue = dirh_e->hashvalue;
4156 dirh_e = LIST_NEXT(*result, next);
4157 } else {
4158 /* calculate our hash and lookup all entries in hashline */
4159 hashvalue = udf_dirhash_hash(d_name, d_namlen);
4160 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
4161 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
4162 }
4163
4164 for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) {
4165 /* check for hash collision */
4166 if (dirh_e->hashvalue != hashvalue)
4167 continue;
4168 if (dirh_e->d_namlen != d_namlen)
4169 continue;
4170 /* might have an entry in the cache */
4171 *result = dirh_e;
4172 return 1;
4173 }
4174
4175 *result = NULL;
4176 return 0;
4177 }
4178
4179
4180 /* BUGALERT: don't use result longer than needed, never past the node lock */
4181 /* call with *result NULL initially; it returns nonzero while more candidates remain */
4182 static int
4183 udf_dirhash_lookup_freed(struct udf_node *dir_node, uint32_t min_fidsize,
4184 struct udf_dirhash_entry **result)
4185 {
4186 struct udf_dirhash *dirh;
4187 struct udf_dirhash_entry *dirh_e;
4188
4189 KASSERT(VOP_ISLOCKED(dir_node->vnode));
4190
4191 /* make sure we have a dirhash to work on */
4192 dirh = dir_node->dir_hash;
4193 KASSERT(dirh);
4194 KASSERT(dirh->refcnt > 0);
4195
4196 /* start where we were */
4197 if (*result) {
4198 KASSERT(dir_node->dir_hash);
4199 dirh_e = LIST_NEXT(*result, next);
4200 } else {
4201 /* lookup all entries that match */
4202 dirh_e = LIST_FIRST(&dirh->free_entries);
4203 }
4204
4205 for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) {
4206 /* check for minimum size */
4207 if (dirh_e->fid_size < min_fidsize)
4208 continue;
4209 /* might be a candidate */
4210 *result = dirh_e;
4211 return 1;
4212 }
4213
4214 *result = NULL;
4215 return 0;
4216 }
4217
4218
4219 static int
4220 udf_dirhash_fill(struct udf_node *dir_node)
4221 {
4222 struct vnode *dvp = dir_node->vnode;
4223 struct udf_dirhash *dirh;
4224 struct file_entry *fe = dir_node->fe;
4225 struct extfile_entry *efe = dir_node->efe;
4226 struct fileid_desc *fid;
4227 struct dirent *dirent;
4228 uint64_t file_size, pre_diroffset, diroffset;
4229 uint32_t lb_size;
4230 int error;
4231
4232 /* make sure we have a dirhash to work on */
4233 dirh = dir_node->dir_hash;
4234 KASSERT(dirh);
4235 KASSERT(dirh->refcnt > 0);
4236
4237 if (dirh->flags & UDF_DIRH_BROKEN)
4238 return EIO;
4239 if (dirh->flags & UDF_DIRH_COMPLETE)
4240 return 0;
4241
4242 /* make sure we have a clean dirhash to add to */
4243 udf_dirhash_purge(dirh);
4244
4245 /* get directory filesize */
4246 if (fe) {
4247 file_size = udf_rw64(fe->inf_len);
4248 } else {
4249 assert(efe);
4250 file_size = udf_rw64(efe->inf_len);
4251 }
4252
4253 /* allocate temporary space for fid */
4254 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4255 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4256
4257 /* allocate temporary space for dirent */
4258 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4259
4260 error = 0;
4261 diroffset = 0;
4262 while (diroffset < file_size) {
4263 /* transfer a new fid/dirent */
4264 pre_diroffset = diroffset;
4265 error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
4266 if (error) {
4267 /* TODO what to do? continue but not add? */
4268 dirh->flags |= UDF_DIRH_BROKEN;
4269 udf_dirhash_purge(dirh);
4270 break;
4271 }
4272
4273 if ((fid->file_char & UDF_FILE_CHAR_DEL)) {
4274 /* register deleted extent for reuse */
4275 udf_dirhash_enter_freed(dir_node, pre_diroffset,
4276 udf_fidsize(fid));
4277 } else {
4278 /* append to the dirhash */
4279 udf_dirhash_enter(dir_node, fid, dirent, pre_diroffset,
4280 udf_fidsize(fid), 0);
4281 }
4282 }
4283 dirh->flags |= UDF_DIRH_COMPLETE;
4284
4285 free(fid, M_UDFTEMP);
4286 free(dirent, M_UDFTEMP);
4287
4288 return error;
4289 }
4290
4291
4292 /* --------------------------------------------------------------------- */
4293
4294 /*
4295 * Directory read and manipulation functions.
4296 *
4297 * Note that if the file is found, the cached diroffset position *before* the
4298  * advance is remembered. Thus if the same filename is looked up again right
4299  * after this lookup it is found immediately.
4300 */
4301
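/*
 * Typical use from a lookup operation, sketched here for illustration only
 * (error handling trimmed; dvp is assumed to be a locked directory vnode):
 *
 *	struct long_ad icb_loc;
 *	struct udf_node *res_node;
 *	int found, error;
 *
 *	error = udf_lookup_name_in_dir(dvp, name, namelen, &icb_loc, &found);
 *	if (!error && found)
 *		error = udf_get_node(ump, &icb_loc, &res_node);
 */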
4302 int
4303 udf_lookup_name_in_dir(struct vnode *vp, const char *name, int namelen,
4304 struct long_ad *icb_loc, int *found)
4305 {
4306 struct udf_node *dir_node = VTOI(vp);
4307 struct udf_dirhash_entry *dirh_ep;
4308 struct fileid_desc *fid;
4309 struct dirent *dirent;
4310 uint64_t diroffset;
4311 uint32_t lb_size;
4312 int hit, error;
4313
4314 /* set default return */
4315 *found = 0;
4316
4317 	/* get our dirhash and make sure it's read in */
4318 udf_dirhash_get(&dir_node->dir_hash);
4319 error = udf_dirhash_fill(dir_node);
4320 if (error) {
4321 udf_dirhash_put(dir_node->dir_hash);
4322 return error;
4323 }
4324
4325 /* allocate temporary space for fid */
4326 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4327 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4328 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4329
4330 DPRINTF(DIRHASH, ("dirhash_lookup looking for `%*.*s`\n",
4331 namelen, namelen, name));
4332
4333 /* search our dirhash hits */
4334 memset(icb_loc, 0, sizeof(*icb_loc));
4335 dirh_ep = NULL;
4336 for (;;) {
4337 hit = udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep);
4338 /* if no hit, abort the search */
4339 if (!hit)
4340 break;
4341
4342 /* check this hit */
4343 diroffset = dirh_ep->offset;
4344
4345 /* transfer a new fid/dirent */
4346 error = udf_read_fid_stream(vp, &diroffset, fid, dirent);
4347 if (error)
4348 break;
4349
4350 DPRINTF(DIRHASH, ("dirhash_lookup\tchecking `%*.*s`\n",
4351 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4352
4353 		/* see if it's our entry */
4354 KASSERT(dirent->d_namlen == namelen);
4355 if (strncmp(dirent->d_name, name, namelen) == 0) {
4356 *found = 1;
4357 *icb_loc = fid->icb;
4358 break;
4359 }
4360 }
4361 free(fid, M_UDFTEMP);
4362 free(dirent, M_UDFTEMP);
4363
4364 udf_dirhash_put(dir_node->dir_hash);
4365
4366 return error;
4367 }
4368
4369 /* --------------------------------------------------------------------- */
4370
4371 static int
4372 udf_create_new_fe(struct udf_mount *ump, struct file_entry *fe, int file_type,
4373 struct long_ad *node_icb, struct long_ad *parent_icb,
4374 uint64_t parent_unique_id)
4375 {
4376 struct timespec now;
4377 struct icb_tag *icb;
4378 struct filetimes_extattr_entry *ft_extattr;
4379 uint64_t unique_id;
4380 uint32_t fidsize, lb_num;
4381 uint8_t *bpos;
4382 int crclen, attrlen;
4383
4384 lb_num = udf_rw32(node_icb->loc.lb_num);
4385 udf_inittag(ump, &fe->tag, TAGID_FENTRY, lb_num);
4386 icb = &fe->icbtag;
4387
4388 /*
4389 	 * Always use strategy type 4 unless on WORM which we don't support
4390 * (yet). Fill in defaults and set for internal allocation of data.
4391 */
4392 icb->strat_type = udf_rw16(4);
4393 icb->max_num_entries = udf_rw16(1);
4394 icb->file_type = file_type; /* 8 bit */
4395 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
4396
4397 fe->perm = udf_rw32(0x7fff); /* all is allowed */
4398 fe->link_cnt = udf_rw16(0); /* explicit setting */
4399
4400 fe->ckpoint = udf_rw32(1); /* user supplied file version */
4401
4402 vfs_timestamp(&now);
4403 udf_timespec_to_timestamp(&now, &fe->atime);
4404 udf_timespec_to_timestamp(&now, &fe->attrtime);
4405 udf_timespec_to_timestamp(&now, &fe->mtime);
4406
4407 udf_set_regid(&fe->imp_id, IMPL_NAME);
4408 udf_add_impl_regid(ump, &fe->imp_id);
4409
4410 unique_id = udf_advance_uniqueid(ump);
4411 fe->unique_id = udf_rw64(unique_id);
4412 fe->l_ea = udf_rw32(0);
4413
4414 /* create extended attribute to record our creation time */
4415 attrlen = UDF_FILETIMES_ATTR_SIZE(1);
4416 ft_extattr = malloc(attrlen, M_UDFTEMP, M_WAITOK);
4417 memset(ft_extattr, 0, attrlen);
4418 ft_extattr->hdr.type = udf_rw32(UDF_FILETIMES_ATTR_NO);
4419 ft_extattr->hdr.subtype = 1; /* [4/48.10.5] */
4420 ft_extattr->hdr.a_l = udf_rw32(UDF_FILETIMES_ATTR_SIZE(1));
4421 ft_extattr->d_l = udf_rw32(UDF_TIMESTAMP_SIZE); /* one item */
4422 ft_extattr->existence = UDF_FILETIMES_FILE_CREATION;
4423 udf_timespec_to_timestamp(&now, &ft_extattr->times[0]);
4424
4425 udf_extattr_insert_internal(ump, (union dscrptr *) fe,
4426 (struct extattr_entry *) ft_extattr);
4427 free(ft_extattr, M_UDFTEMP);
4428
4429 	/* if it's a directory, create '..' */
4430 bpos = (uint8_t *) fe->data + udf_rw32(fe->l_ea);
4431 fidsize = 0;
4432 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
4433 fidsize = udf_create_parentfid(ump,
4434 (struct fileid_desc *) bpos, parent_icb,
4435 parent_unique_id);
4436 }
4437
4438 /* record fidlength information */
4439 fe->inf_len = udf_rw64(fidsize);
4440 fe->l_ad = udf_rw32(fidsize);
4441 fe->logblks_rec = udf_rw64(0); /* intern */
4442
4443 crclen = sizeof(struct file_entry) - 1 - UDF_DESC_TAG_LENGTH;
4444 crclen += udf_rw32(fe->l_ea) + fidsize;
4445 fe->tag.desc_crc_len = udf_rw16(crclen);
4446
4447 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fe);
4448
4449 return fidsize;
4450 }
4451
4452 /* --------------------------------------------------------------------- */
4453
4454 static int
4455 udf_create_new_efe(struct udf_mount *ump, struct extfile_entry *efe,
4456 int file_type, struct long_ad *node_icb, struct long_ad *parent_icb,
4457 uint64_t parent_unique_id)
4458 {
4459 struct timespec now;
4460 struct icb_tag *icb;
4461 uint64_t unique_id;
4462 uint32_t fidsize, lb_num;
4463 uint8_t *bpos;
4464 int crclen;
4465
4466 lb_num = udf_rw32(node_icb->loc.lb_num);
4467 udf_inittag(ump, &efe->tag, TAGID_EXTFENTRY, lb_num);
4468 icb = &efe->icbtag;
4469
4470 /*
4471 	 * Always use strategy type 4 unless on WORM which we don't support
4472 * (yet). Fill in defaults and set for internal allocation of data.
4473 */
4474 icb->strat_type = udf_rw16(4);
4475 icb->max_num_entries = udf_rw16(1);
4476 icb->file_type = file_type; /* 8 bit */
4477 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
4478
4479 efe->perm = udf_rw32(0x7fff); /* all is allowed */
4480 efe->link_cnt = udf_rw16(0); /* explicit setting */
4481
4482 efe->ckpoint = udf_rw32(1); /* user supplied file version */
4483
4484 vfs_timestamp(&now);
4485 udf_timespec_to_timestamp(&now, &efe->ctime);
4486 udf_timespec_to_timestamp(&now, &efe->atime);
4487 udf_timespec_to_timestamp(&now, &efe->attrtime);
4488 udf_timespec_to_timestamp(&now, &efe->mtime);
4489
4490 udf_set_regid(&efe->imp_id, IMPL_NAME);
4491 udf_add_impl_regid(ump, &efe->imp_id);
4492
4493 unique_id = udf_advance_uniqueid(ump);
4494 efe->unique_id = udf_rw64(unique_id);
4495 efe->l_ea = udf_rw32(0);
4496
4497 	/* if it's a directory, create '..' */
4498 bpos = (uint8_t *) efe->data + udf_rw32(efe->l_ea);
4499 fidsize = 0;
4500 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
4501 fidsize = udf_create_parentfid(ump,
4502 (struct fileid_desc *) bpos, parent_icb,
4503 parent_unique_id);
4504 }
4505
4506 /* record fidlength information */
4507 efe->obj_size = udf_rw64(fidsize);
4508 efe->inf_len = udf_rw64(fidsize);
4509 efe->l_ad = udf_rw32(fidsize);
4510 efe->logblks_rec = udf_rw64(0); /* intern */
4511
4512 crclen = sizeof(struct extfile_entry) - 1 - UDF_DESC_TAG_LENGTH;
4513 crclen += udf_rw32(efe->l_ea) + fidsize;
4514 efe->tag.desc_crc_len = udf_rw16(crclen);
4515
4516 (void) udf_validate_tag_and_crc_sums((union dscrptr *) efe);
4517
4518 return fidsize;
4519 }
4520
4521 /* --------------------------------------------------------------------- */
4522
4523 int
4524 udf_dir_detach(struct udf_mount *ump, struct udf_node *dir_node,
4525 struct udf_node *udf_node, struct componentname *cnp)
4526 {
4527 struct vnode *dvp = dir_node->vnode;
4528 struct udf_dirhash_entry *dirh_ep;
4529 struct file_entry *fe = dir_node->fe;
4530 struct extfile_entry *efe = dir_node->efe;
4531 struct fileid_desc *fid;
4532 struct dirent *dirent;
4533 uint64_t file_size, diroffset;
4534 uint32_t lb_size, fidsize;
4535 int found, error;
4536 char const *name = cnp->cn_nameptr;
4537 int namelen = cnp->cn_namelen;
4538 int hit, refcnt;
4539
4540 	/* get our dirhash and make sure it's read in */
4541 udf_dirhash_get(&dir_node->dir_hash);
4542 error = udf_dirhash_fill(dir_node);
4543 if (error) {
4544 udf_dirhash_put(dir_node->dir_hash);
4545 return error;
4546 }
4547
4548 /* get directory filesize */
4549 if (fe) {
4550 file_size = udf_rw64(fe->inf_len);
4551 } else {
4552 assert(efe);
4553 file_size = udf_rw64(efe->inf_len);
4554 }
4555
4556 /* allocate temporary space for fid */
4557 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4558 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4559 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4560
4561 /* search our dirhash hits */
4562 found = 0;
4563 dirh_ep = NULL;
4564 for (;;) {
4565 hit = udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep);
4566 /* if no hit, abort the search */
4567 if (!hit)
4568 break;
4569
4570 /* check this hit */
4571 diroffset = dirh_ep->offset;
4572
4573 /* transfer a new fid/dirent */
4574 error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
4575 if (error)
4576 break;
4577
4578 		/* see if it's our entry */
4579 KASSERT(dirent->d_namlen == namelen);
4580 if (strncmp(dirent->d_name, name, namelen) == 0) {
4581 found = 1;
4582 break;
4583 }
4584 }
4585
4586 if (!found)
4587 error = ENOENT;
4588 if (error)
4589 goto error_out;
4590
4591 /* mark deleted */
4592 fid->file_char |= UDF_FILE_CHAR_DEL;
4593 #ifdef UDF_COMPLETE_DELETE
4594 memset(&fid->icb, 0, sizeof(fid->icb));
4595 #endif
4596 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
4597
4598 /* get size of fid and compensate for the read_fid_stream advance */
4599 fidsize = udf_fidsize(fid);
4600 diroffset -= fidsize;
4601
4602 /* write out */
4603 error = vn_rdwr(UIO_WRITE, dir_node->vnode,
4604 fid, fidsize, diroffset,
4605 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
4606 FSCRED, NULL, NULL);
4607 if (error)
4608 goto error_out;
4609
4610 /* get reference count of attached node */
4611 if (udf_node->fe) {
4612 refcnt = udf_rw16(udf_node->fe->link_cnt);
4613 } else {
4614 KASSERT(udf_node->efe);
4615 refcnt = udf_rw16(udf_node->efe->link_cnt);
4616 }
4617 #ifdef UDF_COMPLETE_DELETE
4618 	/* subtract reference counter in attached node */
4619 refcnt -= 1;
4620 if (udf_node->fe) {
4621 udf_node->fe->link_cnt = udf_rw16(refcnt);
4622 } else {
4623 udf_node->efe->link_cnt = udf_rw16(refcnt);
4624 }
4625
4626 /* prevent writeout when refcnt == 0 */
4627 if (refcnt == 0)
4628 udf_node->i_flags |= IN_DELETED;
4629
4630 if (fid->file_char & UDF_FILE_CHAR_DIR) {
4631 int drefcnt;
4632
4633 		/* subtract reference counter in directory node */
4634 		/* note: subtract 2 (?) as it was also backreferenced */
4635 if (dir_node->fe) {
4636 drefcnt = udf_rw16(dir_node->fe->link_cnt);
4637 drefcnt -= 1;
4638 dir_node->fe->link_cnt = udf_rw16(drefcnt);
4639 } else {
4640 KASSERT(dir_node->efe);
4641 drefcnt = udf_rw16(dir_node->efe->link_cnt);
4642 drefcnt -= 1;
4643 dir_node->efe->link_cnt = udf_rw16(drefcnt);
4644 }
4645 }
4646
4647 udf_node->i_flags |= IN_MODIFIED;
4648 dir_node->i_flags |= IN_MODIFIED;
4649 #endif
4650 /* if it is/was a hardlink adjust the file count */
4651 if (refcnt > 0)
4652 udf_adjust_filecount(udf_node, -1);
4653
4654 /* remove from the dirhash */
4655 udf_dirhash_remove(dir_node, dirent, diroffset,
4656 udf_fidsize(fid));
4657
4658 error_out:
4659 free(fid, M_UDFTEMP);
4660 free(dirent, M_UDFTEMP);
4661
4662 udf_dirhash_put(dir_node->dir_hash);
4663
4664 return error;
4665 }
4666
4667 /* --------------------------------------------------------------------- */
4668
4669 /*
4670  * We are not allowed to split the fid tag itself over a logical block so
4671 * check the space remaining in the logical block.
4672 *
4673 * We try to select the smallest candidate for recycling or when none is
4674 * found, append a new one at the end of the directory.
4675 */
4676
4677 int
4678 udf_dir_attach(struct udf_mount *ump, struct udf_node *dir_node,
4679 struct udf_node *udf_node, struct vattr *vap, struct componentname *cnp)
4680 {
4681 struct vnode *dvp = dir_node->vnode;
4682 struct udf_dirhash_entry *dirh_ep;
4683 struct fileid_desc *fid;
4684 struct icb_tag *icbtag;
4685 struct charspec osta_charspec;
4686 struct dirent dirent;
4687 uint64_t unique_id, dir_size, diroffset;
4688 uint64_t fid_pos, end_fid_pos, chosen_fid_pos;
4689 uint32_t chosen_size, chosen_size_diff;
4690 int lb_size, lb_rest, fidsize, this_fidsize, size_diff;
4691 int file_char, refcnt, icbflags, addr_type, hit, error;
4692
4693 	/* get our dirhash and make sure it's read in */
4694 udf_dirhash_get(&dir_node->dir_hash);
4695 error = udf_dirhash_fill(dir_node);
4696 if (error) {
4697 udf_dirhash_put(dir_node->dir_hash);
4698 return error;
4699 }
4700
4701 /* get info */
4702 lb_size = udf_rw32(ump->logical_vol->lb_size);
4703 udf_osta_charset(&osta_charspec);
4704
4705 if (dir_node->fe) {
4706 dir_size = udf_rw64(dir_node->fe->inf_len);
4707 icbtag = &dir_node->fe->icbtag;
4708 } else {
4709 dir_size = udf_rw64(dir_node->efe->inf_len);
4710 icbtag = &dir_node->efe->icbtag;
4711 }
4712
4713 icbflags = udf_rw16(icbtag->flags);
4714 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
4715
4716 if (udf_node->fe) {
4717 unique_id = udf_rw64(udf_node->fe->unique_id);
4718 refcnt = udf_rw16(udf_node->fe->link_cnt);
4719 } else {
4720 unique_id = udf_rw64(udf_node->efe->unique_id);
4721 refcnt = udf_rw16(udf_node->efe->link_cnt);
4722 }
4723
4724 if (refcnt > 0) {
4725 unique_id = udf_advance_uniqueid(ump);
4726 udf_adjust_filecount(udf_node, 1);
4727 }
4728
4729 /* determine file characteristics */
4730 file_char = 0; /* visible non deleted file and not stream metadata */
4731 if (vap->va_type == VDIR)
4732 file_char = UDF_FILE_CHAR_DIR;
4733
4734 /* malloc scrap buffer */
4735 fid = malloc(lb_size, M_TEMP, M_WAITOK);
4736 bzero(fid, lb_size);
4737
4738 /* calculate _minimum_ fid size */
4739 unix_to_udf_name((char *) fid->data, &fid->l_fi,
4740 cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
4741 fidsize = UDF_FID_SIZE + fid->l_fi;
4742 fidsize = (fidsize + 3) & ~3; /* multiple of 4 */
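	/*
	 * Example (illustration only, assuming UDF_FID_SIZE is the 38 byte
	 * fixed part of a fileid_desc): a compressed name of 9 bytes gives a
	 * minimum fid size of (38 + 9 + 3) & ~3 = 48 bytes.
	 */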
4743
4744 /* find position that will fit the FID */
4745 chosen_fid_pos = dir_size;
4746 chosen_size = 0;
4747 chosen_size_diff = UINT_MAX;
4748
4749 /* shut up gcc */
4750 dirent.d_namlen = 0;
4751
4752 /* search our dirhash hits */
4753 error = 0;
4754 dirh_ep = NULL;
4755 for (;;) {
4756 hit = udf_dirhash_lookup_freed(dir_node, fidsize, &dirh_ep);
4757 /* if no hit, abort the search */
4758 if (!hit)
4759 break;
4760
4761 /* check this hit for size */
4762 this_fidsize = dirh_ep->fid_size;
4763
4764 /* check this hit */
4765 fid_pos = dirh_ep->offset;
4766 end_fid_pos = fid_pos + this_fidsize;
4767 size_diff = this_fidsize - fidsize;
4768 lb_rest = lb_size - (end_fid_pos % lb_size);
4769
4770 #ifndef UDF_COMPLETE_DELETE
4771 /* transfer a new fid/dirent */
4772 		error = udf_read_fid_stream(dvp, &fid_pos, fid, &dirent);
4773 if (error)
4774 goto error_out;
4775
4776 /* only reuse entries that are wiped */
4777 /* check if the len + loc are marked zero */
4778 		if (udf_rw32(fid->icb.len) != 0)
4779 continue;
4780 if (udf_rw32(fid->icb.loc.lb_num) != 0)
4781 continue;
4782 		if (udf_rw16(fid->icb.loc.part_num) != 0)
4783 continue;
4784 #endif /* UDF_COMPLETE_DELETE */
4785
4786 		/* select if not splitting the tag and it's smaller */
4787 if ((size_diff >= 0) &&
4788 (size_diff < chosen_size_diff) &&
4789 (lb_rest >= sizeof(struct desc_tag)))
4790 {
4791 /* UDF 2.3.4.2+3 specifies rules for iu size */
4792 if ((size_diff == 0) || (size_diff >= 32)) {
4793 chosen_fid_pos = fid_pos;
4794 chosen_size = this_fidsize;
4795 chosen_size_diff = size_diff;
4796 }
4797 }
4798 }
4799
4800
4801 /* extend directory if no other candidate found */
4802 if (chosen_size == 0) {
4803 chosen_fid_pos = dir_size;
4804 chosen_size = fidsize;
4805 chosen_size_diff = 0;
4806
4807 /* special case UDF 2.00+ 2.3.4.4, no splitting up fid tag */
4808 if (addr_type == UDF_ICB_INTERN_ALLOC) {
4809 			/* pre-grow directory to see if we need to switch allocation type */
4810 udf_grow_node(dir_node, dir_size + chosen_size);
4811
4812 icbflags = udf_rw16(icbtag->flags);
4813 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
4814 }
4815
4816 		/* make sure the next fid desc_tag won't be split */
4817 if (addr_type != UDF_ICB_INTERN_ALLOC) {
4818 end_fid_pos = chosen_fid_pos + chosen_size;
4819 lb_rest = lb_size - (end_fid_pos % lb_size);
4820
4821 /* pad with implementation use regid if needed */
4822 if (lb_rest < sizeof(struct desc_tag))
4823 chosen_size += 32;
4824 }
4825 }
4826 chosen_size_diff = chosen_size - fidsize;
4827 diroffset = chosen_fid_pos + chosen_size;
4828
4829 /* populate the FID */
4830 memset(fid, 0, lb_size);
4831 udf_inittag(ump, &fid->tag, TAGID_FID, 0);
4832 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
4833 fid->file_char = file_char;
4834 fid->icb = udf_node->loc;
4835 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
4836 fid->l_iu = udf_rw16(0);
4837
4838 if (chosen_size > fidsize) {
4839 /* insert implementation-use regid to space it correctly */
4840 fid->l_iu = udf_rw16(chosen_size_diff);
4841
4842 /* set implementation use */
4843 udf_set_regid((struct regid *) fid->data, IMPL_NAME);
4844 udf_add_impl_regid(ump, (struct regid *) fid->data);
4845 }
4846
4847 /* fill in name */
4848 unix_to_udf_name((char *) fid->data + udf_rw16(fid->l_iu),
4849 &fid->l_fi, cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
4850
4851 	fid->tag.desc_crc_len = udf_rw16(chosen_size - UDF_DESC_TAG_LENGTH);
4852 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
4853
4854 /* writeout FID/update parent directory */
4855 error = vn_rdwr(UIO_WRITE, dvp,
4856 fid, chosen_size, chosen_fid_pos,
4857 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
4858 FSCRED, NULL, NULL);
4859
4860 if (error)
4861 goto error_out;
4862
4863 /* add reference counter in attached node */
4864 if (udf_node->fe) {
4865 refcnt = udf_rw16(udf_node->fe->link_cnt);
4866 udf_node->fe->link_cnt = udf_rw16(refcnt+1);
4867 } else {
4868 KASSERT(udf_node->efe);
4869 refcnt = udf_rw16(udf_node->efe->link_cnt);
4870 udf_node->efe->link_cnt = udf_rw16(refcnt+1);
4871 }
4872
4873 /* mark not deleted if it was... just in case, but do warn */
4874 if (udf_node->i_flags & IN_DELETED) {
4875 printf("udf: warning, marking a file undeleted\n");
4876 udf_node->i_flags &= ~IN_DELETED;
4877 }
4878
4879 if (file_char & UDF_FILE_CHAR_DIR) {
4880 /* add reference counter in directory node for '..' */
4881 if (dir_node->fe) {
4882 refcnt = udf_rw16(dir_node->fe->link_cnt);
4883 refcnt++;
4884 dir_node->fe->link_cnt = udf_rw16(refcnt);
4885 } else {
4886 KASSERT(dir_node->efe);
4887 refcnt = udf_rw16(dir_node->efe->link_cnt);
4888 refcnt++;
4889 dir_node->efe->link_cnt = udf_rw16(refcnt);
4890 }
4891 }
4892
4893 /* append to the dirhash */
4894 dirent.d_namlen = cnp->cn_namelen;
4895 memcpy(dirent.d_name, cnp->cn_nameptr, cnp->cn_namelen);
4896 udf_dirhash_enter(dir_node, fid, &dirent, chosen_fid_pos,
4897 udf_fidsize(fid), 1);
4898
4899 /* note updates */
4900 udf_node->i_flags |= IN_CHANGE | IN_MODIFY; /* | IN_CREATE? */
4901 /* VN_KNOTE(udf_node, ...) */
4902 udf_update(udf_node->vnode, NULL, NULL, NULL, 0);
4903
4904 error_out:
4905 free(fid, M_TEMP);
4906
4907 udf_dirhash_put(dir_node->dir_hash);
4908
4909 return error;
4910 }
4911
4912 /* --------------------------------------------------------------------- */
4913
4914 /*
4915 * Each node can have an attached streamdir node though not recursively. These
4916 * are otherwise known as named substreams/named extended attributes that have
4917 * no size limitations.
4918 *
4919 * `Normal' extended attributes are indicated with a number and are recorded
4920 * in either the fe/efe descriptor itself for small descriptors or recorded in
4921 * the attached extended attribute file. Since these spaces can get
4922 * fragmented, care ought to be taken.
4923 *
4924 * Since the size of the space reserved for allocation descriptors is limited,
4925  * there is a mechanism provided for extending this space; this is done by a
4926  * special extent to allow shrinking of the allocations without breaking the
4927 * linkage to the allocation extent descriptor.
4928 */
4929
4930 int
4931 udf_get_node(struct udf_mount *ump, struct long_ad *node_icb_loc,
4932 struct udf_node **udf_noderes)
4933 {
4934 union dscrptr *dscr;
4935 struct udf_node *udf_node;
4936 struct vnode *nvp;
4937 struct long_ad icb_loc, last_fe_icb_loc;
4938 uint64_t file_size;
4939 uint32_t lb_size, sector, dummy;
4940 uint8_t *file_data;
4941 int udf_file_type, dscr_type, strat, strat4096, needs_indirect;
4942 int slot, eof, error;
4943
4944 DPRINTF(NODE, ("udf_get_node called\n"));
4945 *udf_noderes = udf_node = NULL;
4946
4947 	/* lock to disallow simultaneous creation of the same udf_node */
4948 mutex_enter(&ump->get_node_lock);
4949
4950 DPRINTF(NODE, ("\tlookup in hash table\n"));
4951 /* lookup in hash table */
4952 assert(ump);
4953 assert(node_icb_loc);
4954 udf_node = udf_hash_lookup(ump, node_icb_loc);
4955 if (udf_node) {
4956 DPRINTF(NODE, ("\tgot it from the hash!\n"));
4957 /* vnode is returned locked */
4958 *udf_noderes = udf_node;
4959 mutex_exit(&ump->get_node_lock);
4960 return 0;
4961 }
4962
4963 /* garbage check: translate udf_node_icb_loc to sectornr */
4964 	error = udf_translate_vtop(ump, node_icb_loc, &sector, &dummy);
4965 if (error) {
4966 /* no use, this will fail anyway */
4967 mutex_exit(&ump->get_node_lock);
4968 return EINVAL;
4969 }
4970
4971 /* build udf_node (do initialise!) */
4972 udf_node = pool_get(&udf_node_pool, PR_WAITOK);
4973 memset(udf_node, 0, sizeof(struct udf_node));
4974
4975 DPRINTF(NODE, ("\tget new vnode\n"));
4976 /* give it a vnode */
4977 error = getnewvnode(VT_UDF, ump->vfs_mountp, udf_vnodeop_p, &nvp);
4978 if (error) {
4979 pool_put(&udf_node_pool, udf_node);
4980 mutex_exit(&ump->get_node_lock);
4981 return error;
4982 }
4983
4984 /* always return locked vnode */
4985 if ((error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY))) {
4986 		/* recycle vnode and unlock; simultaneous attempts will fail too */
4987 ungetnewvnode(nvp);
4988 mutex_exit(&ump->get_node_lock);
4989 return error;
4990 }
4991
4992 /* initialise crosslinks, note location of fe/efe for hashing */
4993 udf_node->ump = ump;
4994 udf_node->vnode = nvp;
4995 nvp->v_data = udf_node;
4996 udf_node->loc = *node_icb_loc;
4997 udf_node->lockf = 0;
4998 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
4999 cv_init(&udf_node->node_lock, "udf_nlk");
5000 	genfs_node_init(nvp, &udf_genfsops);	/* initialise genfs */
5001 udf_node->outstanding_bufs = 0;
5002 udf_node->outstanding_nodedscr = 0;
5003
5004 /* insert into the hash lookup */
5005 udf_register_node(udf_node);
5006
5007 /* safe to unlock, the entry is in the hash table, vnode is locked */
5008 mutex_exit(&ump->get_node_lock);
5009
5010 icb_loc = *node_icb_loc;
5011 needs_indirect = 0;
5012 strat4096 = 0;
5013 udf_file_type = UDF_ICB_FILETYPE_UNKNOWN;
5014 file_size = 0;
5015 file_data = NULL;
5016 lb_size = udf_rw32(ump->logical_vol->lb_size);
5017
5018 DPRINTF(NODE, ("\tstart reading descriptors\n"));
5019 do {
5020 /* try to read in fe/efe */
5021 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
5022
5023 /* blank sector marks end of sequence, check this */
5024 if ((dscr == NULL) && (!strat4096))
5025 error = ENOENT;
5026
5027 /* break if read error or blank sector */
5028 if (error || (dscr == NULL))
5029 break;
5030
5031 /* process descriptor based on the descriptor type */
5032 dscr_type = udf_rw16(dscr->tag.id);
5033 DPRINTF(NODE, ("\tread descriptor %d\n", dscr_type));
5034
5035 /* if dealing with an indirect entry, follow the link */
5036 if (dscr_type == TAGID_INDIRECTENTRY) {
5037 needs_indirect = 0;
5038 udf_free_logvol_dscr(ump, &icb_loc, dscr);
5039 icb_loc = dscr->inde.indirect_icb;
5040 continue;
5041 }
5042
5043 /* only file entries and extended file entries allowed here */
5044 if ((dscr_type != TAGID_FENTRY) &&
5045 (dscr_type != TAGID_EXTFENTRY)) {
5046 udf_free_logvol_dscr(ump, &icb_loc, dscr);
5047 error = ENOENT;
5048 break;
5049 }
5050
5051 KASSERT(udf_tagsize(dscr, lb_size) == lb_size);
5052
5053 /* choose this one */
5054 last_fe_icb_loc = icb_loc;
5055
5056 /* record and process/update (ext)fentry */
5057 file_data = NULL;
5058 if (dscr_type == TAGID_FENTRY) {
5059 if (udf_node->fe)
5060 udf_free_logvol_dscr(ump, &last_fe_icb_loc,
5061 udf_node->fe);
5062 udf_node->fe = &dscr->fe;
5063 strat = udf_rw16(udf_node->fe->icbtag.strat_type);
5064 udf_file_type = udf_node->fe->icbtag.file_type;
5065 file_size = udf_rw64(udf_node->fe->inf_len);
5066 file_data = udf_node->fe->data;
5067 } else {
5068 if (udf_node->efe)
5069 udf_free_logvol_dscr(ump, &last_fe_icb_loc,
5070 udf_node->efe);
5071 udf_node->efe = &dscr->efe;
5072 strat = udf_rw16(udf_node->efe->icbtag.strat_type);
5073 udf_file_type = udf_node->efe->icbtag.file_type;
5074 file_size = udf_rw64(udf_node->efe->inf_len);
5075 file_data = udf_node->efe->data;
5076 }
5077
5078 /* check recording strategy (structure) */
5079
5080 /*
5081 * Strategy 4096 is a daisy linked chain terminating with an
5082 * unrecorded sector or a TERM descriptor. The next
5083 * descriptor is to be found in the sector that follows the
5084 * current sector.
5085 */
5086 if (strat == 4096) {
5087 strat4096 = 1;
5088 needs_indirect = 1;
5089
5090 			icb_loc.loc.lb_num = udf_rw32(udf_rw32(icb_loc.loc.lb_num) + 1);
5091 }
5092
5093 /*
5094 * Strategy 4 is the normal strategy and terminates, but if
5095 * we're in strategy 4096, we can't have strategy 4 mixed in
5096 */
5097
5098 if (strat == 4) {
5099 if (strat4096) {
5100 error = EINVAL;
5101 break;
5102 }
5103 break; /* done */
5104 }
5105 } while (!error);
5106
5107 /* first round of cleanup code */
5108 if (error) {
5109 DPRINTF(NODE, ("\tnode fe/efe failed!\n"));
5110 /* recycle udf_node */
5111 udf_dispose_node(udf_node);
5112
5113 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5114 nvp->v_data = NULL;
5115 ungetnewvnode(nvp);
5116
5117 return EINVAL; /* error code ok? */
5118 }
5119 DPRINTF(NODE, ("\tnode fe/efe read in fine\n"));
5120
5121 	/* assert no references to dscr anymore beyond this point */
5122 assert((udf_node->fe) || (udf_node->efe));
5123 dscr = NULL;
5124
5125 /*
5126 * Remember where to record an updated version of the descriptor. If
5127 * there is a sequence of indirect entries, icb_loc will have been
5128 	 * updated. It's the write discipline to allocate new space and to make
5129 * sure the chain is maintained.
5130 *
5131 	 * `needs_indirect' flags whether the next location is to be filled
5132 	 * with an indirect entry.
5133 */
5134 udf_node->write_loc = icb_loc;
5135 udf_node->needs_indirect = needs_indirect;
5136
5137 /*
5138 	 * Go through all allocation extents of this descriptor and when
5139 * encountering a redirect read in the allocation extension. These are
5140 * daisy-chained.
5141 */
5142 UDF_LOCK_NODE(udf_node, 0);
5143 udf_node->num_extensions = 0;
5144
5145 error = 0;
5146 slot = 0;
5147 for (;;) {
5148 udf_get_adslot(udf_node, slot, &icb_loc, &eof);
5149 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
5150 "lb_num = %d, part = %d\n", slot, eof,
5151 UDF_EXT_FLAGS(udf_rw32(icb_loc.len)),
5152 UDF_EXT_LEN(udf_rw32(icb_loc.len)),
5153 udf_rw32(icb_loc.loc.lb_num),
5154 udf_rw16(icb_loc.loc.part_num)));
5155 if (eof)
5156 break;
5157 slot++;
5158
5159 if (UDF_EXT_FLAGS(udf_rw32(icb_loc.len)) != UDF_EXT_REDIRECT)
5160 continue;
5161
5162 DPRINTF(NODE, ("\tgot redirect extent\n"));
5163 if (udf_node->num_extensions >= UDF_MAX_ALLOC_EXTENTS) {
5164 DPRINTF(ALLOC, ("udf_get_node: implementation limit, "
5165 "too many allocation extensions on "
5166 "udf_node\n"));
5167 error = EINVAL;
5168 break;
5169 }
5170
5171 /* length can only be *one* lb : UDF 2.50/2.3.7.1 */
5172 if (UDF_EXT_LEN(udf_rw32(icb_loc.len)) != lb_size) {
5173 DPRINTF(ALLOC, ("udf_get_node: bad allocation "
5174 "extension size in udf_node\n"));
5175 error = EINVAL;
5176 break;
5177 }
5178
5179 DPRINTF(NODE, ("read allocation extent at lb_num %d\n",
5180 UDF_EXT_LEN(udf_rw32(icb_loc.loc.lb_num))));
5181 /* load in allocation extent */
5182 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
5183 if (error || (dscr == NULL))
5184 break;
5185
5186 /* process read-in descriptor */
5187 dscr_type = udf_rw16(dscr->tag.id);
5188
5189 if (dscr_type != TAGID_ALLOCEXTENT) {
5190 udf_free_logvol_dscr(ump, &icb_loc, dscr);
5191 error = ENOENT;
5192 break;
5193 }
5194
5195 DPRINTF(NODE, ("\trecording redirect extent\n"));
5196 udf_node->ext[udf_node->num_extensions] = &dscr->aee;
5197 udf_node->ext_loc[udf_node->num_extensions] = icb_loc;
5198
5199 udf_node->num_extensions++;
5200
5201 } /* while */
5202 UDF_UNLOCK_NODE(udf_node, 0);
5203
5204 /* second round of cleanup code */
5205 if (error) {
5206 /* recycle udf_node */
5207 udf_dispose_node(udf_node);
5208
5209 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5210 nvp->v_data = NULL;
5211 ungetnewvnode(nvp);
5212
5213 return EINVAL; /* error code ok? */
5214 }
5215
5216 DPRINTF(NODE, ("\tnode read in fine\n"));
5217
5218 /*
5219 * Translate UDF filetypes into vnode types.
5220 *
5221 * Systemfiles like the meta main and mirror files are not treated as
5222 * normal files, so we type them as having no type. UDF dictates that
5223 * they are not allowed to be visible.
5224 */
5225
5226 switch (udf_file_type) {
5227 case UDF_ICB_FILETYPE_DIRECTORY :
5228 case UDF_ICB_FILETYPE_STREAMDIR :
5229 nvp->v_type = VDIR;
5230 break;
5231 case UDF_ICB_FILETYPE_BLOCKDEVICE :
5232 nvp->v_type = VBLK;
5233 break;
5234 case UDF_ICB_FILETYPE_CHARDEVICE :
5235 nvp->v_type = VCHR;
5236 break;
5237 case UDF_ICB_FILETYPE_SOCKET :
5238 nvp->v_type = VSOCK;
5239 break;
5240 case UDF_ICB_FILETYPE_FIFO :
5241 nvp->v_type = VFIFO;
5242 break;
5243 case UDF_ICB_FILETYPE_SYMLINK :
5244 nvp->v_type = VLNK;
5245 break;
5246 case UDF_ICB_FILETYPE_VAT :
5247 case UDF_ICB_FILETYPE_META_MAIN :
5248 case UDF_ICB_FILETYPE_META_MIRROR :
5249 nvp->v_type = VNON;
5250 break;
5251 case UDF_ICB_FILETYPE_RANDOMACCESS :
5252 case UDF_ICB_FILETYPE_REALTIME :
5253 nvp->v_type = VREG;
5254 break;
5255 default:
5256 /* YIKES, something else */
5257 nvp->v_type = VNON;
5258 }
5259
5260 /* TODO specfs, fifofs etc etc. vnops setting */
5261
5262 /* don't forget to set vnode's v_size */
5263 uvm_vnp_setsize(nvp, file_size);
5264
5265 /* TODO ext attr and streamdir udf_nodes */
5266
5267 *udf_noderes = udf_node;
5268
5269 return 0;
5270 }
5271
5272 /* --------------------------------------------------------------------- */
5273
5274
5275 int
5276 udf_writeout_node(struct udf_node *udf_node, int waitfor)
5277 {
5278 union dscrptr *dscr;
5279 struct long_ad *loc;
5280 int extnr, flags, error;
5281
5282 DPRINTF(NODE, ("udf_writeout_node called\n"));
5283
5284 KASSERT(udf_node->outstanding_bufs == 0);
5285 KASSERT(udf_node->outstanding_nodedscr == 0);
5286
5287 KASSERT(LIST_EMPTY(&udf_node->vnode->v_dirtyblkhd));
5288
5289 if (udf_node->i_flags & IN_DELETED) {
5290 DPRINTF(NODE, ("\tnode deleted; not writing out\n"));
5291 return 0;
5292 }
5293
5294 /* lock node */
5295 flags = waitfor ? 0 : IN_CALLBACK_ULK;
5296 UDF_LOCK_NODE(udf_node, flags);
5297
5298 /* at least one descriptor writeout */
5299 udf_node->outstanding_nodedscr = 1;
5300
5301 /* we're going to write out the descriptor so clear the flags */
5302 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED);
5303
5304 	/* if we were rebuilt, write out the allocation extents */
5305 if (udf_node->i_flags & IN_NODE_REBUILD) {
5306 		/* mark outstanding node descriptors and issue them */
5307 udf_node->outstanding_nodedscr += udf_node->num_extensions;
5308 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
5309 loc = &udf_node->ext_loc[extnr];
5310 dscr = (union dscrptr *) udf_node->ext[extnr];
5311 error = udf_write_logvol_dscr(udf_node, dscr, loc, 0);
5312 if (error)
5313 return error;
5314 }
5315 /* mark allocation extents written out */
5316 udf_node->i_flags &= ~(IN_NODE_REBUILD);
5317 }
5318
5319 if (udf_node->fe) {
5320 dscr = (union dscrptr *) udf_node->fe;
5321 } else {
5322 KASSERT(udf_node->efe);
5323 dscr = (union dscrptr *) udf_node->efe;
5324 }
5325 KASSERT(dscr);
5326
5327 loc = &udf_node->write_loc;
5328 error = udf_write_logvol_dscr(udf_node, dscr, loc, waitfor);
5329 return error;
5330 }
5331
5332 /* --------------------------------------------------------------------- */
5333
5334 int
5335 udf_dispose_node(struct udf_node *udf_node)
5336 {
5337 struct vnode *vp;
5338 int extnr;
5339
5340 DPRINTF(NODE, ("udf_dispose_node called on node %p\n", udf_node));
5341 if (!udf_node) {
5342 DPRINTF(NODE, ("UDF: Dispose node on node NULL, ignoring\n"));
5343 return 0;
5344 }
5345
5346 vp = udf_node->vnode;
5347 #ifdef DIAGNOSTIC
5348 if (vp->v_numoutput)
5349 panic("disposing UDF node with pending I/O's, udf_node = %p, "
5350 "v_numoutput = %d", udf_node, vp->v_numoutput);
5351 #endif
5352
5353 	/* wait until out of sync (just in case we happen to stumble over one) */
5354 KASSERT(!mutex_owned(&mntvnode_lock));
5355 mutex_enter(&mntvnode_lock);
5356 while (udf_node->i_flags & IN_SYNCED) {
5357 cv_timedwait(&udf_node->ump->dirtynodes_cv, &mntvnode_lock,
5358 hz/16);
5359 }
5360 mutex_exit(&mntvnode_lock);
5361
5362 /* TODO extended attributes and streamdir */
5363
5364 /* remove dirhash if present */
5365 udf_dirhash_destroy(&udf_node->dir_hash);
5366
5367 /* remove from our hash lookup table */
5368 udf_deregister_node(udf_node);
5369
5370 /* destroy our lock */
5371 mutex_destroy(&udf_node->node_mutex);
5372 cv_destroy(&udf_node->node_lock);
5373
5374 /* dissociate our udf_node from the vnode */
5375 genfs_node_destroy(udf_node->vnode);
5376 vp->v_data = NULL;
5377
5378 /* free associated memory and the node itself */
5379 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
5380 udf_free_logvol_dscr(udf_node->ump, &udf_node->ext_loc[extnr],
5381 udf_node->ext[extnr]);
5382 udf_node->ext[extnr] = (void *) 0xdeadcccc;
5383 }
5384
5385 if (udf_node->fe)
5386 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
5387 udf_node->fe);
5388 if (udf_node->efe)
5389 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
5390 udf_node->efe);
5391
5392 udf_node->fe = (void *) 0xdeadaaaa;
5393 udf_node->efe = (void *) 0xdeadbbbb;
5394 udf_node->ump = (void *) 0xdeadbeef;
5395 pool_put(&udf_node_pool, udf_node);
5396
5397 return 0;
5398 }
5399
5400
5401
5402 /*
5403 * create a new node using the specified vnodeops, vap and cnp but with the
5404 * udf_file_type. This allows special files to be created. Use with care.
5405 */
5406
5407 static int
5408 udf_create_node_raw(struct vnode *dvp, struct vnode **vpp, int udf_file_type,
5409 int (**vnodeops)(void *), struct vattr *vap, struct componentname *cnp)
5410 {
5411 union dscrptr *dscr;
5412 	struct udf_node *dir_node = VTOI(dvp);
5413 struct udf_node *udf_node;
5414 struct udf_mount *ump = dir_node->ump;
5415 struct vnode *nvp;
5416 struct long_ad node_icb_loc;
5417 uint64_t parent_unique_id;
5418 uint64_t lmapping, pmapping;
5419 uint32_t lb_size, lb_num;
5420 uint16_t vpart_num;
5421 uid_t uid;
5422 gid_t gid, parent_gid;
5423 int fid_size, error;
5424
5425 lb_size = udf_rw32(ump->logical_vol->lb_size);
5426 *vpp = NULL;
5427
5428 /* allocate vnode */
5429 error = getnewvnode(VT_UDF, ump->vfs_mountp, vnodeops, &nvp);
5430 if (error)
5431 return error;
5432
5433 /* lock node */
5434 error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY);
5435 if (error) {
5436 nvp->v_data = NULL;
5437 ungetnewvnode(nvp);
5438 return error;
5439 }
5440
5441 /* get disc allocation for one logical block */
5442 error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
5443 &vpart_num, &lmapping, &pmapping);
5444 lb_num = lmapping;
5445 if (error) {
5446 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5447 ungetnewvnode(nvp);
5448 return error;
5449 }
5450
5451 /* initialise pointer to location */
5452 memset(&node_icb_loc, 0, sizeof(struct long_ad));
5453 node_icb_loc.len = lb_size;
5454 node_icb_loc.loc.lb_num = udf_rw32(lb_num);
5455 node_icb_loc.loc.part_num = udf_rw16(vpart_num);
5456
5457 /* build udf_node (do initialise!) */
5458 udf_node = pool_get(&udf_node_pool, PR_WAITOK);
5459 memset(udf_node, 0, sizeof(struct udf_node));
5460
5461 /* initialise crosslinks, note location of fe/efe for hashing */
5462 /* bugalert: synchronise with udf_get_node() */
5463 udf_node->ump = ump;
5464 udf_node->vnode = nvp;
5465 nvp->v_data = udf_node;
5466 udf_node->loc = node_icb_loc;
5467 udf_node->write_loc = node_icb_loc;
5468 udf_node->lockf = 0;
5469 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
5470 cv_init(&udf_node->node_lock, "udf_nlk");
5471 udf_node->outstanding_bufs = 0;
5472 udf_node->outstanding_nodedscr = 0;
5473
5474 /* initialise genfs */
5475 genfs_node_init(nvp, &udf_genfsops);
5476
5477 /* insert into the hash lookup */
5478 udf_register_node(udf_node);
5479
5480 	/* get parent's unique ID for referring to '..' if it's a directory */
5481 if (dir_node->fe) {
5482 parent_unique_id = udf_rw64(dir_node->fe->unique_id);
5483 parent_gid = (gid_t) udf_rw32(dir_node->fe->gid);
5484 } else {
5485 parent_unique_id = udf_rw64(dir_node->efe->unique_id);
5486 parent_gid = (gid_t) udf_rw32(dir_node->efe->gid);
5487 }
5488
5489 /* get descriptor */
5490 udf_create_logvol_dscr(ump, udf_node, &node_icb_loc, &dscr);
5491
5492 /* choose a fe or an efe for it */
5493 if (ump->logical_vol->tag.descriptor_ver == 2) {
5494 udf_node->fe = &dscr->fe;
5495 fid_size = udf_create_new_fe(ump, udf_node->fe,
5496 udf_file_type, &udf_node->loc,
5497 &dir_node->loc, parent_unique_id);
5498 /* TODO add extended attribute for creation time */
5499 } else {
5500 udf_node->efe = &dscr->efe;
5501 fid_size = udf_create_new_efe(ump, udf_node->efe,
5502 udf_file_type, &udf_node->loc,
5503 &dir_node->loc, parent_unique_id);
5504 }
5505 KASSERT(dscr->tag.tag_loc == udf_node->loc.loc.lb_num);
5506
5507 /* update vnode's size and type */
5508 nvp->v_type = vap->va_type;
5509 uvm_vnp_setsize(nvp, fid_size);
5510
5511 /* set access mode */
5512 udf_setaccessmode(udf_node, vap->va_mode);
5513
5514 /* set ownership */
5515 uid = kauth_cred_geteuid(cnp->cn_cred);
5516 gid = parent_gid;
5517 udf_setownership(udf_node, uid, gid);
5518
5519 error = udf_dir_attach(ump, dir_node, udf_node, vap, cnp);
5520 if (error) {
5521 /* free disc allocation for node */
5522 udf_free_allocated_space(ump, lb_num, vpart_num, 1);
5523
5524 /* recycle udf_node */
5525 udf_dispose_node(udf_node);
5526 vput(nvp);
5527
5528 *vpp = NULL;
5529 return error;
5530 }
5531
5532 /* adjust file count */
5533 udf_adjust_filecount(udf_node, 1);
5534
5535 /* return result */
5536 *vpp = nvp;
5537
5538 return 0;
5539 }
5540
5541
5542 int
5543 udf_create_node(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
5544 struct componentname *cnp)
5545 {
5546 int (**vnodeops)(void *);
5547 int udf_file_type;
5548
5549 DPRINTF(NODE, ("udf_create_node called\n"));
5550
5551 /* what type are we creating ? */
5552 vnodeops = udf_vnodeop_p;
5553 /* start with a default */
5554 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
5555
5556 *vpp = NULL;
5557
5558 switch (vap->va_type) {
5559 case VREG :
5560 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
5561 break;
5562 case VDIR :
5563 udf_file_type = UDF_ICB_FILETYPE_DIRECTORY;
5564 break;
5565 case VLNK :
5566 udf_file_type = UDF_ICB_FILETYPE_SYMLINK;
5567 break;
5568 case VBLK :
5569 udf_file_type = UDF_ICB_FILETYPE_BLOCKDEVICE;
5570 /* specfs */
5571 return ENOTSUP;
5572 break;
5573 case VCHR :
5574 udf_file_type = UDF_ICB_FILETYPE_CHARDEVICE;
5575 /* specfs */
5576 return ENOTSUP;
5577 break;
5578 case VFIFO :
5579 udf_file_type = UDF_ICB_FILETYPE_FIFO;
5580 /* specfs */
5581 return ENOTSUP;
5582 break;
5583 case VSOCK :
5584 udf_file_type = UDF_ICB_FILETYPE_SOCKET;
5585 /* specfs */
5586 return ENOTSUP;
5587 break;
5588 case VNON :
5589 case VBAD :
5590 default :
5591 /* nothing; can we even create these? */
5592 return EINVAL;
5593 }
5594
5595 return udf_create_node_raw(dvp, vpp, udf_file_type, vnodeops, vap, cnp);
5596 }
5597
5598 /* --------------------------------------------------------------------- */
5599
5600 static void
5601 udf_free_descriptor_space(struct udf_node *udf_node, struct long_ad *loc, void *mem)
5602 {
5603 struct udf_mount *ump = udf_node->ump;
5604 uint32_t lb_size, lb_num, len, num_lb;
5605 uint16_t vpart_num;
5606
5607 /* is there really one? */
5608 if (mem == NULL)
5609 return;
5610
5611 /* got a descriptor here */
5612 len = UDF_EXT_LEN(udf_rw32(loc->len));
5613 lb_num = udf_rw32(loc->loc.lb_num);
5614 vpart_num = udf_rw16(loc->loc.part_num);
5615
5616 lb_size = udf_rw32(ump->logical_vol->lb_size);
5617 num_lb = (len + lb_size -1) / lb_size;
5618
5619 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
5620 }
5621
5622 void
5623 udf_delete_node(struct udf_node *udf_node)
5624 {
5625 void *dscr;
5626 struct udf_mount *ump;
5627 struct long_ad *loc;
5628 int extnr, lvint, dummy;
5629
5630 ump = udf_node->ump;
5631
5632 	/* paranoia check on integrity; should be open! we could panic */
5633 lvint = udf_rw32(udf_node->ump->logvol_integrity->integrity_type);
5634 if (lvint == UDF_INTEGRITY_CLOSED)
5635 printf("\tIntegrity was CLOSED!\n");
5636
5637 /* whatever the node type, change its size to zero */
5638 (void) udf_resize_node(udf_node, 0, &dummy);
5639
5640 /* force it to be `clean'; no use writing it out */
5641 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED | IN_ACCESS |
5642 IN_CHANGE | IN_UPDATE | IN_MODIFY);
5643
5644 /* adjust file count */
5645 udf_adjust_filecount(udf_node, -1);
5646
5647 /*
5648 * Free its allocated descriptors; memory will be released when
5649 * vop_reclaim() is called.
5650 */
5651 loc = &udf_node->loc;
5652
5653 dscr = udf_node->fe;
5654 udf_free_descriptor_space(udf_node, loc, dscr);
5655 dscr = udf_node->efe;
5656 udf_free_descriptor_space(udf_node, loc, dscr);
5657
5658 for (extnr = 0; extnr < UDF_MAX_ALLOC_EXTENTS; extnr++) {
5659 dscr = udf_node->ext[extnr];
5660 loc = &udf_node->ext_loc[extnr];
5661 udf_free_descriptor_space(udf_node, loc, dscr);
5662 }
5663 }
5664
5665 /* --------------------------------------------------------------------- */
5666
5667 /* set new filesize; node must be LOCKED on entry and is locked on exit */
5668 int
5669 udf_resize_node(struct udf_node *udf_node, uint64_t new_size, int *extended)
5670 {
5671 struct file_entry *fe = udf_node->fe;
5672 struct extfile_entry *efe = udf_node->efe;
5673 uint64_t file_size;
5674 int error;
5675
5676 if (fe) {
5677 file_size = udf_rw64(fe->inf_len);
5678 } else {
5679 assert(udf_node->efe);
5680 file_size = udf_rw64(efe->inf_len);
5681 }
5682
5683 DPRINTF(ATTR, ("\tchanging file length from %"PRIu64" to %"PRIu64"\n",
5684 file_size, new_size));
5685
5686 /* if not changing, we're done */
5687 if (file_size == new_size)
5688 return 0;
5689
5690 *extended = (new_size > file_size);
5691 if (*extended) {
5692 error = udf_grow_node(udf_node, new_size);
5693 } else {
5694 error = udf_shrink_node(udf_node, new_size);
5695 }
5696
5697 return error;
5698 }
5699
5700
5701 /* --------------------------------------------------------------------- */
5702
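/*
 * Update the access, modification, attribute-change and birth times
 * recorded in the node's (extended) file entry according to the IN_*
 * flags, using the given timespecs or the current time when they are
 * NULL. For plain file entries the creation time lives in a `file times'
 * extended attribute, when present.
 */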
5703 void
5704 udf_itimes(struct udf_node *udf_node, struct timespec *acc,
5705 struct timespec *mod, struct timespec *birth)
5706 {
5707 struct timespec now;
5708 struct file_entry *fe;
5709 struct extfile_entry *efe;
5710 struct filetimes_extattr_entry *ft_extattr;
5711 struct timestamp *atime, *mtime, *attrtime, *ctime;
5712 struct timestamp fe_ctime;
5713 struct timespec cur_birth;
5714 uint32_t offset, a_l;
5715 uint8_t *filedata;
5716 int error;
5717
5718 /* protect against rogue values */
5719 if (!udf_node)
5720 return;
5721
5722 fe = udf_node->fe;
5723 efe = udf_node->efe;
5724
5725 if (!(udf_node->i_flags & (IN_ACCESS|IN_CHANGE|IN_UPDATE|IN_MODIFY)))
5726 return;
5727
5728 /* get descriptor information */
5729 if (fe) {
5730 atime = &fe->atime;
5731 mtime = &fe->mtime;
5732 attrtime = &fe->attrtime;
5733 filedata = fe->data;
5734
5735 		/* initially use a dummy save location */
5736 ctime = &fe_ctime;
5737
5738 /* check our extended attribute if present */
5739 error = udf_extattr_search_intern(udf_node,
5740 UDF_FILETIMES_ATTR_NO, "", &offset, &a_l);
5741 if (!error) {
5742 ft_extattr = (struct filetimes_extattr_entry *)
5743 (filedata + offset);
5744 if (ft_extattr->existence & UDF_FILETIMES_FILE_CREATION)
5745 ctime = &ft_extattr->times[0];
5746 }
5747 /* TODO create the extended attribute if not found ? */
5748 } else {
5749 assert(udf_node->efe);
5750 atime = &efe->atime;
5751 mtime = &efe->mtime;
5752 attrtime = &efe->attrtime;
5753 ctime = &efe->ctime;
5754 }
5755
5756 vfs_timestamp(&now);
5757
5758 /* set access time */
5759 if (udf_node->i_flags & IN_ACCESS) {
5760 if (acc == NULL)
5761 acc = &now;
5762 udf_timespec_to_timestamp(acc, atime);
5763 }
5764
5765 /* set modification time */
5766 if (udf_node->i_flags & (IN_UPDATE | IN_MODIFY)) {
5767 if (mod == NULL)
5768 mod = &now;
5769 udf_timespec_to_timestamp(mod, mtime);
5770
5771 /* ensure birthtime is older than set modification! */
5772 udf_timestamp_to_timespec(udf_node->ump, ctime, &cur_birth);
5773 if ((cur_birth.tv_sec > mod->tv_sec) ||
5774 ((cur_birth.tv_sec == mod->tv_sec) &&
5775 (cur_birth.tv_nsec > mod->tv_nsec))) {
5776 udf_timespec_to_timestamp(mod, ctime);
5777 }
5778 }
5779
5780 /* update birthtime if specified */
5781 	/* XXX we assume here that the given birthtime is older than mod */
5782 if (birth && (birth->tv_sec != VNOVAL)) {
5783 udf_timespec_to_timestamp(birth, ctime);
5784 }
5785
5786 /* set change time */
5787 if (udf_node->i_flags & (IN_CHANGE | IN_MODIFY))
5788 udf_timespec_to_timestamp(&now, attrtime);
5789
5790 /* notify updates to the node itself */
5791 if (udf_node->i_flags & (IN_ACCESS | IN_MODIFY))
5792 udf_node->i_flags |= IN_ACCESSED;
5793 if (udf_node->i_flags & (IN_UPDATE | IN_CHANGE))
5794 udf_node->i_flags |= IN_MODIFIED;
5795
5796 /* clear modification flags */
5797 udf_node->i_flags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY);
5798 }
5799
5800 /* --------------------------------------------------------------------- */
5801
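/*
 * VOP_UPDATE() worker: stamp the node's times and implementation id and,
 * unless the mount is read-only or the node isn't dirty enough, decide if
 * the change has to be flushed right away. Only when a synchronous write
 * is called for is VOP_FSYNC() issued here; otherwise the node is just
 * left marked dirty.
 */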
5802 int
5803 udf_update(struct vnode *vp, struct timespec *acc,
5804 struct timespec *mod, struct timespec *birth, int updflags)
5805 {
5806 struct udf_node *udf_node = VTOI(vp);
5807 struct udf_mount *ump = udf_node->ump;
5808 struct regid *impl_id;
5809 int mnt_async = (vp->v_mount->mnt_flag & MNT_ASYNC);
5810 int waitfor, flags;
5811
5812 #ifdef DEBUG
5813 char bits[128];
5814 DPRINTF(CALL, ("udf_update(node, %p, %p, %p, %d)\n", acc, mod, birth,
5815 updflags));
5816 bitmask_snprintf(udf_node->i_flags, IN_FLAGBITS, bits, sizeof(bits));
5817 DPRINTF(CALL, ("\tnode flags %s\n", bits));
5818 DPRINTF(CALL, ("\t\tmnt_async = %d\n", mnt_async));
5819 #endif
5820
5821 /* set our times */
5822 udf_itimes(udf_node, acc, mod, birth);
5823
5824 /* set our implementation id */
5825 if (udf_node->fe) {
5826 impl_id = &udf_node->fe->imp_id;
5827 } else {
5828 impl_id = &udf_node->efe->imp_id;
5829 }
5830 udf_set_regid(impl_id, IMPL_NAME);
5831 udf_add_impl_regid(ump, impl_id);
5832
5833 /* if called when mounted readonly, never write back */
5834 if (vp->v_mount->mnt_flag & MNT_RDONLY)
5835 return 0;
5836
5837 	/* check if the node is dirty 'enough' */
5838 if (updflags & UPDATE_CLOSE) {
5839 flags = udf_node->i_flags & (IN_MODIFIED | IN_ACCESSED);
5840 } else {
5841 flags = udf_node->i_flags & IN_MODIFIED;
5842 }
5843 if (flags == 0)
5844 return 0;
5845
5846 /* determine if we need to write sync or async */
5847 waitfor = 0;
5848 if ((flags & IN_MODIFIED) && (mnt_async == 0)) {
5849 /* sync mounted */
5850 waitfor = updflags & UPDATE_WAIT;
5851 if (updflags & UPDATE_DIROP)
5852 waitfor |= UPDATE_WAIT;
5853 }
5854 if (waitfor)
5855 return VOP_FSYNC(vp, FSCRED, FSYNC_WAIT, 0,0);
5856
5857 return 0;
5858 }
5859
5860
5861 /* --------------------------------------------------------------------- */
5862
5863
5864 /*
5865  * Read one fid, process it into a dirent and advance to the next one. (*fid)
5866  * has to be allocated a logical block in size, (*dirent) a struct dirent in length.
5867 */
5868
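/*
 * A directory scan would typically drive this in a loop; a rough,
 * hypothetical sketch (the real callers live in the readdir/lookup code):
 *
 *	offset = 0;
 *	while (offset < file_size) {
 *		error = udf_read_fid_stream(vp, &offset, fid, dirent);
 *		if (error)
 *			break;
 *		... consume dirent ...
 *	}
 */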
5869 int
5870 udf_read_fid_stream(struct vnode *vp, uint64_t *offset,
5871 struct fileid_desc *fid, struct dirent *dirent)
5872 {
5873 struct udf_node *dir_node = VTOI(vp);
5874 struct udf_mount *ump = dir_node->ump;
5875 struct file_entry *fe = dir_node->fe;
5876 struct extfile_entry *efe = dir_node->efe;
5877 uint32_t fid_size, lb_size;
5878 uint64_t file_size;
5879 char *fid_name;
5880 int enough, error;
5881
5882 assert(fid);
5883 assert(dirent);
5884 assert(dir_node);
5885 assert(offset);
5886 assert(*offset != 1);
5887
5888 DPRINTF(FIDS, ("read_fid_stream called at offset %"PRIu64"\n", *offset));
5889 /* check if we're past the end of the directory */
5890 if (fe) {
5891 file_size = udf_rw64(fe->inf_len);
5892 } else {
5893 assert(dir_node->efe);
5894 file_size = udf_rw64(efe->inf_len);
5895 }
5896 if (*offset >= file_size)
5897 return EINVAL;
5898
5899 /* get maximum length of FID descriptor */
5900 lb_size = udf_rw32(ump->logical_vol->lb_size);
5901
5902 /* initialise return values */
5903 fid_size = 0;
5904 memset(dirent, 0, sizeof(struct dirent));
5905 memset(fid, 0, lb_size);
5906
5907 enough = (file_size - (*offset) >= UDF_FID_SIZE);
5908 if (!enough) {
5909 /* short dir ... */
5910 return EIO;
5911 }
5912
5913 error = vn_rdwr(UIO_READ, vp,
5914 fid, MIN(file_size - (*offset), lb_size), *offset,
5915 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, FSCRED,
5916 NULL, NULL);
5917 if (error)
5918 return error;
5919
5920 DPRINTF(FIDS, ("\tfid piece read in fine\n"));
5921 /*
5922 * Check if we got a whole descriptor.
5923 * TODO Try to `resync' directory stream when something is very wrong.
5924 */
5925
5926 /* check if our FID header is OK */
5927 error = udf_check_tag(fid);
5928 if (error) {
5929 goto brokendir;
5930 }
5931 DPRINTF(FIDS, ("\ttag check ok\n"));
5932
5933 if (udf_rw16(fid->tag.id) != TAGID_FID) {
5934 error = EIO;
5935 goto brokendir;
5936 }
5937 DPRINTF(FIDS, ("\ttag checked ok: got TAGID_FID\n"));
5938
5939 /* check for length */
5940 fid_size = udf_fidsize(fid);
5941 enough = (file_size - (*offset) >= fid_size);
5942 if (!enough) {
5943 error = EIO;
5944 goto brokendir;
5945 }
5946 DPRINTF(FIDS, ("\tthe complete fid is read in\n"));
5947
5948 /* check FID contents */
5949 error = udf_check_tag_payload((union dscrptr *) fid, lb_size);
5950 brokendir:
5951 if (error) {
5952 		/* note that this is sometimes a bit quick to report */
5953 printf("BROKEN DIRECTORY ENTRY\n");
5954 /* RESYNC? */
5955 /* TODO: use udf_resync_fid_stream */
5956 return EIO;
5957 }
5958 DPRINTF(FIDS, ("\tpayload checked ok\n"));
5959
5960 /* we got a whole and valid descriptor! */
5961 DPRINTF(FIDS, ("\tinterpret FID\n"));
5962
5963 /* create resulting dirent structure */
5964 fid_name = (char *) fid->data + udf_rw16(fid->l_iu);
5965 udf_to_unix_name(dirent->d_name, MAXNAMLEN,
5966 fid_name, fid->l_fi, &ump->logical_vol->desc_charset);
5967
5968 /* '..' has no name, so provide one */
5969 if (fid->file_char & UDF_FILE_CHAR_PAR)
5970 strcpy(dirent->d_name, "..");
5971
5972 dirent->d_fileno = udf_calchash(&fid->icb); /* inode hash XXX */
5973 dirent->d_namlen = strlen(dirent->d_name);
5974 dirent->d_reclen = _DIRENT_SIZE(dirent);
5975
5976 /*
5977  * Note that it's not worth trying to go for the filetypes now... it's
5978  * too expensive.
5979 */
5980 dirent->d_type = DT_UNKNOWN;
5981
5982 /* initial guess for filetype we can make */
5983 if (fid->file_char & UDF_FILE_CHAR_DIR)
5984 dirent->d_type = DT_DIR;
5985
5986 /* advance */
5987 *offset += fid_size;
5988
5989 return error;
5990 }
5991
5992
5993 /* --------------------------------------------------------------------- */
5994
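/*
 * One pass over the mount's sorted node list, called with mntvnode_lock
 * held. Pass 1 issues a data-only VOP_FSYNC() on every candidate node,
 * pass 2 a full VOP_FSYNC() on nodes with no output outstanding, and pass
 * 3 only counts the still outstanding buffers/descriptors into *ndirty.
 * Nodes that can't be grabbed without sleeping are counted as dirty.
 */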
5995 static void
5996 udf_sync_pass(struct udf_mount *ump, kauth_cred_t cred, int waitfor,
5997 int pass, int *ndirty)
5998 {
5999 struct udf_node *udf_node, *n_udf_node;
6000 struct vnode *vp;
6001 int vdirty, error;
6002 int on_type, on_flags, on_vnode;
6003
6004 derailed:
6005 KASSERT(mutex_owned(&mntvnode_lock));
6006
6007 DPRINTF(SYNC, ("sync_pass %d\n", pass));
6008 udf_node = LIST_FIRST(&ump->sorted_udf_nodes);
6009 for (;udf_node; udf_node = n_udf_node) {
6010 DPRINTF(SYNC, ("."));
6011
6012 udf_node->i_flags &= ~IN_SYNCED;
6013 vp = udf_node->vnode;
6014
6015 mutex_enter(&vp->v_interlock);
6016 n_udf_node = LIST_NEXT(udf_node, sortchain);
6017 if (n_udf_node)
6018 n_udf_node->i_flags |= IN_SYNCED;
6019
6020 /* system nodes are not synced this way */
6021 if (vp->v_vflag & VV_SYSTEM) {
6022 mutex_exit(&vp->v_interlock);
6023 continue;
6024 }
6025
6026 		/* check if it's dirty enough to even try */
6027 on_type = (waitfor == MNT_LAZY || vp->v_type == VNON);
6028 on_flags = ((udf_node->i_flags &
6029 (IN_ACCESSED | IN_UPDATE | IN_MODIFIED)) == 0);
6030 on_vnode = LIST_EMPTY(&vp->v_dirtyblkhd)
6031 && UVM_OBJ_IS_CLEAN(&vp->v_uobj);
6032 if (on_type || (on_flags || on_vnode)) { /* XXX */
6033 /* not dirty (enough?) */
6034 mutex_exit(&vp->v_interlock);
6035 continue;
6036 }
6037
6038 mutex_exit(&mntvnode_lock);
6039 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
6040 if (error) {
6041 mutex_enter(&mntvnode_lock);
6042 if (error == ENOENT)
6043 goto derailed;
6044 *ndirty += 1;
6045 continue;
6046 }
6047
6048 switch (pass) {
6049 case 1:
6050 VOP_FSYNC(vp, cred, 0 | FSYNC_DATAONLY,0,0);
6051 break;
6052 case 2:
6053 vdirty = vp->v_numoutput;
6054 if (vp->v_tag == VT_UDF)
6055 vdirty += udf_node->outstanding_bufs +
6056 udf_node->outstanding_nodedscr;
6057 if (vdirty == 0)
6058 VOP_FSYNC(vp, cred, 0,0,0);
6059 *ndirty += vdirty;
6060 break;
6061 case 3:
6062 vdirty = vp->v_numoutput;
6063 if (vp->v_tag == VT_UDF)
6064 vdirty += udf_node->outstanding_bufs +
6065 udf_node->outstanding_nodedscr;
6066 *ndirty += vdirty;
6067 break;
6068 }
6069
6070 vput(vp);
6071 mutex_enter(&mntvnode_lock);
6072 }
6073 DPRINTF(SYNC, ("END sync_pass %d\n", pass));
6074 }
6075
6076
6077 void
6078 udf_do_sync(struct udf_mount *ump, kauth_cred_t cred, int waitfor)
6079 {
6080 int dummy, ndirty;
6081
6082 mutex_enter(&mntvnode_lock);
6083 recount:
6084 dummy = 0;
6085 DPRINTF(CALL, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
6086 DPRINTF(SYNC, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
6087 udf_sync_pass(ump, cred, waitfor, 1, &dummy);
6088
6089 DPRINTF(CALL, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
6090 DPRINTF(SYNC, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
6091 udf_sync_pass(ump, cred, waitfor, 2, &dummy);
6092
6093 if (waitfor == MNT_WAIT) {
6094 ndirty = ump->devvp->v_numoutput;
6095 DPRINTF(NODE, ("counting pending blocks: on devvp %d\n",
6096 ndirty));
6097 udf_sync_pass(ump, cred, waitfor, 3, &ndirty);
6098 DPRINTF(NODE, ("counted num dirty pending blocks %d\n",
6099 ndirty));
6100
6101 if (ndirty) {
6102 /* 1/4 second wait */
6103 cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
6104 hz/4);
6105 goto recount;
6106 }
6107 }
6108
6109 mutex_exit(&mntvnode_lock);
6110 }
6111
6112 /* --------------------------------------------------------------------- */
6113
6114 /*
6115  * Read and write a file extent into/from a buffer.
6116  *
6117  * The split-up of the extent into separate request-buffers is to minimise
6118  * copying around as much as possible.
6119  *
6120  * Block based file reading and writing.
6121 */
6122
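/*
 * Internal (embedded) files: the file data is stored in the tail of the
 * (extended) file entry descriptor itself, so reading is a plain copy out
 * of the descriptor into the supplied sector-sized blob.
 */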
6123 static int
6124 udf_read_internal(struct udf_node *node, uint8_t *blob)
6125 {
6126 struct udf_mount *ump;
6127 struct file_entry *fe = node->fe;
6128 struct extfile_entry *efe = node->efe;
6129 uint64_t inflen;
6130 uint32_t sector_size;
6131 uint8_t *pos;
6132 int icbflags, addr_type;
6133
6134 /* get extent and do some paranoia checks */
6135 ump = node->ump;
6136 sector_size = ump->discinfo.sector_size;
6137
6138 if (fe) {
6139 inflen = udf_rw64(fe->inf_len);
6140 pos = &fe->data[0] + udf_rw32(fe->l_ea);
6141 icbflags = udf_rw16(fe->icbtag.flags);
6142 } else {
6143 assert(node->efe);
6144 inflen = udf_rw64(efe->inf_len);
6145 pos = &efe->data[0] + udf_rw32(efe->l_ea);
6146 icbflags = udf_rw16(efe->icbtag.flags);
6147 }
6148 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
6149
6150 assert(addr_type == UDF_ICB_INTERN_ALLOC);
6151 assert(inflen < sector_size);
6152
6153 /* copy out info */
6154 memset(blob, 0, sector_size);
6155 memcpy(blob, pos, inflen);
6156
6157 return 0;
6158 }
6159
6160
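/*
 * Counterpart of udf_read_internal(): copy the blob back into the data
 * area of the (extended) file entry descriptor. The caller has to make
 * sure the data still fits; see the asserts below.
 */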
6161 static int
6162 udf_write_internal(struct udf_node *node, uint8_t *blob)
6163 {
6164 struct udf_mount *ump;
6165 struct file_entry *fe = node->fe;
6166 struct extfile_entry *efe = node->efe;
6167 uint64_t inflen;
6168 uint32_t sector_size;
6169 uint8_t *pos;
6170 int icbflags, addr_type;
6171
6172 /* get extent and do some paranoia checks */
6173 ump = node->ump;
6174 sector_size = ump->discinfo.sector_size;
6175
6176 if (fe) {
6177 inflen = udf_rw64(fe->inf_len);
6178 pos = &fe->data[0] + udf_rw32(fe->l_ea);
6179 icbflags = udf_rw16(fe->icbtag.flags);
6180 } else {
6181 assert(node->efe);
6182 inflen = udf_rw64(efe->inf_len);
6183 pos = &efe->data[0] + udf_rw32(efe->l_ea);
6184 icbflags = udf_rw16(efe->icbtag.flags);
6185 }
6186 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
6187
6188 assert(addr_type == UDF_ICB_INTERN_ALLOC);
6189 assert(inflen < sector_size);
6190
6191 /* copy in blob */
6192 /* memset(pos, 0, inflen); */
6193 memcpy(pos, blob, inflen);
6194
6195 return 0;
6196 }
6197
6198
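/*
 * Read in a file buffer: translate its logical blocks to disc addresses,
 * return zeroes for unmapped or zero extents and, for everything else,
 * nest asynchronous iobufs on the master buffer, one per contiguous run,
 * handing them to the disc strategy scheduler.
 */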
6199 void
6200 udf_read_filebuf(struct udf_node *udf_node, struct buf *buf)
6201 {
6202 struct buf *nestbuf;
6203 struct udf_mount *ump = udf_node->ump;
6204 uint64_t *mapping;
6205 uint64_t run_start;
6206 uint32_t sector_size;
6207 uint32_t buf_offset, sector, rbuflen, rblk;
6208 uint32_t from, lblkno;
6209 uint32_t sectors;
6210 uint8_t *buf_pos;
6211 int error, run_length, isdir, what;
6212
6213 sector_size = udf_node->ump->discinfo.sector_size;
6214
6215 from = buf->b_blkno;
6216 sectors = buf->b_bcount / sector_size;
6217
6218 isdir = (udf_node->vnode->v_type == VDIR);
6219 what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
6220
6221 /* assure we have enough translation slots */
6222 KASSERT(buf->b_bcount / sector_size <= UDF_MAX_MAPPINGS);
6223 KASSERT(MAXPHYS / sector_size <= UDF_MAX_MAPPINGS);
6224
6225 if (sectors > UDF_MAX_MAPPINGS) {
6226 printf("udf_read_filebuf: implementation limit on bufsize\n");
6227 buf->b_error = EIO;
6228 biodone(buf);
6229 return;
6230 }
6231
6232 mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
6233
6234 error = 0;
6235 DPRINTF(READ, ("\ttranslate %d-%d\n", from, sectors));
6236 error = udf_translate_file_extent(udf_node, from, sectors, mapping);
6237 if (error) {
6238 buf->b_error = error;
6239 biodone(buf);
6240 goto out;
6241 }
6242 DPRINTF(READ, ("\ttranslate extent went OK\n"));
6243
6244 	/* pre-check if it's internally mapped */
6245 if (*mapping == UDF_TRANS_INTERN) {
6246 error = udf_read_internal(udf_node, (uint8_t *) buf->b_data);
6247 if (error)
6248 buf->b_error = error;
6249 biodone(buf);
6250 goto out;
6251 }
6252 DPRINTF(READ, ("\tnot intern\n"));
6253
6254 #ifdef DEBUG
6255 if (udf_verbose & UDF_DEBUG_TRANSLATE) {
6256 printf("Returned translation table:\n");
6257 for (sector = 0; sector < sectors; sector++) {
6258 printf("%d : %"PRIu64"\n", sector, mapping[sector]);
6259 }
6260 }
6261 #endif
6262
6263 	/* request read-in of data from the disc scheduler */
6264 buf->b_resid = buf->b_bcount;
6265 for (sector = 0; sector < sectors; sector++) {
6266 buf_offset = sector * sector_size;
6267 buf_pos = (uint8_t *) buf->b_data + buf_offset;
6268 DPRINTF(READ, ("\tprocessing rel sector %d\n", sector));
6269
6270 		/* check if it's zero or unmapped to stop reading */
6271 switch (mapping[sector]) {
6272 case UDF_TRANS_UNMAPPED:
6273 case UDF_TRANS_ZERO:
6274 /* copy zero sector TODO runlength like below */
6275 memset(buf_pos, 0, sector_size);
6276 DPRINTF(READ, ("\treturning zero sector\n"));
6277 nestiobuf_done(buf, sector_size, 0);
6278 break;
6279 default :
6280 DPRINTF(READ, ("\tread sector "
6281 "%"PRIu64"\n", mapping[sector]));
6282
6283 lblkno = from + sector;
6284 run_start = mapping[sector];
6285 run_length = 1;
6286 while (sector < sectors-1) {
6287 if (mapping[sector+1] != mapping[sector]+1)
6288 break;
6289 run_length++;
6290 sector++;
6291 }
6292
6293 /*
6294 * nest an iobuf and mark it for async reading. Since
6295 * we're using nested buffers, they can't be cached by
6296 * design.
6297 */
6298 rbuflen = run_length * sector_size;
6299 rblk = run_start * (sector_size/DEV_BSIZE);
6300
6301 nestbuf = getiobuf(NULL, true);
6302 nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
6303 /* nestbuf is B_ASYNC */
6304
6305 /* identify this nestbuf */
6306 nestbuf->b_lblkno = lblkno;
6307 assert(nestbuf->b_vp == udf_node->vnode);
6308
6309 			/* CD schedules on raw blkno */
6310 nestbuf->b_blkno = rblk;
6311 nestbuf->b_proc = NULL;
6312 nestbuf->b_rawblkno = rblk;
6313 nestbuf->b_udf_c_type = what;
6314
6315 udf_discstrat_queuebuf(ump, nestbuf);
6316 }
6317 }
6318 out:
6319 /* if we're synchronously reading, wait for the completion */
6320 if ((buf->b_flags & B_ASYNC) == 0)
6321 biowait(buf);
6322
6323 DPRINTF(READ, ("\tend of read_filebuf\n"));
6324 free(mapping, M_TEMP);
6325 return;
6326 }
6327
6328
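/*
 * Write out a file buffer; same structure as udf_read_filebuf() but the
 * disc addresses may still be late-allocated by the strategy code, so the
 * translation is mainly used to find contiguous runs. The node's
 * outstanding buffer count is raised for every nested iobuf queued.
 */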
6329 void
6330 udf_write_filebuf(struct udf_node *udf_node, struct buf *buf)
6331 {
6332 struct buf *nestbuf;
6333 struct udf_mount *ump = udf_node->ump;
6334 uint64_t *mapping;
6335 uint64_t run_start;
6336 uint32_t lb_size;
6337 uint32_t buf_offset, lb_num, rbuflen, rblk;
6338 uint32_t from, lblkno;
6339 uint32_t num_lb;
6340 uint8_t *buf_pos;
6341 int error, run_length, isdir, what, s;
6342
6343 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
6344
6345 from = buf->b_blkno;
6346 num_lb = buf->b_bcount / lb_size;
6347
6348 isdir = (udf_node->vnode->v_type == VDIR);
6349 what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
6350
6351 /* assure we have enough translation slots */
6352 KASSERT(buf->b_bcount / lb_size <= UDF_MAX_MAPPINGS);
6353 KASSERT(MAXPHYS / lb_size <= UDF_MAX_MAPPINGS);
6354
6355 if (num_lb > UDF_MAX_MAPPINGS) {
6356 printf("udf_write_filebuf: implementation limit on bufsize\n");
6357 buf->b_error = EIO;
6358 biodone(buf);
6359 return;
6360 }
6361
6362 mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
6363
6364 error = 0;
6365 DPRINTF(WRITE, ("\ttranslate %d-%d\n", from, num_lb));
6366 error = udf_translate_file_extent(udf_node, from, num_lb, mapping);
6367 if (error) {
6368 buf->b_error = error;
6369 biodone(buf);
6370 goto out;
6371 }
6372 DPRINTF(WRITE, ("\ttranslate extent went OK\n"));
6373
6374 	/* if it's internally mapped, we can write it in the descriptor itself */
6375 if (*mapping == UDF_TRANS_INTERN) {
6376 /* TODO paranoia check if we ARE going to have enough space */
6377 error = udf_write_internal(udf_node, (uint8_t *) buf->b_data);
6378 if (error)
6379 buf->b_error = error;
6380 biodone(buf);
6381 goto out;
6382 }
6383 DPRINTF(WRITE, ("\tnot intern\n"));
6384
6385 	/* request write-out of data to the disc scheduler */
6386 buf->b_resid = buf->b_bcount;
6387 for (lb_num = 0; lb_num < num_lb; lb_num++) {
6388 buf_offset = lb_num * lb_size;
6389 buf_pos = (uint8_t *) buf->b_data + buf_offset;
6390 DPRINTF(WRITE, ("\tprocessing rel lb_num %d\n", lb_num));
6391
6392 /*
6393 * Mappings are not that important here. Just before we write
6394 * the lb_num we late-allocate them when needed and update the
6395 * mapping in the udf_node.
6396 */
6397
6398 /* XXX why not ignore the mapping altogether ? */
6399 /* TODO estimate here how much will be late-allocated */
6400 DPRINTF(WRITE, ("\twrite lb_num "
6401 "%"PRIu64, mapping[lb_num]));
6402
6403 lblkno = from + lb_num;
6404 run_start = mapping[lb_num];
6405 run_length = 1;
6406 while (lb_num < num_lb-1) {
6407 if (mapping[lb_num+1] != mapping[lb_num]+1)
6408 if (mapping[lb_num+1] != mapping[lb_num])
6409 break;
6410 run_length++;
6411 lb_num++;
6412 }
6413 DPRINTF(WRITE, ("+ %d\n", run_length));
6414
6415 /* nest an iobuf on the master buffer for the extent */
6416 rbuflen = run_length * lb_size;
6417 rblk = run_start * (lb_size/DEV_BSIZE);
6418
6419 #if 0
6420 /* if its zero or unmapped, our blknr gets -1 for unmapped */
6421 switch (mapping[lb_num]) {
6422 case UDF_TRANS_UNMAPPED:
6423 case UDF_TRANS_ZERO:
6424 rblk = -1;
6425 break;
6426 default:
6427 rblk = run_start * (lb_size/DEV_BSIZE);
6428 break;
6429 }
6430 #endif
6431
6432 nestbuf = getiobuf(NULL, true);
6433 nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
6434 /* nestbuf is B_ASYNC */
6435
6436 /* identify this nestbuf */
6437 nestbuf->b_lblkno = lblkno;
6438 KASSERT(nestbuf->b_vp == udf_node->vnode);
6439
6440 		/* CD schedules on raw blkno */
6441 nestbuf->b_blkno = rblk;
6442 nestbuf->b_proc = NULL;
6443 nestbuf->b_rawblkno = rblk;
6444 nestbuf->b_udf_c_type = what;
6445
6446 /* increment our outstanding bufs counter */
6447 s = splbio();
6448 udf_node->outstanding_bufs++;
6449 splx(s);
6450
6451 udf_discstrat_queuebuf(ump, nestbuf);
6452 }
6453 out:
6454 /* if we're synchronously writing, wait for the completion */
6455 if ((buf->b_flags & B_ASYNC) == 0)
6456 biowait(buf);
6457
6458 DPRINTF(WRITE, ("\tend of write_filebuf\n"));
6459 free(mapping, M_TEMP);
6460 return;
6461 }
6462
6463 /* --------------------------------------------------------------------- */
6464
6465
6466