     1 /* $NetBSD: udf_subr.c,v 1.69 2008/07/27 11:38:23 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29
30 #include <sys/cdefs.h>
31 #ifndef lint
32 __KERNEL_RCSID(0, "$NetBSD: udf_subr.c,v 1.69 2008/07/27 11:38:23 reinoud Exp $");
33 #endif /* not lint */
34
35
36 #if defined(_KERNEL_OPT)
37 #include "opt_quota.h"
38 #include "opt_compat_netbsd.h"
39 #endif
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <fs/unicode.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
76
77 #define UDF_SET_SYSTEMFILE(vp) \
78 /* XXXAD Is the vnode locked? */ \
79 (vp)->v_vflag |= VV_SYSTEM; \
80 vref(vp); \
81 vput(vp); \
82
83 extern int syncer_maxdelay; /* maximum delay time */
84 extern int (**udf_vnodeop_p)(void *);
85
86 /* --------------------------------------------------------------------- */
87
88 //#ifdef DEBUG
89 #if 1
90
91 #if 0
92 static void
    93 udf_dumpblob(uint8_t *blob, uint32_t dlen)
94 {
95 int i, j;
96
97 printf("blob = %p\n", blob);
98 printf("dump of %d bytes\n", dlen);
99
   100 	for (i = 0; i < dlen; i += 16) {
101 printf("%04x ", i);
102 for (j = 0; j < 16; j++) {
103 if (i+j < dlen) {
104 printf("%02x ", blob[i+j]);
105 } else {
106 printf(" ");
107 }
108 }
109 for (j = 0; j < 16; j++) {
110 if (i+j < dlen) {
   111 				if (blob[i+j] > 32 && blob[i+j] != 127) {
112 printf("%c", blob[i+j]);
113 } else {
114 printf(".");
115 }
116 }
117 }
118 printf("\n");
119 }
120 printf("\n");
121 Debugger();
122 }
123 #endif
124
125 static void
126 udf_dump_discinfo(struct udf_mount *ump)
127 {
128 char bits[128];
129 struct mmc_discinfo *di = &ump->discinfo;
130
131 if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
132 return;
133
134 printf("Device/media info :\n");
135 printf("\tMMC profile 0x%02x\n", di->mmc_profile);
136 printf("\tderived class %d\n", di->mmc_class);
137 printf("\tsector size %d\n", di->sector_size);
138 printf("\tdisc state %d\n", di->disc_state);
139 printf("\tlast ses state %d\n", di->last_session_state);
140 printf("\tbg format state %d\n", di->bg_format_state);
141 printf("\tfrst track %d\n", di->first_track);
142 printf("\tfst on last ses %d\n", di->first_track_last_session);
143 printf("\tlst on last ses %d\n", di->last_track_last_session);
144 printf("\tlink block penalty %d\n", di->link_block_penalty);
145 bitmask_snprintf(di->disc_flags, MMC_DFLAGS_FLAGBITS, bits,
146 sizeof(bits));
147 printf("\tdisc flags %s\n", bits);
148 printf("\tdisc id %x\n", di->disc_id);
149 printf("\tdisc barcode %"PRIx64"\n", di->disc_barcode);
150
151 printf("\tnum sessions %d\n", di->num_sessions);
152 printf("\tnum tracks %d\n", di->num_tracks);
153
154 bitmask_snprintf(di->mmc_cur, MMC_CAP_FLAGBITS, bits, sizeof(bits));
155 printf("\tcapabilities cur %s\n", bits);
156 bitmask_snprintf(di->mmc_cap, MMC_CAP_FLAGBITS, bits, sizeof(bits));
157 printf("\tcapabilities cap %s\n", bits);
158 }
159 #else
160 #define udf_dump_discinfo(a);
161 #endif
162
163
164 /* --------------------------------------------------------------------- */
165
166 /* not called often */
167 int
168 udf_update_discinfo(struct udf_mount *ump)
169 {
170 struct vnode *devvp = ump->devvp;
171 struct partinfo dpart;
172 struct mmc_discinfo *di;
173 int error;
174
175 DPRINTF(VOLUMES, ("read/update disc info\n"));
176 di = &ump->discinfo;
177 memset(di, 0, sizeof(struct mmc_discinfo));
178
   179 	/* check if we're on an MMC capable device, i.e. CD/DVD */
180 error = VOP_IOCTL(devvp, MMCGETDISCINFO, di, FKIOCTL, NOCRED);
181 if (error == 0) {
182 udf_dump_discinfo(ump);
183 return 0;
184 }
185
186 /* disc partition support */
187 error = VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED);
188 if (error)
189 return ENODEV;
190
191 /* set up a disc info profile for partitions */
192 di->mmc_profile = 0x01; /* disc type */
193 di->mmc_class = MMC_CLASS_DISC;
194 di->disc_state = MMC_STATE_CLOSED;
195 di->last_session_state = MMC_STATE_CLOSED;
196 di->bg_format_state = MMC_BGFSTATE_COMPLETED;
197 di->link_block_penalty = 0;
198
199 di->mmc_cur = MMC_CAP_RECORDABLE | MMC_CAP_REWRITABLE |
200 MMC_CAP_ZEROLINKBLK | MMC_CAP_HW_DEFECTFREE;
201 di->mmc_cap = di->mmc_cur;
202 di->disc_flags = MMC_DFLAGS_UNRESTRICTED;
203
204 /* TODO problem with last_possible_lba on resizable VND; request */
205 di->last_possible_lba = dpart.part->p_size;
206 di->sector_size = dpart.disklab->d_secsize;
207
208 di->num_sessions = 1;
209 di->num_tracks = 1;
210
211 di->first_track = 1;
212 di->first_track_last_session = di->last_track_last_session = 1;
213
214 udf_dump_discinfo(ump);
215 return 0;
216 }
217
218
219 int
220 udf_update_trackinfo(struct udf_mount *ump, struct mmc_trackinfo *ti)
221 {
222 struct vnode *devvp = ump->devvp;
223 struct mmc_discinfo *di = &ump->discinfo;
224 int error, class;
225
226 DPRINTF(VOLUMES, ("read track info\n"));
227
228 class = di->mmc_class;
229 if (class != MMC_CLASS_DISC) {
230 /* tracknr specified in struct ti */
231 error = VOP_IOCTL(devvp, MMCGETTRACKINFO, ti, FKIOCTL, NOCRED);
232 return error;
233 }
234
235 /* disc partition support */
236 if (ti->tracknr != 1)
237 return EIO;
238
239 /* create fake ti (TODO check for resized vnds) */
240 ti->sessionnr = 1;
241
242 ti->track_mode = 0; /* XXX */
243 ti->data_mode = 0; /* XXX */
244 ti->flags = MMC_TRACKINFO_LRA_VALID | MMC_TRACKINFO_NWA_VALID;
245
246 ti->track_start = 0;
247 ti->packet_size = 1;
248
249 /* TODO support for resizable vnd */
250 ti->track_size = di->last_possible_lba;
251 ti->next_writable = di->last_possible_lba;
252 ti->last_recorded = ti->next_writable;
253 ti->free_blocks = 0;
254
255 return 0;
256 }
257
258
259 int
260 udf_setup_writeparams(struct udf_mount *ump)
261 {
262 struct mmc_writeparams mmc_writeparams;
263 int error;
264
265 if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
266 return 0;
267
268 /*
269 * only CD burning normally needs setting up, but other disc types
270 * might need other settings to be made. The MMC framework will set up
   271 	 * the necessary recording parameters according to the disc
272 * characteristics read in. Modifications can be made in the discinfo
273 * structure passed to change the nature of the disc.
274 */
275
276 memset(&mmc_writeparams, 0, sizeof(struct mmc_writeparams));
277 mmc_writeparams.mmc_class = ump->discinfo.mmc_class;
278 mmc_writeparams.mmc_cur = ump->discinfo.mmc_cur;
279
280 /*
281 * UDF dictates first track to determine track mode for the whole
282 * disc. [UDF 1.50/6.10.1.1, UDF 1.50/6.10.2.1]
283 * To prevent problems with a `reserved' track in front we start with
284 * the 2nd track and if that is not valid, go for the 1st.
285 */
286 mmc_writeparams.tracknr = 2;
287 mmc_writeparams.data_mode = MMC_DATAMODE_DEFAULT; /* XA disc */
288 mmc_writeparams.track_mode = MMC_TRACKMODE_DEFAULT; /* data */
289
290 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS, &mmc_writeparams,
291 FKIOCTL, NOCRED);
292 if (error) {
293 mmc_writeparams.tracknr = 1;
294 error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS,
295 &mmc_writeparams, FKIOCTL, NOCRED);
296 }
297 return error;
298 }
299
300
301 int
302 udf_synchronise_caches(struct udf_mount *ump)
303 {
304 struct mmc_op mmc_op;
305
306 DPRINTF(CALL, ("udf_synchronise_caches()\n"));
307
308 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
309 return 0;
310
   311 	/* regular disc partitions need no cache synchronisation */
312 if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
313 return 0;
314
315 bzero(&mmc_op, sizeof(struct mmc_op));
316 mmc_op.operation = MMC_OP_SYNCHRONISECACHE;
317
318 /* ignore return code */
319 (void) VOP_IOCTL(ump->devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED);
320
321 return 0;
322 }
323
324 /* --------------------------------------------------------------------- */
325
326 /* track/session searching for mounting */
327 int
328 udf_search_tracks(struct udf_mount *ump, struct udf_args *args,
329 int *first_tracknr, int *last_tracknr)
330 {
331 struct mmc_trackinfo trackinfo;
332 uint32_t tracknr, start_track, num_tracks;
333 int error;
334
335 /* if negative, sessionnr is relative to last session */
336 if (args->sessionnr < 0) {
337 args->sessionnr += ump->discinfo.num_sessions;
338 }
339
340 /* sanity */
341 if (args->sessionnr < 0)
342 args->sessionnr = 0;
343 if (args->sessionnr > ump->discinfo.num_sessions)
344 args->sessionnr = ump->discinfo.num_sessions;
345
346 /* search the tracks for this session, zero session nr indicates last */
347 if (args->sessionnr == 0)
348 args->sessionnr = ump->discinfo.num_sessions;
349 if (ump->discinfo.last_session_state == MMC_STATE_EMPTY)
350 args->sessionnr--;
351
352 /* sanity again */
353 if (args->sessionnr < 0)
354 args->sessionnr = 0;
355
356 /* search the first and last track of the specified session */
357 num_tracks = ump->discinfo.num_tracks;
358 start_track = ump->discinfo.first_track;
359
360 /* search for first track of this session */
361 for (tracknr = start_track; tracknr <= num_tracks; tracknr++) {
362 /* get track info */
363 trackinfo.tracknr = tracknr;
364 error = udf_update_trackinfo(ump, &trackinfo);
365 if (error)
366 return error;
367
368 if (trackinfo.sessionnr == args->sessionnr)
369 break;
370 }
371 *first_tracknr = tracknr;
372
373 /* search for last track of this session */
   374 	for (; tracknr <= num_tracks; tracknr++) {
375 /* get track info */
376 trackinfo.tracknr = tracknr;
377 error = udf_update_trackinfo(ump, &trackinfo);
378 if (error || (trackinfo.sessionnr != args->sessionnr)) {
379 tracknr--;
380 break;
381 }
382 }
383 if (tracknr > num_tracks)
384 tracknr--;
385
386 *last_tracknr = tracknr;
387
388 if (*last_tracknr < *first_tracknr) {
389 printf( "udf_search_tracks: sanity check on drive+disc failed, "
390 "drive returned garbage\n");
391 return EINVAL;
392 }
393
394 assert(*last_tracknr >= *first_tracknr);
395 return 0;
396 }
397
398
399 /*
400 * NOTE: this is the only routine in this file that directly peeks into the
   401  * metadata file, but since the mount is still in a larval state it can't hurt.
402 *
403 * XXX candidate for udf_allocation.c
404 * XXX clean me up!, change to new node reading code.
405 */
406
407 static void
408 udf_check_track_metadata_overlap(struct udf_mount *ump,
409 struct mmc_trackinfo *trackinfo)
410 {
411 struct part_desc *part;
412 struct file_entry *fe;
413 struct extfile_entry *efe;
414 struct short_ad *s_ad;
415 struct long_ad *l_ad;
416 uint32_t track_start, track_end;
417 uint32_t phys_part_start, phys_part_end, part_start, part_end;
418 uint32_t sector_size, len, alloclen, plb_num;
419 uint8_t *pos;
420 int addr_type, icblen, icbflags, flags;
421
422 /* get our track extents */
423 track_start = trackinfo->track_start;
424 track_end = track_start + trackinfo->track_size;
425
426 /* get our base partition extent */
427 part = ump->partitions[ump->metadata_part];
428 phys_part_start = udf_rw32(part->start_loc);
429 phys_part_end = phys_part_start + udf_rw32(part->part_len);
430
   431 	/* no use if it's outside the physical partition */
432 if ((phys_part_start >= track_end) || (phys_part_end < track_start))
433 return;
434
435 /*
436 * now follow all extents in the fe/efe to see if they refer to this
437 * track
438 */
439
440 sector_size = ump->discinfo.sector_size;
441
442 /* XXX should we claim exclusive access to the metafile ? */
443 /* TODO: move to new node read code */
444 fe = ump->metadata_node->fe;
445 efe = ump->metadata_node->efe;
446 if (fe) {
447 alloclen = udf_rw32(fe->l_ad);
448 pos = &fe->data[0] + udf_rw32(fe->l_ea);
449 icbflags = udf_rw16(fe->icbtag.flags);
450 } else {
451 assert(efe);
452 alloclen = udf_rw32(efe->l_ad);
453 pos = &efe->data[0] + udf_rw32(efe->l_ea);
454 icbflags = udf_rw16(efe->icbtag.flags);
455 }
456 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
457
458 while (alloclen) {
459 if (addr_type == UDF_ICB_SHORT_ALLOC) {
460 icblen = sizeof(struct short_ad);
461 s_ad = (struct short_ad *) pos;
462 len = udf_rw32(s_ad->len);
463 plb_num = udf_rw32(s_ad->lb_num);
464 } else {
465 /* should not be present, but why not */
466 icblen = sizeof(struct long_ad);
467 l_ad = (struct long_ad *) pos;
468 len = udf_rw32(l_ad->len);
469 plb_num = udf_rw32(l_ad->loc.lb_num);
470 /* pvpart_num = udf_rw16(l_ad->loc.part_num); */
471 }
472 /* process extent */
473 flags = UDF_EXT_FLAGS(len);
474 len = UDF_EXT_LEN(len);
475
476 part_start = phys_part_start + plb_num;
477 part_end = part_start + (len / sector_size);
478
479 if ((part_start >= track_start) && (part_end <= track_end)) {
480 /* extent is enclosed within this track */
481 ump->metadata_track = *trackinfo;
482 return;
483 }
484
485 pos += icblen;
486 alloclen -= icblen;
487 }
488 }
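
/*
 * Illustrative sketch, not part of the driver: the 32-bit `len' field of a
 * short_ad/long_ad carries both the extent type and the byte length, which
 * is presumably what the UDF_EXT_FLAGS()/UDF_EXT_LEN() macros used above
 * unpack. Per ECMA-167 the two top bits hold the extent type and the lower
 * 30 bits the length in bytes; a minimal stand-alone version would be:
 */
#if 0
static void
udf_split_extent_len(uint32_t lenfield, uint32_t *type, uint32_t *bytes)
{
	*type  = lenfield >> 30;		/* 0 = recorded and allocated */
	*bytes = lenfield & ((1U << 30) - 1);	/* extent length in bytes     */
}
#endif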
489
490
491 int
492 udf_search_writing_tracks(struct udf_mount *ump)
493 {
494 struct mmc_trackinfo trackinfo;
495 struct part_desc *part;
496 uint32_t tracknr, start_track, num_tracks;
497 uint32_t track_start, track_end, part_start, part_end;
498 int error;
499
500 /*
501 * in the CD/(HD)DVD/BD recordable device model a few tracks within
502 * the last session might be open but in the UDF device model at most
503 * three tracks can be open: a reserved track for delayed ISO VRS
504 * writing, a data track and a metadata track. We search here for the
505 * data track and the metadata track. Note that the reserved track is
506 * troublesome but can be detected by its small size of < 512 sectors.
507 */
508
509 num_tracks = ump->discinfo.num_tracks;
510 start_track = ump->discinfo.first_track;
511
512 /* fetch info on first and possibly only track */
513 trackinfo.tracknr = start_track;
514 error = udf_update_trackinfo(ump, &trackinfo);
515 if (error)
516 return error;
517
518 /* copy results to our mount point */
519 ump->data_track = trackinfo;
520 ump->metadata_track = trackinfo;
521
522 /* if not sequential, we're done */
523 if (num_tracks == 1)
524 return 0;
525
   526 	for (tracknr = start_track; tracknr <= num_tracks; tracknr++) {
527 /* get track info */
528 trackinfo.tracknr = tracknr;
529 error = udf_update_trackinfo(ump, &trackinfo);
530 if (error)
531 return error;
532
533 if ((trackinfo.flags & MMC_TRACKINFO_NWA_VALID) == 0)
534 continue;
535
536 track_start = trackinfo.track_start;
537 track_end = track_start + trackinfo.track_size;
538
539 /* check for overlap on data partition */
540 part = ump->partitions[ump->data_part];
541 part_start = udf_rw32(part->start_loc);
542 part_end = part_start + udf_rw32(part->part_len);
543 if ((part_start < track_end) && (part_end > track_start)) {
544 ump->data_track = trackinfo;
545 /* TODO check if UDF partition data_part is writable */
546 }
547
548 /* check for overlap on metadata partition */
549 if ((ump->meta_alloc == UDF_ALLOC_METASEQUENTIAL) ||
550 (ump->meta_alloc == UDF_ALLOC_METABITMAP)) {
551 udf_check_track_metadata_overlap(ump, &trackinfo);
552 } else {
553 ump->metadata_track = trackinfo;
554 }
555 }
556
557 if ((ump->data_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
558 return EROFS;
559
560 if ((ump->metadata_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
561 return EROFS;
562
563 return 0;
564 }
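
/*
 * Hypothetical helper, only to illustrate the remark above that the
 * reserved ISO VRS track can be recognised by its small size; this check is
 * an assumption and is not used by the mount code in this file.
 */
#if 0
static int
udf_track_is_vrs_reserved(struct mmc_trackinfo *ti)
{
	/* the reserved track is smaller than 512 sectors */
	return (ti->track_size < 512);
}
#endif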
565
566 /* --------------------------------------------------------------------- */
567
568 /*
569 * Check if the blob starts with a good UDF tag. Tags are protected by a
   570  * checksum over the header, except for one byte at position 4 which is the
   571  * checksum itself.
572 */
573
574 int
575 udf_check_tag(void *blob)
576 {
577 struct desc_tag *tag = blob;
578 uint8_t *pos, sum, cnt;
579
580 /* check TAG header checksum */
581 pos = (uint8_t *) tag;
582 sum = 0;
583
   584 	for (cnt = 0; cnt < 16; cnt++) {
585 if (cnt != 4)
586 sum += *pos;
587 pos++;
588 }
589 if (sum != tag->cksum) {
590 /* bad tag header checksum; this is not a valid tag */
591 return EINVAL;
592 }
593
594 return 0;
595 }
596
597
598 /*
   599  * udf_check_tag_payload checks the descriptor CRC as specified. It returns
   600  * EIO if the descriptor is too long for the buffer and EINVAL on a CRC mismatch.
601 */
602
603 int
604 udf_check_tag_payload(void *blob, uint32_t max_length)
605 {
606 struct desc_tag *tag = blob;
607 uint16_t crc, crc_len;
608
609 crc_len = udf_rw16(tag->desc_crc_len);
610
611 /* check payload CRC if applicable */
612 if (crc_len == 0)
613 return 0;
614
615 if (crc_len > max_length)
616 return EIO;
617
618 crc = udf_cksum(((uint8_t *) tag) + UDF_DESC_TAG_LENGTH, crc_len);
619 if (crc != udf_rw16(tag->desc_crc)) {
620 /* bad payload CRC; this is a broken tag */
621 return EINVAL;
622 }
623
624 return 0;
625 }
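
/*
 * Usage sketch (an assumption, not code taken from this file): a caller that
 * has just read `max_length' bytes of a descriptor into `blob' would verify
 * the tag header checksum first and only then the payload CRC.
 */
#if 0
static int
udf_example_verify_dscr(void *blob, uint32_t max_length)
{
	int error;

	/* header checksum over the first 16 bytes, byte 4 excluded */
	error = udf_check_tag(blob);
	if (error)
		return error;

	/* descriptor CRC over the payload, if recorded */
	return udf_check_tag_payload(blob, max_length);
}
#endif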
626
627
628 void
629 udf_validate_tag_sum(void *blob)
630 {
631 struct desc_tag *tag = blob;
632 uint8_t *pos, sum, cnt;
633
634 /* calculate TAG header checksum */
635 pos = (uint8_t *) tag;
636 sum = 0;
637
   638 	for (cnt = 0; cnt < 16; cnt++) {
639 if (cnt != 4) sum += *pos;
640 pos++;
641 }
642 tag->cksum = sum; /* 8 bit */
643 }
644
645
646 /* assumes sector number of descriptor to be saved already present */
647 void
648 udf_validate_tag_and_crc_sums(void *blob)
649 {
650 struct desc_tag *tag = blob;
651 uint8_t *btag = (uint8_t *) tag;
652 uint16_t crc, crc_len;
653
654 crc_len = udf_rw16(tag->desc_crc_len);
655
656 /* check payload CRC if applicable */
657 if (crc_len > 0) {
658 crc = udf_cksum(btag + UDF_DESC_TAG_LENGTH, crc_len);
659 tag->desc_crc = udf_rw16(crc);
660 }
661
662 /* calculate TAG header checksum */
663 udf_validate_tag_sum(blob);
664 }
665
666 /* --------------------------------------------------------------------- */
667
668 /*
669 * XXX note the different semantics from udfclient: for FIDs it still rounds
670 * up to sectors. Use udf_fidsize() for a correct length.
671 */
672
673 int
674 udf_tagsize(union dscrptr *dscr, uint32_t lb_size)
675 {
676 uint32_t size, tag_id, num_lb, elmsz;
677
678 tag_id = udf_rw16(dscr->tag.id);
679
680 switch (tag_id) {
681 case TAGID_LOGVOL :
682 size = sizeof(struct logvol_desc) - 1;
683 size += udf_rw32(dscr->lvd.mt_l);
684 break;
685 case TAGID_UNALLOC_SPACE :
686 elmsz = sizeof(struct extent_ad);
687 size = sizeof(struct unalloc_sp_desc) - elmsz;
688 size += udf_rw32(dscr->usd.alloc_desc_num) * elmsz;
689 break;
690 case TAGID_FID :
691 size = UDF_FID_SIZE + dscr->fid.l_fi + udf_rw16(dscr->fid.l_iu);
692 size = (size + 3) & ~3;
693 break;
694 case TAGID_LOGVOL_INTEGRITY :
695 size = sizeof(struct logvol_int_desc) - sizeof(uint32_t);
696 size += udf_rw32(dscr->lvid.l_iu);
697 size += (2 * udf_rw32(dscr->lvid.num_part) * sizeof(uint32_t));
698 break;
699 case TAGID_SPACE_BITMAP :
700 size = sizeof(struct space_bitmap_desc) - 1;
701 size += udf_rw32(dscr->sbd.num_bytes);
702 break;
703 case TAGID_SPARING_TABLE :
704 elmsz = sizeof(struct spare_map_entry);
705 size = sizeof(struct udf_sparing_table) - elmsz;
706 size += udf_rw16(dscr->spt.rt_l) * elmsz;
707 break;
708 case TAGID_FENTRY :
709 size = sizeof(struct file_entry);
710 size += udf_rw32(dscr->fe.l_ea) + udf_rw32(dscr->fe.l_ad)-1;
711 break;
712 case TAGID_EXTFENTRY :
713 size = sizeof(struct extfile_entry);
714 size += udf_rw32(dscr->efe.l_ea) + udf_rw32(dscr->efe.l_ad)-1;
715 break;
716 case TAGID_FSD :
717 size = sizeof(struct fileset_desc);
718 break;
719 default :
720 size = sizeof(union dscrptr);
721 break;
722 }
723
724 if ((size == 0) || (lb_size == 0)) return 0;
725
726 /* round up in sectors */
727 num_lb = (size + lb_size -1) / lb_size;
728 return num_lb * lb_size;
729 }
730
731
732 int
733 udf_fidsize(struct fileid_desc *fid)
734 {
735 uint32_t size;
736
737 if (udf_rw16(fid->tag.id) != TAGID_FID)
738 panic("got udf_fidsize on non FID\n");
739
740 size = UDF_FID_SIZE + fid->l_fi + udf_rw16(fid->l_iu);
741 size = (size + 3) & ~3;
742
743 return size;
744 }
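
/*
 * Worked example for the rounding above (the value of UDF_FID_SIZE is an
 * assumption here; the point is the 4-byte alignment): with a 38-byte fixed
 * FID part, l_fi = 5 and l_iu = 0, the raw size is 38 + 5 + 0 = 43 bytes and
 * (43 + 3) & ~3 rounds it up to 44. Note that udf_tagsize() on the same FID
 * would instead round up to a whole logical block, as remarked above.
 */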
745
746 /* --------------------------------------------------------------------- */
747
748 void
749 udf_lock_node(struct udf_node *udf_node, int flag, char const *fname, const int lineno)
750 {
751 int ret;
752
753 mutex_enter(&udf_node->node_mutex);
754 /* wait until free */
755 while (udf_node->i_flags & IN_LOCKED) {
756 ret = cv_timedwait(&udf_node->node_lock, &udf_node->node_mutex, hz/8);
757 /* TODO check if we should return error; abort */
758 if (ret == EWOULDBLOCK) {
759 DPRINTF(LOCKING, ( "udf_lock_node: udf_node %p would block "
760 "wanted at %s:%d, previously locked at %s:%d\n",
761 udf_node, fname, lineno,
762 udf_node->lock_fname, udf_node->lock_lineno));
763 }
764 }
765 /* grab */
766 udf_node->i_flags |= IN_LOCKED | flag;
767 /* debug */
768 udf_node->lock_fname = fname;
769 udf_node->lock_lineno = lineno;
770
771 mutex_exit(&udf_node->node_mutex);
772 }
773
774
775 void
776 udf_unlock_node(struct udf_node *udf_node, int flag)
777 {
778 mutex_enter(&udf_node->node_mutex);
779 udf_node->i_flags &= ~(IN_LOCKED | flag);
780 cv_broadcast(&udf_node->node_lock);
781 mutex_exit(&udf_node->node_mutex);
782 }
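
/*
 * Usage sketch (an assumption about callers, not code from this file): the
 * fname/lineno arguments only feed the lock debugging above, so callers
 * would normally pass __FILE__ and __LINE__, possibly through a wrapper
 * macro defined elsewhere.
 */
#if 0
static void
udf_example_with_node_locked(struct udf_node *udf_node)
{
	udf_lock_node(udf_node, 0, __FILE__, __LINE__);
	/* ... manipulate the in-core node here ... */
	udf_unlock_node(udf_node, 0);
}
#endif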
783
784
785 /* --------------------------------------------------------------------- */
786
787 static int
788 udf_read_anchor(struct udf_mount *ump, uint32_t sector, struct anchor_vdp **dst)
789 {
790 int error;
791
792 error = udf_read_phys_dscr(ump, sector, M_UDFVOLD,
793 (union dscrptr **) dst);
794 if (!error) {
795 /* blank terminator blocks are not allowed here */
796 if (*dst == NULL)
797 return ENOENT;
798 if (udf_rw16((*dst)->tag.id) != TAGID_ANCHOR) {
799 error = ENOENT;
800 free(*dst, M_UDFVOLD);
801 *dst = NULL;
802 DPRINTF(VOLUMES, ("Not an anchor\n"));
803 }
804 }
805
806 return error;
807 }
808
809
810 int
811 udf_read_anchors(struct udf_mount *ump)
812 {
813 struct udf_args *args = &ump->mount_args;
814 struct mmc_trackinfo first_track;
815 struct mmc_trackinfo second_track;
816 struct mmc_trackinfo last_track;
817 struct anchor_vdp **anchorsp;
818 uint32_t track_start;
819 uint32_t track_end;
820 uint32_t positions[4];
821 int first_tracknr, last_tracknr;
822 int error, anch, ok, first_anchor;
823
824 /* search the first and last track of the specified session */
825 error = udf_search_tracks(ump, args, &first_tracknr, &last_tracknr);
826 if (!error) {
827 first_track.tracknr = first_tracknr;
828 error = udf_update_trackinfo(ump, &first_track);
829 }
830 if (!error) {
831 last_track.tracknr = last_tracknr;
832 error = udf_update_trackinfo(ump, &last_track);
833 }
834 if ((!error) && (first_tracknr != last_tracknr)) {
835 second_track.tracknr = first_tracknr+1;
836 error = udf_update_trackinfo(ump, &second_track);
837 }
838 if (error) {
839 printf("UDF mount: reading disc geometry failed\n");
840 return 0;
841 }
842
843 track_start = first_track.track_start;
844
   845 	/* `end' is not as straightforward as start. */
846 track_end = last_track.track_start
847 + last_track.track_size - last_track.free_blocks - 1;
848
849 if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
   850 		/* end of track is not straightforward here */
851 if (last_track.flags & MMC_TRACKINFO_LRA_VALID)
852 track_end = last_track.last_recorded;
853 else if (last_track.flags & MMC_TRACKINFO_NWA_VALID)
854 track_end = last_track.next_writable
855 - ump->discinfo.link_block_penalty;
856 }
857
   858 	/* it's no use reading a blank track */
859 first_anchor = 0;
860 if (first_track.flags & MMC_TRACKINFO_BLANK)
861 first_anchor = 1;
862
863 /* get our packet size */
864 ump->packet_size = first_track.packet_size;
865 if (first_track.flags & MMC_TRACKINFO_BLANK)
866 ump->packet_size = second_track.packet_size;
867
868 if (ump->packet_size <= 1) {
869 /* take max, but not bigger than 64 */
870 ump->packet_size = MAXPHYS / ump->discinfo.sector_size;
871 ump->packet_size = MIN(ump->packet_size, 64);
872 }
873 KASSERT(ump->packet_size >= 1);
874
875 /* read anchors start+256, start+512, end-256, end */
876 positions[0] = track_start+256;
877 positions[1] = track_end-256;
878 positions[2] = track_end;
879 positions[3] = track_start+512; /* [UDF 2.60/6.11.2] */
   880 	/* XXX shouldn't +512 be preferred over +256 for compat with Roxio CD? */
881
882 ok = 0;
883 anchorsp = ump->anchors;
884 for (anch = first_anchor; anch < 4; anch++) {
885 DPRINTF(VOLUMES, ("Read anchor %d at sector %d\n", anch,
886 positions[anch]));
887 error = udf_read_anchor(ump, positions[anch], anchorsp);
888 if (!error) {
889 anchorsp++;
890 ok++;
891 }
892 }
893
   894 	/* VATs are only recorded on sequential media, but initialise the range anyway */
895 ump->first_possible_vat_location = track_start + 2;
896 ump->last_possible_vat_location = track_end + last_track.packet_size;
897
898 return ok;
899 }
900
901 /* --------------------------------------------------------------------- */
902
   903 /* we don't try to be smart; we just record the parts */
904 #define UDF_UPDATE_DSCR(name, dscr) \
905 if (name) \
906 free(name, M_UDFVOLD); \
907 name = dscr;
908
909 static int
910 udf_process_vds_descriptor(struct udf_mount *ump, union dscrptr *dscr)
911 {
912 struct part_desc *part;
913 uint16_t phys_part, raw_phys_part;
914
915 DPRINTF(VOLUMES, ("\tprocessing VDS descr %d\n",
916 udf_rw16(dscr->tag.id)));
917 switch (udf_rw16(dscr->tag.id)) {
918 case TAGID_PRI_VOL : /* primary partition */
919 UDF_UPDATE_DSCR(ump->primary_vol, &dscr->pvd);
920 break;
921 case TAGID_LOGVOL : /* logical volume */
922 UDF_UPDATE_DSCR(ump->logical_vol, &dscr->lvd);
923 break;
924 case TAGID_UNALLOC_SPACE : /* unallocated space */
925 UDF_UPDATE_DSCR(ump->unallocated, &dscr->usd);
926 break;
927 case TAGID_IMP_VOL : /* implementation */
928 /* XXX do we care about multiple impl. descr ? */
929 UDF_UPDATE_DSCR(ump->implementation, &dscr->ivd);
930 break;
931 case TAGID_PARTITION : /* physical partition */
   932 		/* not much use if it's not allocated */
933 if ((udf_rw16(dscr->pd.flags) & UDF_PART_FLAG_ALLOCATED) == 0) {
934 free(dscr, M_UDFVOLD);
935 break;
936 }
937
938 /*
939 * BUGALERT: some rogue implementations use random physical
   940 		 * partition numbers to break other implementations, so look up
941 * the number.
942 */
943 raw_phys_part = udf_rw16(dscr->pd.part_num);
944 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
945 part = ump->partitions[phys_part];
946 if (part == NULL)
947 break;
948 if (udf_rw16(part->part_num) == raw_phys_part)
949 break;
950 }
951 if (phys_part == UDF_PARTITIONS) {
952 free(dscr, M_UDFVOLD);
953 return EINVAL;
954 }
955
956 UDF_UPDATE_DSCR(ump->partitions[phys_part], &dscr->pd);
957 break;
958 case TAGID_VOL : /* volume space extender; rare */
959 DPRINTF(VOLUMES, ("VDS extender ignored\n"));
960 free(dscr, M_UDFVOLD);
961 break;
962 default :
963 DPRINTF(VOLUMES, ("Unhandled VDS type %d\n",
964 udf_rw16(dscr->tag.id)));
965 free(dscr, M_UDFVOLD);
966 }
967
968 return 0;
969 }
970 #undef UDF_UPDATE_DSCR
971
972 /* --------------------------------------------------------------------- */
973
974 static int
975 udf_read_vds_extent(struct udf_mount *ump, uint32_t loc, uint32_t len)
976 {
977 union dscrptr *dscr;
978 uint32_t sector_size, dscr_size;
979 int error;
980
981 sector_size = ump->discinfo.sector_size;
982
983 /* loc is sectornr, len is in bytes */
984 error = EIO;
985 while (len) {
986 error = udf_read_phys_dscr(ump, loc, M_UDFVOLD, &dscr);
987 if (error)
988 return error;
989
990 /* blank block is a terminator */
991 if (dscr == NULL)
992 return 0;
993
994 /* TERM descriptor is a terminator */
995 if (udf_rw16(dscr->tag.id) == TAGID_TERM) {
996 free(dscr, M_UDFVOLD);
997 return 0;
998 }
999
1000 /* process all others */
1001 dscr_size = udf_tagsize(dscr, sector_size);
1002 error = udf_process_vds_descriptor(ump, dscr);
1003 if (error) {
1004 free(dscr, M_UDFVOLD);
1005 break;
1006 }
1007 assert((dscr_size % sector_size) == 0);
1008
1009 len -= dscr_size;
1010 loc += dscr_size / sector_size;
1011 }
1012
1013 return error;
1014 }
1015
1016
1017 int
1018 udf_read_vds_space(struct udf_mount *ump)
1019 {
1020 /* struct udf_args *args = &ump->mount_args; */
1021 struct anchor_vdp *anchor, *anchor2;
1022 size_t size;
1023 uint32_t main_loc, main_len;
1024 uint32_t reserve_loc, reserve_len;
1025 int error;
1026
1027 /*
1028 * read in VDS space provided by the anchors; if one descriptor read
1029 * fails, try the mirror sector.
1030 *
1031 * check if 2nd anchor is different from 1st; if so, go for 2nd. This
1032 * avoids the `compatibility features' of DirectCD that may confuse
1033 * stuff completely.
1034 */
1035
1036 anchor = ump->anchors[0];
1037 anchor2 = ump->anchors[1];
1038 assert(anchor);
1039
1040 if (anchor2) {
1041 size = sizeof(struct extent_ad);
1042 if (memcmp(&anchor->main_vds_ex, &anchor2->main_vds_ex, size))
1043 anchor = anchor2;
1044 /* reserve is specified to be a literal copy of main */
1045 }
1046
1047 main_loc = udf_rw32(anchor->main_vds_ex.loc);
1048 main_len = udf_rw32(anchor->main_vds_ex.len);
1049
1050 reserve_loc = udf_rw32(anchor->reserve_vds_ex.loc);
1051 reserve_len = udf_rw32(anchor->reserve_vds_ex.len);
1052
1053 error = udf_read_vds_extent(ump, main_loc, main_len);
1054 if (error) {
1055 printf("UDF mount: reading in reserve VDS extent\n");
1056 error = udf_read_vds_extent(ump, reserve_loc, reserve_len);
1057 }
1058
1059 return error;
1060 }
1061
1062 /* --------------------------------------------------------------------- */
1063
1064 /*
1065 * Read in the logical volume integrity sequence pointed to by our logical
  1066  * volume descriptor. It's a sequence that can be extended using fields in the
1067 * integrity descriptor itself. On sequential media only one is found, on
1068 * rewritable media a sequence of descriptors can be found as a form of
  1069  * history keeping, and on non-sequential write-once media the chain is vital
1070 * to allow more and more descriptors to be written. The last descriptor
1071 * written in an extent needs to claim space for a new extent.
1072 */
1073
1074 static int
1075 udf_retrieve_lvint(struct udf_mount *ump)
1076 {
1077 union dscrptr *dscr;
1078 struct logvol_int_desc *lvint;
1079 struct udf_lvintq *trace;
1080 uint32_t lb_size, lbnum, len;
1081 int dscr_type, error, trace_len;
1082
1083 lb_size = udf_rw32(ump->logical_vol->lb_size);
1084 len = udf_rw32(ump->logical_vol->integrity_seq_loc.len);
1085 lbnum = udf_rw32(ump->logical_vol->integrity_seq_loc.loc);
1086
1087 /* clean trace */
1088 memset(ump->lvint_trace, 0,
1089 UDF_LVDINT_SEGMENTS * sizeof(struct udf_lvintq));
1090
1091 trace_len = 0;
1092 trace = ump->lvint_trace;
1093 trace->start = lbnum;
1094 trace->end = lbnum + len/lb_size;
1095 trace->pos = 0;
1096 trace->wpos = 0;
1097
1098 lvint = NULL;
1099 dscr = NULL;
1100 error = 0;
1101 while (len) {
1102 trace->pos = lbnum - trace->start;
1103 trace->wpos = trace->pos + 1;
1104
1105 /* read in our integrity descriptor */
1106 error = udf_read_phys_dscr(ump, lbnum, M_UDFVOLD, &dscr);
1107 if (!error) {
1108 if (dscr == NULL) {
1109 trace->wpos = trace->pos;
1110 break; /* empty terminates */
1111 }
1112 dscr_type = udf_rw16(dscr->tag.id);
1113 if (dscr_type == TAGID_TERM) {
1114 trace->wpos = trace->pos;
1115 break; /* clean terminator */
1116 }
1117 if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
1118 /* fatal... corrupt disc */
1119 error = ENOENT;
1120 break;
1121 }
1122 if (lvint)
1123 free(lvint, M_UDFVOLD);
1124 lvint = &dscr->lvid;
1125 dscr = NULL;
1126 } /* else hope for the best... maybe the next is ok */
1127
1128 DPRINTFIF(VOLUMES, lvint, ("logvol integrity read, state %s\n",
1129 udf_rw32(lvint->integrity_type) ? "CLOSED" : "OPEN"));
1130
1131 /* proceed sequential */
1132 lbnum += 1;
1133 len -= lb_size;
1134
1135 /* are we linking to a new piece? */
1136 if (dscr && lvint->next_extent.len) {
1137 len = udf_rw32(lvint->next_extent.len);
1138 lbnum = udf_rw32(lvint->next_extent.loc);
1139
1140 if (trace_len >= UDF_LVDINT_SEGMENTS-1) {
1141 /* IEK! segment link full... */
1142 DPRINTF(VOLUMES, ("lvdint segments full\n"));
1143 error = EINVAL;
1144 } else {
1145 trace++;
1146 trace_len++;
1147
1148 trace->start = lbnum;
1149 trace->end = lbnum + len/lb_size;
1150 trace->pos = 0;
1151 trace->wpos = 0;
1152 }
1153 }
1154 }
1155
1156 /* clean up the mess, esp. when there is an error */
1157 if (dscr)
1158 free(dscr, M_UDFVOLD);
1159
1160 if (error && lvint) {
1161 free(lvint, M_UDFVOLD);
1162 lvint = NULL;
1163 }
1164
1165 if (!lvint)
1166 error = ENOENT;
1167
1168 ump->logvol_integrity = lvint;
1169 return error;
1170 }
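
/*
 * Sketch of the continuation mechanism described above: when the current
 * integrity extent is full, the last descriptor written into it claims a new
 * extent through its `next_extent' field. The helper below is hypothetical;
 * udf_loose_lvint_history() does the real linking when it rewrites the
 * sequence.
 */
#if 0
static void
udf_link_lvint_extent(struct logvol_int_desc *lvint, uint32_t new_lbnum,
	uint32_t new_len)
{
	lvint->next_extent.len = udf_rw32(new_len);	/* bytes in new extent */
	lvint->next_extent.loc = udf_rw32(new_lbnum);	/* its first sector    */
}
#endif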
1171
1172
1173 static int
1174 udf_loose_lvint_history(struct udf_mount *ump)
1175 {
1176 union dscrptr **bufs, *dscr, *last_dscr;
1177 struct udf_lvintq *trace, *in_trace, *out_trace;
1178 struct logvol_int_desc *lvint;
1179 uint32_t in_ext, in_pos, in_len;
1180 uint32_t out_ext, out_wpos, out_len;
1181 uint32_t lb_size, packet_size, lb_num;
1182 uint32_t len, start;
1183 int ext, minext, extlen, cnt, cpy_len, dscr_type;
1184 int losing;
1185 int error;
1186
1187 DPRINTF(VOLUMES, ("need to lose some lvint history\n"));
1188
1189 lb_size = udf_rw32(ump->logical_vol->lb_size);
1190 packet_size = ump->data_track.packet_size; /* XXX data track */
1191
1192 /* search smallest extent */
1193 trace = &ump->lvint_trace[0];
1194 minext = trace->end - trace->start;
1195 for (ext = 1; ext < UDF_LVDINT_SEGMENTS; ext++) {
1196 trace = &ump->lvint_trace[ext];
1197 extlen = trace->end - trace->start;
1198 if (extlen == 0)
1199 break;
1200 minext = MIN(minext, extlen);
1201 }
1202 losing = MIN(minext, UDF_LVINT_LOSSAGE);
1203 /* no sense wiping all */
1204 if (losing == minext)
1205 losing--;
1206
1207 DPRINTF(VOLUMES, ("\tlosing %d entries\n", losing));
1208
1209 /* get buffer for pieces */
1210 bufs = malloc(UDF_LVDINT_SEGMENTS * sizeof(void *), M_TEMP, M_WAITOK);
1211
1212 in_ext = 0;
1213 in_pos = losing;
1214 in_trace = &ump->lvint_trace[in_ext];
1215 in_len = in_trace->end - in_trace->start;
1216 out_ext = 0;
1217 out_wpos = 0;
1218 out_trace = &ump->lvint_trace[out_ext];
1219 out_len = out_trace->end - out_trace->start;
1220
1221 last_dscr = NULL;
1222 for(;;) {
1223 out_trace->pos = out_wpos;
1224 out_trace->wpos = out_trace->pos;
1225 if (in_pos >= in_len) {
1226 in_ext++;
1227 in_pos = 0;
1228 in_trace = &ump->lvint_trace[in_ext];
1229 in_len = in_trace->end - in_trace->start;
1230 }
1231 if (out_wpos >= out_len) {
1232 out_ext++;
1233 out_wpos = 0;
1234 out_trace = &ump->lvint_trace[out_ext];
1235 out_len = out_trace->end - out_trace->start;
1236 }
1237 /* copy overlap contents */
1238 cpy_len = MIN(in_len - in_pos, out_len - out_wpos);
1239 cpy_len = MIN(cpy_len, in_len - in_trace->pos);
1240 if (cpy_len == 0)
1241 break;
1242
1243 /* copy */
1244 DPRINTF(VOLUMES, ("\treading %d lvid descriptors\n", cpy_len));
1245 for (cnt = 0; cnt < cpy_len; cnt++) {
1246 /* read in our integrity descriptor */
1247 lb_num = in_trace->start + in_pos + cnt;
1248 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD,
1249 &dscr);
1250 if (error) {
1251 /* copy last one */
1252 dscr = last_dscr;
1253 }
1254 bufs[cnt] = dscr;
1255 if (!error) {
1256 if (dscr == NULL) {
1257 out_trace->pos = out_wpos + cnt;
1258 out_trace->wpos = out_trace->pos;
1259 break; /* empty terminates */
1260 }
1261 dscr_type = udf_rw16(dscr->tag.id);
1262 if (dscr_type == TAGID_TERM) {
1263 out_trace->pos = out_wpos + cnt;
1264 out_trace->wpos = out_trace->pos;
1265 break; /* clean terminator */
1266 }
1267 if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
1268 panic( "UDF integrity sequence "
1269 "corrupted while mounted!\n");
1270 }
1271 last_dscr = dscr;
1272 }
1273 }
1274
1275 /* patch up if first entry was on error */
1276 if (bufs[0] == NULL) {
1277 for (cnt = 0; cnt < cpy_len; cnt++)
1278 if (bufs[cnt] != NULL)
1279 break;
1280 last_dscr = bufs[cnt];
  1281 			for (; cnt >= 0; cnt--) {
1282 bufs[cnt] = last_dscr;
1283 }
1284 }
1285
1286 /* glue + write out */
1287 DPRINTF(VOLUMES, ("\twriting %d lvid descriptors\n", cpy_len));
1288 for (cnt = 0; cnt < cpy_len; cnt++) {
1289 lb_num = out_trace->start + out_wpos + cnt;
1290 lvint = &bufs[cnt]->lvid;
1291
1292 /* set continuation */
1293 len = 0;
1294 start = 0;
1295 if (out_wpos + cnt == out_len) {
1296 /* get continuation */
1297 trace = &ump->lvint_trace[out_ext+1];
1298 len = trace->end - trace->start;
1299 start = trace->start;
1300 }
1301 lvint->next_extent.len = udf_rw32(len);
1302 lvint->next_extent.loc = udf_rw32(start);
1303
1304 lb_num = trace->start + trace->wpos;
1305 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1306 bufs[cnt], lb_num, lb_num);
1307 DPRINTFIF(VOLUMES, error,
1308 ("error writing lvint lb_num\n"));
1309 }
1310
1311 /* free non repeating descriptors */
1312 last_dscr = NULL;
1313 for (cnt = 0; cnt < cpy_len; cnt++) {
1314 if (bufs[cnt] != last_dscr)
1315 free(bufs[cnt], M_UDFVOLD);
1316 last_dscr = bufs[cnt];
1317 }
1318
1319 /* advance */
1320 in_pos += cpy_len;
1321 out_wpos += cpy_len;
1322 }
1323
1324 free(bufs, M_TEMP);
1325
1326 return 0;
1327 }
1328
1329
1330 static int
1331 udf_writeout_lvint(struct udf_mount *ump, int lvflag)
1332 {
1333 struct udf_lvintq *trace;
1334 struct timeval now_v;
1335 struct timespec now_s;
1336 uint32_t sector;
1337 int logvol_integrity;
1338 int space, error;
1339
1340 DPRINTF(VOLUMES, ("writing out logvol integrity descriptor\n"));
1341
1342 again:
1343 /* get free space in last chunk */
1344 trace = ump->lvint_trace;
1345 while (trace->wpos > (trace->end - trace->start)) {
1346 DPRINTF(VOLUMES, ("skip : start = %d, end = %d, pos = %d, "
1347 "wpos = %d\n", trace->start, trace->end,
1348 trace->pos, trace->wpos));
1349 trace++;
1350 }
1351
1352 /* check if there is space to append */
1353 space = (trace->end - trace->start) - trace->wpos;
1354 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
1355 "space = %d\n", trace->start, trace->end, trace->pos,
1356 trace->wpos, space));
1357
1358 /* get state */
1359 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
1360 if (logvol_integrity == UDF_INTEGRITY_CLOSED) {
1361 if ((space < 3) && (lvflag & UDF_APPENDONLY_LVINT)) {
1362 /* don't allow this logvol to be opened */
  1363 			/* TODO extend LVINT space if possible */
1364 return EROFS;
1365 }
1366 }
1367
1368 if (space < 1) {
1369 if (lvflag & UDF_APPENDONLY_LVINT)
1370 return EROFS;
  1371 		/* lose history by re-writing extents */
1372 error = udf_loose_lvint_history(ump);
1373 if (error)
1374 return error;
1375 goto again;
1376 }
1377
1378 /* update our integrity descriptor to identify us and timestamp it */
1379 DPRINTF(VOLUMES, ("updating integrity descriptor\n"));
1380 microtime(&now_v);
1381 TIMEVAL_TO_TIMESPEC(&now_v, &now_s);
1382 udf_timespec_to_timestamp(&now_s, &ump->logvol_integrity->time);
1383 udf_set_regid(&ump->logvol_info->impl_id, IMPL_NAME);
1384 udf_add_impl_regid(ump, &ump->logvol_info->impl_id);
1385
1386 /* writeout integrity descriptor */
1387 sector = trace->start + trace->wpos;
1388 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1389 (union dscrptr *) ump->logvol_integrity,
1390 sector, sector);
1391 DPRINTF(VOLUMES, ("writeout lvint : error = %d\n", error));
1392 if (error)
1393 return error;
1394
1395 /* advance write position */
1396 trace->wpos++; space--;
1397 if (space >= 1) {
1398 /* append terminator */
1399 sector = trace->start + trace->wpos;
1400 error = udf_write_terminator(ump, sector);
1401
1402 DPRINTF(VOLUMES, ("write terminator : error = %d\n", error));
1403 }
1404
1405 space = (trace->end - trace->start) - trace->wpos;
1406 DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
1407 "space = %d\n", trace->start, trace->end, trace->pos,
1408 trace->wpos, space));
1409 DPRINTF(VOLUMES, ("finished writing out logvol integrity descriptor "
1410 "successfull\n"));
1411
1412 return error;
1413 }
1414
1415 /* --------------------------------------------------------------------- */
1416
1417 static int
1418 udf_read_physical_partition_spacetables(struct udf_mount *ump)
1419 {
1420 union dscrptr *dscr;
1421 /* struct udf_args *args = &ump->mount_args; */
1422 struct part_desc *partd;
1423 struct part_hdr_desc *parthdr;
1424 struct udf_bitmap *bitmap;
1425 uint32_t phys_part;
1426 uint32_t lb_num, len;
1427 int error, dscr_type;
1428
1429 /* unallocated space map */
1430 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1431 partd = ump->partitions[phys_part];
1432 if (partd == NULL)
1433 continue;
1434 parthdr = &partd->_impl_use.part_hdr;
1435
1436 lb_num = udf_rw32(partd->start_loc);
1437 lb_num += udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
1438 len = udf_rw32(parthdr->unalloc_space_bitmap.len);
1439 if (len == 0)
1440 continue;
1441
1442 DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num));
1443 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
1444 if (!error && dscr) {
1445 /* analyse */
1446 dscr_type = udf_rw16(dscr->tag.id);
1447 if (dscr_type == TAGID_SPACE_BITMAP) {
1448 DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
1449 ump->part_unalloc_dscr[phys_part] = &dscr->sbd;
1450
1451 /* fill in ump->part_unalloc_bits */
1452 bitmap = &ump->part_unalloc_bits[phys_part];
1453 bitmap->blob = (uint8_t *) dscr;
1454 bitmap->bits = dscr->sbd.data;
1455 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1456 bitmap->pages = NULL; /* TODO */
1457 bitmap->data_pos = 0;
1458 bitmap->metadata_pos = 0;
1459 } else {
1460 free(dscr, M_UDFVOLD);
1461
1462 printf( "UDF mount: error reading unallocated "
1463 "space bitmap\n");
1464 return EROFS;
1465 }
1466 } else {
1467 /* blank not allowed */
1468 printf("UDF mount: blank unallocated space bitmap\n");
1469 return EROFS;
1470 }
1471 }
1472
1473 /* unallocated space table (not supported) */
1474 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1475 partd = ump->partitions[phys_part];
1476 if (partd == NULL)
1477 continue;
1478 parthdr = &partd->_impl_use.part_hdr;
1479
1480 len = udf_rw32(parthdr->unalloc_space_table.len);
1481 if (len) {
1482 printf("UDF mount: space tables not supported\n");
1483 return EROFS;
1484 }
1485 }
1486
1487 /* freed space map */
1488 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1489 partd = ump->partitions[phys_part];
1490 if (partd == NULL)
1491 continue;
1492 parthdr = &partd->_impl_use.part_hdr;
1493
1494 /* freed space map */
1495 lb_num = udf_rw32(partd->start_loc);
1496 lb_num += udf_rw32(parthdr->freed_space_bitmap.lb_num);
1497 len = udf_rw32(parthdr->freed_space_bitmap.len);
1498 if (len == 0)
1499 continue;
1500
  1501 		DPRINTF(VOLUMES, ("Read freed space bitmap %d\n", lb_num));
1502 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
1503 if (!error && dscr) {
1504 /* analyse */
1505 dscr_type = udf_rw16(dscr->tag.id);
1506 if (dscr_type == TAGID_SPACE_BITMAP) {
1507 DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
1508 ump->part_freed_dscr[phys_part] = &dscr->sbd;
1509
1510 /* fill in ump->part_freed_bits */
  1511 				bitmap = &ump->part_freed_bits[phys_part];
1512 bitmap->blob = (uint8_t *) dscr;
1513 bitmap->bits = dscr->sbd.data;
1514 bitmap->max_offset = udf_rw32(dscr->sbd.num_bits);
1515 bitmap->pages = NULL; /* TODO */
1516 bitmap->data_pos = 0;
1517 bitmap->metadata_pos = 0;
1518 } else {
1519 free(dscr, M_UDFVOLD);
1520
1521 printf( "UDF mount: error reading freed "
1522 "space bitmap\n");
1523 return EROFS;
1524 }
1525 } else {
1526 /* blank not allowed */
1527 printf("UDF mount: blank freed space bitmap\n");
1528 return EROFS;
1529 }
1530 }
1531
1532 /* freed space table (not supported) */
1533 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1534 partd = ump->partitions[phys_part];
1535 if (partd == NULL)
1536 continue;
1537 parthdr = &partd->_impl_use.part_hdr;
1538
1539 len = udf_rw32(parthdr->freed_space_table.len);
1540 if (len) {
1541 printf("UDF mount: space tables not supported\n");
1542 return EROFS;
1543 }
1544 }
1545
1546 return 0;
1547 }
1548
1549
1550 /* TODO implement async writeout */
1551 int
1552 udf_write_physical_partition_spacetables(struct udf_mount *ump, int waitfor)
1553 {
1554 union dscrptr *dscr;
1555 /* struct udf_args *args = &ump->mount_args; */
1556 struct part_desc *partd;
1557 struct part_hdr_desc *parthdr;
1558 uint32_t phys_part;
1559 uint32_t lb_num, len, ptov;
1560 int error_all, error;
1561
1562 error_all = 0;
1563 /* unallocated space map */
1564 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1565 partd = ump->partitions[phys_part];
1566 if (partd == NULL)
1567 continue;
1568 parthdr = &partd->_impl_use.part_hdr;
1569
1570 ptov = udf_rw32(partd->start_loc);
1571 lb_num = udf_rw32(parthdr->unalloc_space_bitmap.lb_num);
1572 len = udf_rw32(parthdr->unalloc_space_bitmap.len);
1573 if (len == 0)
1574 continue;
1575
1576 DPRINTF(VOLUMES, ("Write unalloc. space bitmap %d\n",
1577 lb_num + ptov));
1578 dscr = (union dscrptr *) ump->part_unalloc_dscr[phys_part];
1579 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1580 (union dscrptr *) dscr,
1581 ptov + lb_num, lb_num);
1582 if (error) {
1583 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
1584 error_all = error;
1585 }
1586 }
1587
1588 /* freed space map */
1589 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1590 partd = ump->partitions[phys_part];
1591 if (partd == NULL)
1592 continue;
1593 parthdr = &partd->_impl_use.part_hdr;
1594
1595 /* freed space map */
1596 ptov = udf_rw32(partd->start_loc);
1597 lb_num = udf_rw32(parthdr->freed_space_bitmap.lb_num);
1598 len = udf_rw32(parthdr->freed_space_bitmap.len);
1599 if (len == 0)
1600 continue;
1601
1602 DPRINTF(VOLUMES, ("Write freed space bitmap %d\n",
1603 lb_num + ptov));
1604 dscr = (union dscrptr *) ump->part_freed_dscr[phys_part];
1605 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
1606 (union dscrptr *) dscr,
1607 ptov + lb_num, lb_num);
1608 if (error) {
1609 DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
1610 error_all = error;
1611 }
1612 }
1613
1614 return error_all;
1615 }
1616
1617 /*
1618 * Checks if ump's vds information is correct and complete
1619 */
1620
1621 int
1622 udf_process_vds(struct udf_mount *ump) {
1623 union udf_pmap *mapping;
1624 /* struct udf_args *args = &ump->mount_args; */
1625 struct logvol_int_desc *lvint;
1626 struct udf_logvol_info *lvinfo;
1627 struct part_desc *part;
1628 uint32_t n_pm, mt_l;
1629 uint8_t *pmap_pos;
1630 char *domain_name, *map_name;
1631 const char *check_name;
1632 char bits[128];
1633 int pmap_stype, pmap_size;
1634 int pmap_type, log_part, phys_part, raw_phys_part;
1635 int n_phys, n_virt, n_spar, n_meta;
1636 int len, error;
1637
1638 if (ump == NULL)
1639 return ENOENT;
1640
1641 /* we need at least an anchor (trivial, but for safety) */
1642 if (ump->anchors[0] == NULL)
1643 return EINVAL;
1644
1645 /* we need at least one primary and one logical volume descriptor */
1646 if ((ump->primary_vol == NULL) || (ump->logical_vol) == NULL)
1647 return EINVAL;
1648
1649 /* we need at least one partition descriptor */
1650 if (ump->partitions[0] == NULL)
1651 return EINVAL;
1652
  1653 	/* check logical volume sector size versus device sector size */
1654 if (udf_rw32(ump->logical_vol->lb_size) != ump->discinfo.sector_size) {
1655 printf("UDF mount: format violation, lb_size != sector size\n");
1656 return EINVAL;
1657 }
1658
1659 /* check domain name */
1660 domain_name = ump->logical_vol->domain_id.id;
1661 if (strncmp(domain_name, "*OSTA UDF Compliant", 20)) {
1662 printf("mount_udf: disc not OSTA UDF Compliant, aborting\n");
1663 return EINVAL;
1664 }
1665
1666 /* retrieve logical volume integrity sequence */
1667 error = udf_retrieve_lvint(ump);
1668
1669 /*
1670 * We need at least one logvol integrity descriptor recorded. Note
1671 * that its OK to have an open logical volume integrity here. The VAT
1672 * will close/update the integrity.
1673 */
1674 if (ump->logvol_integrity == NULL)
1675 return EINVAL;
1676
1677 /* process derived structures */
1678 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
1679 lvint = ump->logvol_integrity;
1680 lvinfo = (struct udf_logvol_info *) (&lvint->tables[2 * n_pm]);
1681 ump->logvol_info = lvinfo;
1682
1683 /* TODO check udf versions? */
1684
1685 /*
1686 * check logvol mappings: effective virt->log partmap translation
1687 * check and recording of the mapping results. Saves expensive
1688 * strncmp() in tight places.
1689 */
1690 DPRINTF(VOLUMES, ("checking logvol mappings\n"));
1691 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
1692 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */
1693 pmap_pos = ump->logical_vol->maps;
1694
1695 if (n_pm > UDF_PMAPS) {
1696 printf("UDF mount: too many mappings\n");
1697 return EINVAL;
1698 }
1699
1700 ump->data_part = ump->metadata_part = 0;
1701 n_phys = n_virt = n_spar = n_meta = 0;
1702 for (log_part = 0; log_part < n_pm; log_part++) {
1703 mapping = (union udf_pmap *) pmap_pos;
1704 pmap_stype = pmap_pos[0];
1705 pmap_size = pmap_pos[1];
1706 switch (pmap_stype) {
1707 case 1: /* physical mapping */
1708 /* volseq = udf_rw16(mapping->pm1.vol_seq_num); */
1709 raw_phys_part = udf_rw16(mapping->pm1.part_num);
1710 pmap_type = UDF_VTOP_TYPE_PHYS;
1711 n_phys++;
1712 ump->data_part = log_part;
1713 ump->metadata_part = log_part;
1714 break;
1715 case 2: /* virtual/sparable/meta mapping */
1716 map_name = mapping->pm2.part_id.id;
1717 /* volseq = udf_rw16(mapping->pm2.vol_seq_num); */
1718 raw_phys_part = udf_rw16(mapping->pm2.part_num);
1719 pmap_type = UDF_VTOP_TYPE_UNKNOWN;
1720 len = UDF_REGID_ID_SIZE;
1721
1722 check_name = "*UDF Virtual Partition";
1723 if (strncmp(map_name, check_name, len) == 0) {
1724 pmap_type = UDF_VTOP_TYPE_VIRT;
1725 n_virt++;
1726 ump->metadata_part = log_part;
1727 break;
1728 }
1729 check_name = "*UDF Sparable Partition";
1730 if (strncmp(map_name, check_name, len) == 0) {
1731 pmap_type = UDF_VTOP_TYPE_SPARABLE;
1732 n_spar++;
1733 ump->data_part = log_part;
1734 ump->metadata_part = log_part;
1735 break;
1736 }
1737 check_name = "*UDF Metadata Partition";
1738 if (strncmp(map_name, check_name, len) == 0) {
1739 pmap_type = UDF_VTOP_TYPE_META;
1740 n_meta++;
1741 ump->metadata_part = log_part;
1742 break;
1743 }
1744 break;
1745 default:
1746 return EINVAL;
1747 }
1748
1749 /*
1750 * BUGALERT: some rogue implementations use random physical
  1751 		 * partition numbers to break other implementations, so look up
1752 * the number.
1753 */
1754 for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
1755 part = ump->partitions[phys_part];
1756 if (part == NULL)
1757 continue;
1758 if (udf_rw16(part->part_num) == raw_phys_part)
1759 break;
1760 }
1761
1762 DPRINTF(VOLUMES, ("\t%d -> %d(%d) type %d\n", log_part,
1763 raw_phys_part, phys_part, pmap_type));
1764
1765 if (phys_part == UDF_PARTITIONS)
1766 return EINVAL;
1767 if (pmap_type == UDF_VTOP_TYPE_UNKNOWN)
1768 return EINVAL;
1769
1770 ump->vtop [log_part] = phys_part;
1771 ump->vtop_tp[log_part] = pmap_type;
1772
1773 pmap_pos += pmap_size;
1774 }
1775 /* not winning the beauty contest */
1776 ump->vtop_tp[UDF_VTOP_RAWPART] = UDF_VTOP_TYPE_RAW;
1777
1778 /* test some basic UDF assertions/requirements */
1779 if ((n_virt > 1) || (n_spar > 1) || (n_meta > 1))
1780 return EINVAL;
1781
1782 if (n_virt) {
1783 if ((n_phys == 0) || n_spar || n_meta)
1784 return EINVAL;
1785 }
1786 if (n_spar + n_phys == 0)
1787 return EINVAL;
1788
  1789 	/* determine allocation schemes based on disc format */
1790 /* VAT's can only be on a sequential media */
1791 ump->data_alloc = UDF_ALLOC_SPACEMAP;
1792 if (n_virt)
1793 ump->data_alloc = UDF_ALLOC_SEQUENTIAL;
1794
1795 ump->meta_alloc = UDF_ALLOC_SPACEMAP;
1796 if (n_virt)
1797 ump->meta_alloc = UDF_ALLOC_VAT;
1798 if (n_meta)
1799 ump->meta_alloc = UDF_ALLOC_METABITMAP;
1800
1801 /* special cases for pseudo-overwrite */
1802 if (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE) {
1803 ump->data_alloc = UDF_ALLOC_SEQUENTIAL;
1804 if (n_meta) {
1805 ump->meta_alloc = UDF_ALLOC_METASEQUENTIAL;
1806 } else {
1807 ump->meta_alloc = UDF_ALLOC_RELAXEDSEQUENTIAL;
1808 }
1809 }
1810
1811 /* determine default allocation descriptors to use */
1812 ump->data_allocdscr = UDF_ICB_SHORT_ALLOC;
1813 ump->meta_allocdscr = UDF_ICB_SHORT_ALLOC;
1814 if (n_pm > 1) {
1815 ump->data_allocdscr = UDF_ICB_LONG_ALLOC;
1816 ump->meta_allocdscr = UDF_ICB_LONG_ALLOC;
1817 /* metadata partitions are forced to have short */
1818 if (n_meta)
1819 ump->meta_allocdscr = UDF_ICB_SHORT_ALLOC;
1820 }
1821
1822 /* determine logical volume open/closure actions */
1823 if (n_virt) {
1824 ump->lvopen = 0;
1825 if (ump->discinfo.last_session_state == MMC_STATE_CLOSED)
1826 ump->lvopen |= UDF_OPEN_SESSION ;
1827 ump->lvclose = UDF_WRITE_VAT;
1828 if (ump->mount_args.udfmflags & UDFMNT_CLOSESESSION)
1829 ump->lvclose |= UDF_CLOSE_SESSION;
1830 } else {
1831 /* `normal' rewritable or non sequential media */
1832 ump->lvopen = UDF_WRITE_LVINT;
1833 ump->lvclose = UDF_WRITE_LVINT;
1834 if ((ump->discinfo.mmc_cur & MMC_CAP_REWRITABLE) == 0)
1835 ump->lvopen |= UDF_APPENDONLY_LVINT;
1836 }
1837
1838 /*
  1839 	 * Determine scheduler error behaviour. For virtual partitions, update
  1840 	 * the trackinfo; for sparable partitions replace a whole block on the
  1841 	 * sparable table. Always requeue.
1842 */
1843 ump->lvreadwrite = 0;
1844 if (n_virt)
1845 ump->lvreadwrite = UDF_UPDATE_TRACKINFO;
1846 if (n_spar)
1847 ump->lvreadwrite = UDF_REMAP_BLOCK;
1848
1849 /*
  1850 	 * Select our scheduler
1851 */
1852 ump->strategy = &udf_strat_rmw;
1853 if (n_virt || (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE))
1854 ump->strategy = &udf_strat_sequential;
1855 if ((ump->discinfo.mmc_class == MMC_CLASS_DISC) ||
1856 (ump->discinfo.mmc_class == MMC_CLASS_UNKN))
1857 ump->strategy = &udf_strat_direct;
1858 if (n_spar)
1859 ump->strategy = &udf_strat_rmw;
1860
1861 /* print results */
1862 DPRINTF(VOLUMES, ("\tdata alloc scheme %d, meta alloc scheme %d\n",
1863 ump->data_alloc, ump->meta_alloc));
1864 DPRINTF(VOLUMES, ("\tdata partition %d, metadata partition %d\n",
1865 ump->data_part, ump->metadata_part));
1866
1867 bitmask_snprintf(ump->lvopen, UDFLOGVOL_BITS, bits, sizeof(bits));
1868 DPRINTF(VOLUMES, ("\tactions on logvol open %s\n", bits));
1869 bitmask_snprintf(ump->lvclose, UDFLOGVOL_BITS, bits, sizeof(bits));
1870 DPRINTF(VOLUMES, ("\tactions on logvol close %s\n", bits));
1871 bitmask_snprintf(ump->lvreadwrite, UDFONERROR_BITS, bits, sizeof(bits));
1872 DPRINTF(VOLUMES, ("\tactions on logvol errors %s\n", bits));
1873
1874 DPRINTF(VOLUMES, ("\tselected sheduler `%s`\n",
1875 (ump->strategy == &udf_strat_direct) ? "Direct" :
1876 (ump->strategy == &udf_strat_sequential) ? "Sequential" :
1877 (ump->strategy == &udf_strat_rmw) ? "RMW" : "UNKNOWN!"));
1878
1879 /* signal its OK for now */
1880 return 0;
1881 }
1882
1883 /* --------------------------------------------------------------------- */
1884
1885 /*
1886 * Update logical volume name in all structures that keep a record of it. We
1887 * use memmove since each of them might be specified as a source.
1888 *
1889 * Note that it doesn't update the VAT structure!
1890 */
1891
1892 static void
1893 udf_update_logvolname(struct udf_mount *ump, char *logvol_id)
1894 {
1895 struct logvol_desc *lvd = NULL;
1896 struct fileset_desc *fsd = NULL;
1897 struct udf_lv_info *lvi = NULL;
1898
1899 DPRINTF(VOLUMES, ("Updating logical volume name\n"));
1900 lvd = ump->logical_vol;
1901 fsd = ump->fileset_desc;
1902 if (ump->implementation)
1903 lvi = &ump->implementation->_impl_use.lv_info;
1904
1905 /* logvol's id might be specified as original so use memmove here */
1906 memmove(lvd->logvol_id, logvol_id, 128);
1907 if (fsd)
1908 memmove(fsd->logvol_id, logvol_id, 128);
1909 if (lvi)
1910 memmove(lvi->logvol_id, logvol_id, 128);
1911 }
1912
1913 /* --------------------------------------------------------------------- */
1914
1915 void
1916 udf_inittag(struct udf_mount *ump, struct desc_tag *tag, int tagid,
1917 uint32_t sector)
1918 {
1919 assert(ump->logical_vol);
1920
1921 tag->id = udf_rw16(tagid);
1922 tag->descriptor_ver = ump->logical_vol->tag.descriptor_ver;
1923 tag->cksum = 0;
1924 tag->reserved = 0;
1925 tag->serial_num = ump->logical_vol->tag.serial_num;
1926 tag->tag_loc = udf_rw32(sector);
1927 }
1928
1929
1930 uint64_t
1931 udf_advance_uniqueid(struct udf_mount *ump)
1932 {
1933 uint64_t unique_id;
1934
1935 mutex_enter(&ump->logvol_mutex);
1936 unique_id = udf_rw64(ump->logvol_integrity->lvint_next_unique_id);
1937 if (unique_id < 0x10)
1938 unique_id = 0x10;
1939 ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id + 1);
1940 mutex_exit(&ump->logvol_mutex);
1941
1942 return unique_id;
1943 }
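/*
 * The jump to 0x10 above is, as far as I can tell, there because UDF reserves
 * the lowest unique id values (the root directory gets 0), so freshly issued
 * ids start at 16.
 */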
1944
1945
1946 static void
1947 udf_adjust_filecount(struct udf_node *udf_node, int sign)
1948 {
1949 struct udf_mount *ump = udf_node->ump;
1950 uint32_t num_dirs, num_files;
1951 int udf_file_type;
1952
1953 /* get file type */
1954 if (udf_node->fe) {
1955 udf_file_type = udf_node->fe->icbtag.file_type;
1956 } else {
1957 udf_file_type = udf_node->efe->icbtag.file_type;
1958 }
1959
1960 /* adjust file count */
1961 mutex_enter(&ump->allocate_mutex);
1962 if (udf_file_type == UDF_ICB_FILETYPE_DIRECTORY) {
1963 num_dirs = udf_rw32(ump->logvol_info->num_directories);
1964 ump->logvol_info->num_directories =
1965 udf_rw32((num_dirs + sign));
1966 } else {
1967 num_files = udf_rw32(ump->logvol_info->num_files);
1968 ump->logvol_info->num_files =
1969 udf_rw32((num_files + sign));
1970 }
1971 mutex_exit(&ump->allocate_mutex);
1972 }
1973
1974
1975 void
1976 udf_osta_charset(struct charspec *charspec)
1977 {
1978 bzero(charspec, sizeof(struct charspec));
1979 charspec->type = 0;
1980 strcpy((char *) charspec->inf, "OSTA Compressed Unicode");
1981 }
1982
1983
1984 /* first call udf_set_regid and then the suffix */
1985 void
1986 udf_set_regid(struct regid *regid, char const *name)
1987 {
1988 bzero(regid, sizeof(struct regid));
1989 regid->flags = 0; /* not dirty and not protected */
1990 strcpy((char *) regid->id, name);
1991 }
1992
1993
1994 void
1995 udf_add_domain_regid(struct udf_mount *ump, struct regid *regid)
1996 {
1997 uint16_t *ver;
1998
1999 ver = (uint16_t *) regid->id_suffix;
2000 *ver = ump->logvol_info->min_udf_readver;
2001 }
2002
2003
2004 void
2005 udf_add_udf_regid(struct udf_mount *ump, struct regid *regid)
2006 {
2007 uint16_t *ver;
2008
2009 ver = (uint16_t *) regid->id_suffix;
2010 *ver = ump->logvol_info->min_udf_readver;
2011
2012 regid->id_suffix[2] = 4; /* unix */
2013 regid->id_suffix[3] = 8; /* NetBSD */
2014 }
2015
2016
2017 void
2018 udf_add_impl_regid(struct udf_mount *ump, struct regid *regid)
2019 {
2020 regid->id_suffix[0] = 4; /* unix */
2021 regid->id_suffix[1] = 8; /* NetBSD */
2022 }
2023
2024
2025 void
2026 udf_add_app_regid(struct udf_mount *ump, struct regid *regid)
2027 {
2028 regid->id_suffix[0] = APP_VERSION_MAIN;
2029 regid->id_suffix[1] = APP_VERSION_SUB;
2030 }
2031
2032 static int
2033 udf_create_parentfid(struct udf_mount *ump, struct fileid_desc *fid,
2034 struct long_ad *parent, uint64_t unique_id)
2035 {
2036 /* the size of an empty FID is 38 but needs to be a multiple of 4 */
2037 int fidsize = 40;
2038
2039 udf_inittag(ump, &fid->tag, TAGID_FID, udf_rw32(parent->loc.lb_num));
2040 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
2041 fid->file_char = UDF_FILE_CHAR_DIR | UDF_FILE_CHAR_PAR;
2042 fid->icb = *parent;
2043 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
2044 fid->tag.desc_crc_len = fidsize - UDF_DESC_TAG_LENGTH;
2045 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
2046
2047 return fidsize;
2048 }
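/*
 * The FID built above carries UDF_FILE_CHAR_PAR, i.e. it is meant to become
 * the `..' entry written at the start of a newly created directory; 40 bytes
 * is the empty 38 byte FID rounded up to a multiple of 4.
 */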
2049
2050 /* --------------------------------------------------------------------- */
2051
2052 /*
2053 * Extended attribute support. UDF knows of 3 places for extended attributes:
2054 *
2055 * (a) inside the file's (e)fe in the length of the extended attribute area
2056 * before the allocation descriptors/filedata
2057 *
2058 * (b) in a file referenced by (e)fe->ext_attr_icb and
2059 *
2060  * (c) in the (e)fe's associated stream directory that can hold various
2061  * sub-files. In the stream directory a few fixed named subfiles are reserved
2062  * for NT/Unix ACLs and OS/2 attributes.
2063  *
2064  * NOTE: Extended attributes are read randomly but always written
2065  * *atomically*. For ACLs this interface is probably different but not known
2066  * to me yet.
2067  *
2068  * Order of extended attributes in a space:
2069 * ECMA 167 EAs
2070 * Non block aligned Implementation Use EAs
2071 * Block aligned Implementation Use EAs
2072 * Application Use EAs
2073 */
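/*
 * Rough sketch of the resulting in-(e)fe layout when all classes are
 * recorded (only the 24 byte header size is fixed, the rest is an example):
 *
 *	offset 0	extattrhdr_desc (24 bytes)
 *	offset 24	Ecma 167 EAs
 *	impl_attr_loc	implementation use EAs ("*UDF ..." ones checksummed)
 *	appl_attr_loc	application use EAs
 *	l_ea		end of the recorded extended attribute space
 */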
2074
2075 static int
2076 udf_impl_extattr_check(struct impl_extattr_entry *implext)
2077 {
2078 uint16_t *spos;
2079
2080 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
2081 /* checksum valid? */
2082 DPRINTF(EXTATTR, ("checking UDF impl. attr checksum\n"));
2083 spos = (uint16_t *) implext->data;
2084 if (udf_rw16(*spos) != udf_ea_cksum((uint8_t *) implext))
2085 return EINVAL;
2086 }
2087 return 0;
2088 }
2089
2090 static void
2091 udf_calc_impl_extattr_checksum(struct impl_extattr_entry *implext)
2092 {
2093 uint16_t *spos;
2094
2095 if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
2096 /* set checksum */
2097 spos = (uint16_t *) implext->data;
2098 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
2099 }
2100 }
2101
2102
2103 int
2104 udf_extattr_search_intern(struct udf_node *node,
2105 uint32_t sattr, char const *sattrname,
2106 uint32_t *offsetp, uint32_t *lengthp)
2107 {
2108 struct extattrhdr_desc *eahdr;
2109 struct extattr_entry *attrhdr;
2110 struct impl_extattr_entry *implext;
2111 uint32_t offset, a_l, sector_size;
2112 int32_t l_ea;
2113 uint8_t *pos;
2114 int error;
2115
2116 /* get sector size from the mountpoint */
2117 sector_size = node->ump->discinfo.sector_size;
2118
2119 /* get information from fe/efe */
2120 if (node->fe) {
2121 l_ea = udf_rw32(node->fe->l_ea);
2122 eahdr = (struct extattrhdr_desc *) node->fe->data;
2123 } else {
2124 assert(node->efe);
2125 l_ea = udf_rw32(node->efe->l_ea);
2126 eahdr = (struct extattrhdr_desc *) node->efe->data;
2127 }
2128
2129 /* something recorded here? */
2130 if (l_ea == 0)
2131 return ENOENT;
2132
2133 /* check extended attribute tag; what to do if it fails? */
2134 error = udf_check_tag(eahdr);
2135 if (error)
2136 return EINVAL;
2137 if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
2138 return EINVAL;
2139 error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
2140 if (error)
2141 return EINVAL;
2142
2143 DPRINTF(EXTATTR, ("Found %d bytes of extended attributes\n", l_ea));
2144
2145 /* looking for Ecma-167 attributes? */
2146 offset = sizeof(struct extattrhdr_desc);
2147
2148 /* looking for either implementation use or application use */
2149 if (sattr == 2048) { /* [4/48.10.8] */
2150 offset = udf_rw32(eahdr->impl_attr_loc);
2151 if (offset == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2152 return ENOENT;
2153 }
2154 if (sattr == 65536) { /* [4/48.10.9] */
2155 offset = udf_rw32(eahdr->appl_attr_loc);
2156 if (offset == UDF_APPL_ATTR_LOC_NOT_PRESENT)
2157 return ENOENT;
2158 }
2159
2160 /* paranoia check offset and l_ea */
2161 if (l_ea + offset >= sector_size - sizeof(struct extattr_entry))
2162 return EINVAL;
2163
2164 DPRINTF(EXTATTR, ("Starting at offset %d\n", offset));
2165
2166 /* find our extended attribute */
2167 l_ea -= offset;
2168 pos = (uint8_t *) eahdr + offset;
2169
2170 while (l_ea >= sizeof(struct extattr_entry)) {
2171 DPRINTF(EXTATTR, ("%d extended attr bytes left\n", l_ea));
2172 attrhdr = (struct extattr_entry *) pos;
2173 implext = (struct impl_extattr_entry *) pos;
2174
2175 /* get complete attribute length and check for rogue values */
2176 a_l = udf_rw32(attrhdr->a_l);
2177 DPRINTF(EXTATTR, ("attribute %d:%d, len %d/%d\n",
2178 udf_rw32(attrhdr->type),
2179 attrhdr->subtype, a_l, l_ea));
2180 if ((a_l == 0) || (a_l > l_ea))
2181 return EINVAL;
2182
2183 if (attrhdr->type != sattr)
2184 goto next_attribute;
2185
2186 /* we might have found it! */
2187 if (attrhdr->type < 2048) { /* Ecma-167 attribute */
2188 *offsetp = offset;
2189 *lengthp = a_l;
2190 return 0; /* success */
2191 }
2192
2193 /*
2194  * Implementation use and application use extended attributes
2195  * have a name to identify them. They share the same structure;
2196  * only UDF implementation use extended attributes have a
2197  * checksum we need to check.
2198  */
2199
2200 DPRINTF(EXTATTR, ("named attribute %s\n", implext->imp_id.id));
2201 if (strcmp(implext->imp_id.id, sattrname) == 0) {
2202 /* we have found our appl/implementation attribute */
2203 *offsetp = offset;
2204 *lengthp = a_l;
2205 return 0; /* success */
2206 }
2207
2208 next_attribute:
2209 /* next attribute */
2210 pos += a_l;
2211 l_ea -= a_l;
2212 offset += a_l;
2213 }
2214 /* not found */
2215 return ENOENT;
2216 }
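/*
 * Usage sketch, mirroring the VAT LVExtension lookups further down: search
 * for a named implementation use attribute and point into the (e)fe data
 * area where it was found.
 *
 *	error = udf_extattr_search_intern(node, 2048, "*UDF VAT LVExtension",
 *	    &offset, &a_l);
 *	if (!error)
 *		implext = (struct impl_extattr_entry *) (ea_start + offset);
 */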
2217
2218
2219 static void
2220 udf_extattr_insert_internal(struct udf_mount *ump, union dscrptr *dscr,
2221 struct extattr_entry *extattr)
2222 {
2223 struct file_entry *fe;
2224 struct extfile_entry *efe;
2225 struct extattrhdr_desc *extattrhdr;
2226 struct impl_extattr_entry *implext;
2227 uint32_t impl_attr_loc, appl_attr_loc, l_ea, a_l, exthdr_len;
2228 uint32_t *l_eap, l_ad;
2229 uint16_t *spos;
2230 uint8_t *bpos, *data;
2231
2232 if (udf_rw16(dscr->tag.id) == TAGID_FENTRY) {
2233 fe = &dscr->fe;
2234 data = fe->data;
2235 l_eap = &fe->l_ea;
2236 l_ad = udf_rw32(fe->l_ad);
2237 } else if (udf_rw16(dscr->tag.id) == TAGID_EXTFENTRY) {
2238 efe = &dscr->efe;
2239 data = efe->data;
2240 l_eap = &efe->l_ea;
2241 l_ad = udf_rw32(efe->l_ad);
2242 } else {
2243 panic("Bad tag passed to udf_extattr_insert_internal");
2244 }
2245
2246 /* can't yet append to (e)fe's that already have allocation descriptors recorded */
2247 assert(l_ad == 0);
2248
2249 /* should have a header! */
2250 extattrhdr = (struct extattrhdr_desc *) data;
2251 l_ea = udf_rw32(*l_eap);
2252 if (l_ea == 0) {
2253 /* create empty extended attribute header */
2254 exthdr_len = sizeof(struct extattrhdr_desc);
2255
2256 udf_inittag(ump, &extattrhdr->tag, TAGID_EXTATTR_HDR,
2257 /* loc */ 0);
2258 extattrhdr->impl_attr_loc = udf_rw32(exthdr_len);
2259 extattrhdr->appl_attr_loc = udf_rw32(exthdr_len);
2260 extattrhdr->tag.desc_crc_len = udf_rw16(8);
2261
2262 /* record extended attribute header length */
2263 l_ea = exthdr_len;
2264 *l_eap = udf_rw32(l_ea);
2265 }
2266
2267 /* extract locations */
2268 impl_attr_loc = udf_rw32(extattrhdr->impl_attr_loc);
2269 appl_attr_loc = udf_rw32(extattrhdr->appl_attr_loc);
2270 if (impl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2271 impl_attr_loc = l_ea;
2272 if (appl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
2273 appl_attr_loc = l_ea;
2274
2275 /* Ecma 167 EAs */
2276 if (udf_rw32(extattr->type) < 2048) {
2277 assert(impl_attr_loc == l_ea);
2278 assert(appl_attr_loc == l_ea);
2279 }
2280
2281 /* implementation use extended attributes */
2282 if (udf_rw32(extattr->type) == 2048) {
2283 assert(appl_attr_loc == l_ea);
2284
2285 /* calculate and write extended attribute header checksum */
2286 implext = (struct impl_extattr_entry *) extattr;
2287 assert(udf_rw32(implext->iu_l) == 4); /* [UDF 3.3.4.5] */
2288 spos = (uint16_t *) implext->data;
2289 *spos = udf_rw16(udf_ea_cksum((uint8_t *) implext));
2290 }
2291
2292 /* application use extended attributes */
2293 assert(udf_rw32(extattr->type) != 65536);
2294 assert(appl_attr_loc == l_ea);
2295
2296 /* append the attribute at the end of the current space */
2297 bpos = data + udf_rw32(*l_eap);
2298 a_l = udf_rw32(extattr->a_l);
2299
2300 /* update impl. attribute locations */
2301 if (udf_rw32(extattr->type) < 2048) {
2302 impl_attr_loc = l_ea + a_l;
2303 appl_attr_loc = l_ea + a_l;
2304 }
2305 if (udf_rw32(extattr->type) == 2048) {
2306 appl_attr_loc = l_ea + a_l;
2307 }
2308
2309 /* copy and advance */
2310 memcpy(bpos, extattr, a_l);
2311 l_ea += a_l;
2312 *l_eap = udf_rw32(l_ea);
2313
2314 /* do the `dance` again backwards */
2315 if (udf_rw16(ump->logical_vol->tag.descriptor_ver) != 2) {
2316 if (impl_attr_loc == l_ea)
2317 impl_attr_loc = UDF_IMPL_ATTR_LOC_NOT_PRESENT;
2318 if (appl_attr_loc == l_ea)
2319 appl_attr_loc = UDF_APPL_ATTR_LOC_NOT_PRESENT;
2320 }
2321
2322 /* store offsets */
2323 extattrhdr->impl_attr_loc = udf_rw32(impl_attr_loc);
2324 extattrhdr->appl_attr_loc = udf_rw32(appl_attr_loc);
2325 }
2326
2327
2328 /* --------------------------------------------------------------------- */
2329
2330 static int
2331 udf_update_lvid_from_vat_extattr(struct udf_node *vat_node)
2332 {
2333 struct udf_mount *ump;
2334 struct udf_logvol_info *lvinfo;
2335 struct impl_extattr_entry *implext;
2336 struct vatlvext_extattr_entry lvext;
2337 const char *extstr = "*UDF VAT LVExtension";
2338 uint64_t vat_uniqueid;
2339 uint32_t offset, a_l;
2340 uint8_t *ea_start, *lvextpos;
2341 int error;
2342
2343 /* get mountpoint and lvinfo */
2344 ump = vat_node->ump;
2345 lvinfo = ump->logvol_info;
2346
2347 /* get information from fe/efe */
2348 if (vat_node->fe) {
2349 vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
2350 ea_start = vat_node->fe->data;
2351 } else {
2352 vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
2353 ea_start = vat_node->efe->data;
2354 }
2355
2356 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
2357 if (error)
2358 return error;
2359
2360 implext = (struct impl_extattr_entry *) (ea_start + offset);
2361 error = udf_impl_extattr_check(implext);
2362 if (error)
2363 return error;
2364
2365 /* paranoia */
2366 if (a_l != sizeof(*implext) -1 + udf_rw32(implext->iu_l) + sizeof(lvext)) {
2367 DPRINTF(VOLUMES, ("VAT LVExtension size doesn't compute\n"));
2368 return EINVAL;
2369 }
2370
2371 /*
2372  * we have found our "VAT LVExtension" attribute. BUT due to a
2373 * bug in the specification it might not be word aligned so
2374 * copy first to avoid panics on some machines (!!)
2375 */
2376 DPRINTF(VOLUMES, ("Found VAT LVExtension attr\n"));
2377 lvextpos = implext->data + udf_rw32(implext->iu_l);
2378 memcpy(&lvext, lvextpos, sizeof(lvext));
2379
2380 /* check if it was updated the last time */
2381 if (udf_rw64(lvext.unique_id_chk) == vat_uniqueid) {
2382 lvinfo->num_files = lvext.num_files;
2383 lvinfo->num_directories = lvext.num_directories;
2384 udf_update_logvolname(ump, lvext.logvol_id);
2385 } else {
2386 DPRINTF(VOLUMES, ("VAT LVExtension out of date\n"));
2387 /* replace VAT LVExt by free space EA */
2388 memset(implext->imp_id.id, 0, UDF_REGID_ID_SIZE);
2389 strcpy(implext->imp_id.id, "*UDF FreeEASpace");
2390 udf_calc_impl_extattr_checksum(implext);
2391 }
2392
2393 return 0;
2394 }
2395
2396
2397 static int
2398 udf_update_vat_extattr_from_lvid(struct udf_node *vat_node)
2399 {
2400 struct udf_mount *ump;
2401 struct udf_logvol_info *lvinfo;
2402 struct impl_extattr_entry *implext;
2403 struct vatlvext_extattr_entry lvext;
2404 const char *extstr = "*UDF VAT LVExtension";
2405 uint64_t vat_uniqueid;
2406 uint32_t offset, a_l;
2407 uint8_t *ea_start, *lvextpos;
2408 int error;
2409
2410 /* get mountpoint and lvinfo */
2411 ump = vat_node->ump;
2412 lvinfo = ump->logvol_info;
2413
2414 /* get information from fe/efe */
2415 if (vat_node->fe) {
2416 vat_uniqueid = udf_rw64(vat_node->fe->unique_id);
2417 ea_start = vat_node->fe->data;
2418 } else {
2419 vat_uniqueid = udf_rw64(vat_node->efe->unique_id);
2420 ea_start = vat_node->efe->data;
2421 }
2422
2423 error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
2424 if (error)
2425 return error;
2426 /* found, it existed */
2427
2428 /* paranoia */
2429 implext = (struct impl_extattr_entry *) (ea_start + offset);
2430 error = udf_impl_extattr_check(implext);
2431 if (error) {
2432 DPRINTF(VOLUMES, ("VAT LVExtension bad on update\n"));
2433 return error;
2434 }
2435 /* it is correct */
2436
2437 /*
2438  * we have found our "VAT LVExtension" attribute. BUT due to a
2439 * bug in the specification it might not be word aligned so
2440 * copy first to avoid panics on some machines (!!)
2441 */
2442 DPRINTF(VOLUMES, ("Updating VAT LVExtension attr\n"));
2443 lvextpos = implext->data + udf_rw32(implext->iu_l);
2444
2445 lvext.unique_id_chk = vat_uniqueid;
2446 lvext.num_files = lvinfo->num_files;
2447 lvext.num_directories = lvinfo->num_directories;
2448 memmove(lvext.logvol_id, ump->logical_vol->logvol_id, 128);
2449
2450 memcpy(lvextpos, &lvext, sizeof(lvext));
2451
2452 return 0;
2453 }
2454
2455 /* --------------------------------------------------------------------- */
2456
2457 int
2458 udf_vat_read(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
2459 {
2460 struct udf_mount *ump = vat_node->ump;
2461
2462 if (offset + size > ump->vat_offset + ump->vat_entries * 4)
2463 return EINVAL;
2464
2465 memcpy(blob, ump->vat_table + offset, size);
2466 return 0;
2467 }
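/*
 * Sketch of how the VAT table is consumed (the actual virtual to physical
 * translation lives elsewhere): virtual block number `vba' maps to the
 * 32 bit entry found at byte offset vat_offset + vba * 4, i.e. roughly
 *
 *	uint32_t lb_map;
 *	error = udf_vat_read(ump->vat_node, (uint8_t *) &lb_map,
 *	    4, ump->vat_offset + vba * 4);
 *	lb_num = udf_rw32(lb_map);
 */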
2468
2469 int
2470 udf_vat_write(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
2471 {
2472 struct udf_mount *ump = vat_node->ump;
2473 uint32_t offset_high;
2474 uint8_t *new_vat_table;
2475
2476 /* extend VAT allocation if needed */
2477 offset_high = offset + size;
2478 if (offset_high >= ump->vat_table_alloc_len) {
2479 /* realloc */
2480 new_vat_table = realloc(ump->vat_table,
2481 ump->vat_table_alloc_len + UDF_VAT_CHUNKSIZE,
2482 M_UDFVOLD, M_WAITOK | M_CANFAIL);
2483 if (!new_vat_table) {
2484 printf("udf_vat_write: can't extend VAT, out of mem\n");
2485 return ENOMEM;
2486 }
2487 ump->vat_table = new_vat_table;
2488 ump->vat_table_alloc_len += UDF_VAT_CHUNKSIZE;
2489 }
2490 ump->vat_table_len = MAX(ump->vat_table_len, offset_high);
2491
2492 memcpy(ump->vat_table + offset, blob, size);
2493 return 0;
2494 }
2495
2496 /* --------------------------------------------------------------------- */
2497
2498 /* TODO support previous VAT location writeout */
2499 static int
2500 udf_update_vat_descriptor(struct udf_mount *ump)
2501 {
2502 struct udf_node *vat_node = ump->vat_node;
2503 struct udf_logvol_info *lvinfo = ump->logvol_info;
2504 struct icb_tag *icbtag;
2505 struct udf_oldvat_tail *oldvat_tl;
2506 struct udf_vat *vat;
2507 uint64_t unique_id;
2508 uint32_t lb_size;
2509 uint8_t *raw_vat;
2510 int filetype, error;
2511
2512 KASSERT(vat_node);
2513 KASSERT(lvinfo);
2514 lb_size = udf_rw32(ump->logical_vol->lb_size);
2515
2516 /* get our new unique_id */
2517 unique_id = udf_advance_uniqueid(ump);
2518
2519 /* get information from fe/efe */
2520 if (vat_node->fe) {
2521 icbtag = &vat_node->fe->icbtag;
2522 vat_node->fe->unique_id = udf_rw64(unique_id);
2523 } else {
2524 icbtag = &vat_node->efe->icbtag;
2525 vat_node->efe->unique_id = udf_rw64(unique_id);
2526 }
2527
2528 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
2529 filetype = icbtag->file_type;
2530 KASSERT((filetype == 0) || (filetype == UDF_ICB_FILETYPE_VAT));
2531
2532 /* allocate piece to process head or tail of VAT file */
2533 raw_vat = malloc(lb_size, M_TEMP, M_WAITOK);
2534
2535 if (filetype == 0) {
2536 /*
2537 * Update "*UDF VAT LVExtension" extended attribute from the
2538 * lvint if present.
2539 */
2540 udf_update_vat_extattr_from_lvid(vat_node);
2541
2542 /* setup identifying regid */
2543 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2544 memset(oldvat_tl, 0, sizeof(struct udf_oldvat_tail));
2545
2546 udf_set_regid(&oldvat_tl->id, "*UDF Virtual Alloc Tbl");
2547 udf_add_udf_regid(ump, &oldvat_tl->id);
2548 oldvat_tl->prev_vat = udf_rw32(0xffffffff);
2549
2550 /* write out new tail of virtual allocation table file */
2551 error = udf_vat_write(vat_node, raw_vat,
2552 sizeof(struct udf_oldvat_tail), ump->vat_entries * 4);
2553 } else {
2554 /* compose the VAT2 header */
2555 vat = (struct udf_vat *) raw_vat;
2556 memset(vat, 0, sizeof(struct udf_vat));
2557
2558 vat->header_len = udf_rw16(152); /* as per spec */
2559 vat->impl_use_len = udf_rw16(0);
2560 memmove(vat->logvol_id, ump->logical_vol->logvol_id, 128);
2561 vat->prev_vat = udf_rw32(0xffffffff);
2562 vat->num_files = lvinfo->num_files;
2563 vat->num_directories = lvinfo->num_directories;
2564 vat->min_udf_readver = lvinfo->min_udf_readver;
2565 vat->min_udf_writever = lvinfo->min_udf_writever;
2566 vat->max_udf_writever = lvinfo->max_udf_writever;
2567
2568 error = udf_vat_write(vat_node, raw_vat,
2569 sizeof(struct udf_vat), 0);
2570 }
2571 free(raw_vat, M_TEMP);
2572
2573 return error; /* error from udf_vat_write, if any */
2574 }
2575
2576
2577 int
2578 udf_writeout_vat(struct udf_mount *ump)
2579 {
2580 struct udf_node *vat_node = ump->vat_node;
2581 uint32_t vat_length;
2582 int error;
2583
2584 KASSERT(vat_node);
2585
2586 DPRINTF(CALL, ("udf_writeout_vat\n"));
2587
2588 mutex_enter(&ump->allocate_mutex);
2589 udf_update_vat_descriptor(ump);
2590
2591 /* write out the VAT contents ; TODO intelligent writing */
2592 vat_length = ump->vat_table_len;
2593 error = vn_rdwr(UIO_WRITE, vat_node->vnode,
2594 ump->vat_table, ump->vat_table_len, 0,
2595 UIO_SYSSPACE, IO_NODELOCKED, FSCRED, NULL, NULL);
2596 if (error) {
2597 printf("udf_writeout_vat: failed to write out VAT contents\n");
2598 goto out;
2599 }
2600
2601 mutex_exit(&ump->allocate_mutex);
2602
2603 vflushbuf(ump->vat_node->vnode, 1 /* sync */);
2604 error = VOP_FSYNC(ump->vat_node->vnode,
2605 FSCRED, FSYNC_WAIT, 0, 0);
2606 if (error)
2607 printf("udf_writeout_vat: error writing VAT node!\n");
2608 out:
2609
2610 return error;
2611 }
2612
2613 /* --------------------------------------------------------------------- */
2614
2615 /*
2616  * Read in relevant pieces of the VAT file and check if it's indeed a VAT file
2617  * descriptor. If OK, read in the complete VAT file.
2618 */
2619
2620 static int
2621 udf_check_for_vat(struct udf_node *vat_node)
2622 {
2623 struct udf_mount *ump;
2624 struct icb_tag *icbtag;
2625 struct timestamp *mtime;
2626 struct udf_vat *vat;
2627 struct udf_oldvat_tail *oldvat_tl;
2628 struct udf_logvol_info *lvinfo;
2629 uint64_t unique_id;
2630 uint32_t vat_length;
2631 uint32_t vat_offset, vat_entries, vat_table_alloc_len;
2632 uint32_t sector_size;
2633 uint32_t *raw_vat;
2634 uint8_t *vat_table;
2635 char *regid_name;
2636 int filetype;
2637 int error;
2638
2639 /* on disc the VAT length is a 64 bit value, though a VAT that large is impossible */
2640
2641 DPRINTF(VOLUMES, ("Checking for VAT\n"));
2642 if (!vat_node)
2643 return ENOENT;
2644
2645 /* get mount info */
2646 ump = vat_node->ump;
2647 sector_size = udf_rw32(ump->logical_vol->lb_size);
2648
2649 /* check assertions */
2650 assert(vat_node->fe || vat_node->efe);
2651 assert(ump->logvol_integrity);
2652
2653 /* set vnode type to regular file or we can't read from it! */
2654 vat_node->vnode->v_type = VREG;
2655
2656 /* get information from fe/efe */
2657 if (vat_node->fe) {
2658 vat_length = udf_rw64(vat_node->fe->inf_len);
2659 icbtag = &vat_node->fe->icbtag;
2660 mtime = &vat_node->fe->mtime;
2661 unique_id = udf_rw64(vat_node->fe->unique_id);
2662 } else {
2663 vat_length = udf_rw64(vat_node->efe->inf_len);
2664 icbtag = &vat_node->efe->icbtag;
2665 mtime = &vat_node->efe->mtime;
2666 unique_id = udf_rw64(vat_node->efe->unique_id);
2667 }
2668
2669 /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
2670 filetype = icbtag->file_type;
2671 if ((filetype != 0) && (filetype != UDF_ICB_FILETYPE_VAT))
2672 return ENOENT;
2673
2674 DPRINTF(VOLUMES, ("\tPossible VAT length %d\n", vat_length));
2675
2676 vat_table_alloc_len =
2677 ((vat_length + UDF_VAT_CHUNKSIZE-1) / UDF_VAT_CHUNKSIZE)
2678 * UDF_VAT_CHUNKSIZE;
2679
2680 vat_table = malloc(vat_table_alloc_len, M_UDFVOLD,
2681 M_CANFAIL | M_WAITOK);
2682 if (vat_table == NULL) {
2683 printf("allocation of %d bytes failed for VAT\n",
2684 vat_table_alloc_len);
2685 return ENOMEM;
2686 }
2687
2688 /* allocate piece to read in head or tail of VAT file */
2689 raw_vat = malloc(sector_size, M_TEMP, M_WAITOK);
2690
2691 /*
2692  * check the contents of the file if it's the old 1.50 VAT table format.
2693  * It's notoriously broken and although some implementations support an
2694  * extension as defined in the UDF 1.50 errata document, it's doubtful
2695  * to be usable since a lot of implementations don't maintain it.
2696 */
2697 lvinfo = ump->logvol_info;
2698
2699 if (filetype == 0) {
2700 /* definition */
2701 vat_offset = 0;
2702 vat_entries = (vat_length-36)/4;
2703
2704 /* read in tail of virtual allocation table file */
2705 error = vn_rdwr(UIO_READ, vat_node->vnode,
2706 (uint8_t *) raw_vat,
2707 sizeof(struct udf_oldvat_tail),
2708 vat_entries * 4,
2709 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2710 NULL, NULL);
2711 if (error)
2712 goto out;
2713
2714 /* check 1.50 VAT */
2715 oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
2716 regid_name = (char *) oldvat_tl->id.id;
2717 error = strncmp(regid_name, "*UDF Virtual Alloc Tbl", 22);
2718 if (error) {
2719 DPRINTF(VOLUMES, ("VAT format 1.50 rejected\n"));
2720 error = ENOENT;
2721 goto out;
2722 }
2723
2724 /*
2725 * update LVID from "*UDF VAT LVExtension" extended attribute
2726 * if present.
2727 */
2728 udf_update_lvid_from_vat_extattr(vat_node);
2729 } else {
2730 /* read in head of virtual allocation table file */
2731 error = vn_rdwr(UIO_READ, vat_node->vnode,
2732 (uint8_t *) raw_vat,
2733 sizeof(struct udf_vat), 0,
2734 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2735 NULL, NULL);
2736 if (error)
2737 goto out;
2738
2739 /* definition */
2740 vat = (struct udf_vat *) raw_vat;
2741 vat_offset = vat->header_len;
2742 vat_entries = (vat_length - vat_offset)/4;
2743
2744 assert(lvinfo);
2745 lvinfo->num_files = vat->num_files;
2746 lvinfo->num_directories = vat->num_directories;
2747 lvinfo->min_udf_readver = vat->min_udf_readver;
2748 lvinfo->min_udf_writever = vat->min_udf_writever;
2749 lvinfo->max_udf_writever = vat->max_udf_writever;
2750
2751 udf_update_logvolname(ump, vat->logvol_id);
2752 }
2753
2754 /* read in complete VAT file */
2755 error = vn_rdwr(UIO_READ, vat_node->vnode,
2756 vat_table,
2757 vat_length, 0,
2758 UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
2759 NULL, NULL);
2760 if (error)
2761 printf("read in of complete VAT file failed (error %d)\n",
2762 error);
2763 if (error)
2764 goto out;
2765
2766 DPRINTF(VOLUMES, ("VAT format accepted, marking it closed\n"));
2767 ump->logvol_integrity->lvint_next_unique_id = udf_rw64(unique_id);
2768 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
2769 ump->logvol_integrity->time = *mtime;
2770
2771 ump->vat_table_len = vat_length;
2772 ump->vat_table_alloc_len = vat_table_alloc_len;
2773 ump->vat_table = vat_table;
2774 ump->vat_offset = vat_offset;
2775 ump->vat_entries = vat_entries;
2776 ump->vat_last_free_lb = 0; /* start at beginning */
2777
2778 out:
2779 if (error) {
2780 if (vat_table)
2781 free(vat_table, M_UDFVOLD);
2782 }
2783 free(raw_vat, M_TEMP);
2784
2785 return error;
2786 }
2787
2788 /* --------------------------------------------------------------------- */
2789
2790 static int
2791 udf_search_vat(struct udf_mount *ump, union udf_pmap *mapping)
2792 {
2793 struct udf_node *vat_node;
2794 struct long_ad icb_loc;
2795 uint32_t early_vat_loc, late_vat_loc, vat_loc;
2796 int error;
2797
2798 /* mapping info not needed; self-assignment quiets the unused warning */
2799 mapping = mapping;
2800
2801 vat_loc = ump->last_possible_vat_location;
2802 early_vat_loc = vat_loc - 256; /* 8 blocks of 32 sectors */
2803
2804 DPRINTF(VOLUMES, ("1) last possible %d, early_vat_loc %d \n",
2805 vat_loc, early_vat_loc));
2806 early_vat_loc = MAX(early_vat_loc, ump->first_possible_vat_location);
2807 late_vat_loc = vat_loc + 1024;
2808
2809 DPRINTF(VOLUMES, ("2) last possible %d, early_vat_loc %d \n",
2810 vat_loc, early_vat_loc));
2811
2812 /* start looking from the end of the range */
2813 do {
2814 DPRINTF(VOLUMES, ("Checking for VAT at sector %d\n", vat_loc));
2815 icb_loc.loc.part_num = udf_rw16(UDF_VTOP_RAWPART);
2816 icb_loc.loc.lb_num = udf_rw32(vat_loc);
2817
2818 error = udf_get_node(ump, &icb_loc, &vat_node);
2819 if (!error) {
2820 error = udf_check_for_vat(vat_node);
2821 DPRINTFIF(VOLUMES, !error,
2822 ("VAT accepted at %d\n", vat_loc));
2823 if (!error)
2824 break;
2825 }
2826 if (vat_node) {
2827 vput(vat_node->vnode);
2828 vat_node = NULL;
2829 }
2830 vat_loc--; /* walk backwards */
2831 } while (vat_loc >= early_vat_loc);
2832
2833 /* keep our VAT node around */
2834 if (vat_node) {
2835 UDF_SET_SYSTEMFILE(vat_node->vnode);
2836 ump->vat_node = vat_node;
2837 }
2838
2839 return error;
2840 }
2841
2842 /* --------------------------------------------------------------------- */
2843
2844 static int
2845 udf_read_sparables(struct udf_mount *ump, union udf_pmap *mapping)
2846 {
2847 union dscrptr *dscr;
2848 struct part_map_spare *pms = &mapping->pms;
2849 uint32_t lb_num;
2850 int spar, error;
2851
2852 /*
2853  * The partition mapping passed on to us specifies the information
2854  * we need to locate and initialise the sparable partition
2855  * mapping.
2856  */
2857
2858 DPRINTF(VOLUMES, ("Read sparable table\n"));
2859 ump->sparable_packet_size = udf_rw16(pms->packet_len);
2860 KASSERT(ump->sparable_packet_size >= ump->packet_size); /* XXX */
2861
2862 for (spar = 0; spar < pms->n_st; spar++) {
2863 lb_num = pms->st_loc[spar];
2864 DPRINTF(VOLUMES, ("Checking for sparing table %d\n", lb_num));
2865 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
2866 if (!error && dscr) {
2867 if (udf_rw16(dscr->tag.id) == TAGID_SPARING_TABLE) {
2868 if (ump->sparing_table)
2869 free(ump->sparing_table, M_UDFVOLD);
2870 ump->sparing_table = &dscr->spt;
2871 dscr = NULL;
2872 DPRINTF(VOLUMES,
2873 ("Sparing table accepted (%d entries)\n",
2874 udf_rw16(ump->sparing_table->rt_l)));
2875 break; /* we're done */
2876 }
2877 }
2878 if (dscr)
2879 free(dscr, M_UDFVOLD);
2880 }
2881
2882 if (ump->sparing_table)
2883 return 0;
2884
2885 return ENOENT;
2886 }
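/*
 * Roughly how the table read above is used: it lists (original, mapped)
 * packet address pairs, and when a packet in the sparable partition goes bad
 * the error handling remaps it to its spare location (see UDF_REMAP_BLOCK in
 * the error behaviour selection earlier in this file).
 */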
2887
2888 /* --------------------------------------------------------------------- */
2889
2890 static int
2891 udf_read_metadata_nodes(struct udf_mount *ump, union udf_pmap *mapping)
2892 {
2893 struct part_map_meta *pmm = &mapping->pmm;
2894 struct long_ad icb_loc;
2895 struct vnode *vp;
2896 int error;
2897
2898 DPRINTF(VOLUMES, ("Reading in Metadata files\n"));
2899 icb_loc.loc.part_num = pmm->part_num;
2900 icb_loc.loc.lb_num = pmm->meta_file_lbn;
2901 DPRINTF(VOLUMES, ("Metadata file\n"));
2902 error = udf_get_node(ump, &icb_loc, &ump->metadata_node);
2903 if (ump->metadata_node) {
2904 vp = ump->metadata_node->vnode;
2905 UDF_SET_SYSTEMFILE(vp);
2906 }
2907
2908 icb_loc.loc.lb_num = pmm->meta_mirror_file_lbn;
2909 if (icb_loc.loc.lb_num != -1) {
2910 DPRINTF(VOLUMES, ("Metadata copy file\n"));
2911 error = udf_get_node(ump, &icb_loc, &ump->metadatamirror_node);
2912 if (ump->metadatamirror_node) {
2913 vp = ump->metadatamirror_node->vnode;
2914 UDF_SET_SYSTEMFILE(vp);
2915 }
2916 }
2917
2918 icb_loc.loc.lb_num = pmm->meta_bitmap_file_lbn;
2919 if (icb_loc.loc.lb_num != -1) {
2920 DPRINTF(VOLUMES, ("Metadata bitmap file\n"));
2921 error = udf_get_node(ump, &icb_loc, &ump->metadatabitmap_node);
2922 if (ump->metadatabitmap_node) {
2923 vp = ump->metadatabitmap_node->vnode;
2924 UDF_SET_SYSTEMFILE(vp);
2925 }
2926 }
2927
2928 /* if we're mounting read-only we relax the requirements */
2929 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) {
2930 error = EFAULT;
2931 if (ump->metadata_node)
2932 error = 0;
2933 if ((ump->metadata_node == NULL) && (ump->metadatamirror_node)) {
2934 printf( "udf mount: Metadata file not readable, "
2935 "substituting Metadata copy file\n");
2936 ump->metadata_node = ump->metadatamirror_node;
2937 ump->metadatamirror_node = NULL;
2938 error = 0;
2939 }
2940 } else {
2941 /* mounting read/write */
2942 /* if (error) */
2943 error = EROFS;
2944 }
2945 DPRINTFIF(VOLUMES, error, ("udf mount: failed to read "
2946 "metadata files\n"));
2947 return error;
2948 }
2949
2950 /* --------------------------------------------------------------------- */
2951
2952 int
2953 udf_read_vds_tables(struct udf_mount *ump)
2954 {
2955 union udf_pmap *mapping;
2956 /* struct udf_args *args = &ump->mount_args; */
2957 uint32_t n_pm, mt_l;
2958 uint32_t log_part;
2959 uint8_t *pmap_pos;
2960 int pmap_size;
2961 int error;
2962
2963 /* read in and check unallocated and free space info if writing */
2964 if ((ump->vfs_mountp->mnt_flag & MNT_RDONLY) == 0) {
2965 error = udf_read_physical_partition_spacetables(ump);
2966 if (error)
2967 return error;
2968 }
2969
2970 /* Iterate (again) over the part mappings for locations */
2971 n_pm = udf_rw32(ump->logical_vol->n_pm); /* num partmaps */
2972 mt_l = udf_rw32(ump->logical_vol->mt_l); /* partmaps data length */
2973 pmap_pos = ump->logical_vol->maps;
2974
2975 for (log_part = 0; log_part < n_pm; log_part++) {
2976 mapping = (union udf_pmap *) pmap_pos;
2977 switch (ump->vtop_tp[log_part]) {
2978 case UDF_VTOP_TYPE_PHYS :
2979 /* nothing */
2980 break;
2981 case UDF_VTOP_TYPE_VIRT :
2982 /* search and load VAT */
2983 error = udf_search_vat(ump, mapping);
2984 if (error)
2985 return ENOENT;
2986 break;
2987 case UDF_VTOP_TYPE_SPARABLE :
2988 /* load one of the sparable tables */
2989 error = udf_read_sparables(ump, mapping);
2990 if (error)
2991 return ENOENT;
2992 break;
2993 case UDF_VTOP_TYPE_META :
2994 /* load the associated file descriptors */
2995 error = udf_read_metadata_nodes(ump, mapping);
2996 if (error)
2997 return ENOENT;
2998 break;
2999 default:
3000 break;
3001 }
3002 pmap_size = pmap_pos[1];
3003 pmap_pos += pmap_size;
3004 }
3005
3006 return 0;
3007 }
3008
3009 /* --------------------------------------------------------------------- */
3010
3011 int
3012 udf_read_rootdirs(struct udf_mount *ump)
3013 {
3014 union dscrptr *dscr;
3015 /* struct udf_args *args = &ump->mount_args; */
3016 struct udf_node *rootdir_node, *streamdir_node;
3017 struct long_ad fsd_loc, *dir_loc;
3018 uint32_t lb_num, dummy;
3019 uint32_t fsd_len;
3020 int dscr_type;
3021 int error;
3022
3023 /* TODO implement FSD reading in separate function like integrity? */
3024 /* get fileset descriptor sequence */
3025 fsd_loc = ump->logical_vol->lv_fsd_loc;
3026 fsd_len = udf_rw32(fsd_loc.len);
3027
3028 dscr = NULL;
3029 error = 0;
3030 while (fsd_len || error) {
3031 DPRINTF(VOLUMES, ("fsd_len = %d\n", fsd_len));
3032 /* translate fsd_loc to lb_num */
3033 error = udf_translate_vtop(ump, &fsd_loc, &lb_num, &dummy);
3034 if (error)
3035 break;
3036 DPRINTF(VOLUMES, ("Reading FSD at lb %d\n", lb_num));
3037 error = udf_read_phys_dscr(ump, lb_num, M_UDFVOLD, &dscr);
3038 /* end markers */
3039 if (error || (dscr == NULL))
3040 break;
3041
3042 /* analyse */
3043 dscr_type = udf_rw16(dscr->tag.id);
3044 if (dscr_type == TAGID_TERM)
3045 break;
3046 if (dscr_type != TAGID_FSD) {
3047 free(dscr, M_UDFVOLD);
3048 return ENOENT;
3049 }
3050
3051 /*
3052  * TODO check for multiple fileset descriptors; it's only
3053  * picking the last one now. Also check for FSD
3054 * correctness/interpretability
3055 */
3056
3057 /* update */
3058 if (ump->fileset_desc) {
3059 free(ump->fileset_desc, M_UDFVOLD);
3060 }
3061 ump->fileset_desc = &dscr->fsd;
3062 dscr = NULL;
3063
3064 /* continue to the next fsd */
3065 fsd_len -= ump->discinfo.sector_size;
3066 fsd_loc.loc.lb_num = udf_rw32(udf_rw32(fsd_loc.loc.lb_num)+1);
3067
3068 /* follow up to fsd->next_ex (long_ad) if it's not null */
3069 if (udf_rw32(ump->fileset_desc->next_ex.len)) {
3070 DPRINTF(VOLUMES, ("follow up FSD extent\n"));
3071 fsd_loc = ump->fileset_desc->next_ex;
3072 fsd_len = udf_rw32(ump->fileset_desc->next_ex.len);
3073 }
3074 }
3075 if (dscr)
3076 free(dscr, M_UDFVOLD);
3077
3078 /* there has to be one */
3079 if (ump->fileset_desc == NULL)
3080 return ENOENT;
3081
3082 DPRINTF(VOLUMES, ("FSD read in fine\n"));
3083 DPRINTF(VOLUMES, ("Updating fsd logical volume id\n"));
3084 udf_update_logvolname(ump, ump->logical_vol->logvol_id);
3085
3086 /*
3087  * Now that the FSD is known, read in the root directory and, if one
3088  * exists, the system stream dir. Some files in the system streamdir are
3089  * not wanted in this implementation since they are not maintained. If
3090  * writing is enabled we'll delete these files if they exist.
3091 */
3092
3093 rootdir_node = streamdir_node = NULL;
3094 dir_loc = NULL;
3095
3096 /* try to read in the rootdir */
3097 dir_loc = &ump->fileset_desc->rootdir_icb;
3098 error = udf_get_node(ump, dir_loc, &rootdir_node);
3099 if (error)
3100 return ENOENT;
3101
3102 /* apparently it read in fine */
3103
3104 /*
3105 * Try the system stream directory; not very likely in the ones we
3106 * test, but for completeness.
3107 */
3108 dir_loc = &ump->fileset_desc->streamdir_icb;
3109 if (udf_rw32(dir_loc->len)) {
3110 printf("udf_read_rootdirs: streamdir defined ");
3111 error = udf_get_node(ump, dir_loc, &streamdir_node);
3112 if (error) {
3113 printf("but error in streamdir reading\n");
3114 } else {
3115 printf("but ignored\n");
3116 /*
3117  * TODO process streamdir `baddies' i.e. files we don't
3118 * want if R/W
3119 */
3120 }
3121 }
3122
3123 DPRINTF(VOLUMES, ("Rootdir(s) read in fine\n"));
3124
3125 /* release the vnodes again; they'll be auto-recycled later */
3126 if (streamdir_node) {
3127 vput(streamdir_node->vnode);
3128 }
3129 if (rootdir_node) {
3130 vput(rootdir_node->vnode);
3131 }
3132
3133 return 0;
3134 }
3135
3136 /* --------------------------------------------------------------------- */
3137
3138 /* To make absolutely sure we are NOT returning zero, add one :) */
3139
3140 long
3141 udf_calchash(struct long_ad *icbptr)
3142 {
3143 /* ought to be enough since each mountpoint has its own chain */
3144 return udf_rw32(icbptr->loc.lb_num) + 1;
3145 }
3146
3147
3148 static struct udf_node *
3149 udf_hash_lookup(struct udf_mount *ump, struct long_ad *icbptr)
3150 {
3151 struct udf_node *node;
3152 struct vnode *vp;
3153 uint32_t hashline;
3154
3155 loop:
3156 mutex_enter(&ump->ihash_lock);
3157
3158 hashline = udf_calchash(icbptr) & UDF_INODE_HASHMASK;
3159 LIST_FOREACH(node, &ump->udf_nodes[hashline], hashchain) {
3160 assert(node);
3161 if (node->loc.loc.lb_num == icbptr->loc.lb_num &&
3162 node->loc.loc.part_num == icbptr->loc.part_num) {
3163 vp = node->vnode;
3164 assert(vp);
3165 mutex_enter(&vp->v_interlock);
3166 mutex_exit(&ump->ihash_lock);
3167 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
3168 goto loop;
3169 return node;
3170 }
3171 }
3172 mutex_exit(&ump->ihash_lock);
3173
3174 return NULL;
3175 }
3176
3177
3178 static void
3179 udf_sorted_list_insert(struct udf_node *node)
3180 {
3181 struct udf_mount *ump;
3182 struct udf_node *s_node, *last_node;
3183 uint32_t loc, s_loc;
3184
3185 ump = node->ump;
3186 last_node = NULL; /* XXX gcc */
3187
3188 if (LIST_EMPTY(&ump->sorted_udf_nodes)) {
3189 LIST_INSERT_HEAD(&ump->sorted_udf_nodes, node, sortchain);
3190 return;
3191 }
3192
3193 /*
3194  * We sort on logical block number here, not on physical block number.
3195  * Ideally we should go for the physical block number to get better
3196  * sync performance, though this sort will ensure that packets won't
3197  * get split up unnecessarily.
3198 */
3199
3200 loc = udf_rw32(node->loc.loc.lb_num);
3201 LIST_FOREACH(s_node, &ump->sorted_udf_nodes, sortchain) {
3202 s_loc = udf_rw32(s_node->loc.loc.lb_num);
3203 if (s_loc > loc) {
3204 LIST_INSERT_BEFORE(s_node, node, sortchain);
3205 return;
3206 }
3207 last_node = s_node;
3208 }
3209 LIST_INSERT_AFTER(last_node, node, sortchain);
3210 }
3211
3212
3213 static void
3214 udf_register_node(struct udf_node *node)
3215 {
3216 struct udf_mount *ump;
3217 struct udf_node *chk;
3218 uint32_t hashline;
3219
3220 ump = node->ump;
3221 mutex_enter(&ump->ihash_lock);
3222
3223 /* add to our hash table */
3224 hashline = udf_calchash(&node->loc) & UDF_INODE_HASHMASK;
3225 #ifdef DEBUG
3226 LIST_FOREACH(chk, &ump->udf_nodes[hashline], hashchain) {
3227 assert(chk);
3228 if (chk->loc.loc.lb_num == node->loc.loc.lb_num &&
3229 chk->loc.loc.part_num == node->loc.loc.part_num)
3230 panic("Double node entered\n");
3231 }
3232 #else
3233 chk = NULL;
3234 #endif
3235 LIST_INSERT_HEAD(&ump->udf_nodes[hashline], node, hashchain);
3236
3237 /* add to our sorted list */
3238 udf_sorted_list_insert(node);
3239
3240 mutex_exit(&ump->ihash_lock);
3241 }
3242
3243
3244 static void
3245 udf_deregister_node(struct udf_node *node)
3246 {
3247 struct udf_mount *ump;
3248
3249 ump = node->ump;
3250 mutex_enter(&ump->ihash_lock);
3251
3252 /* from hash and sorted list */
3253 LIST_REMOVE(node, hashchain);
3254 LIST_REMOVE(node, sortchain);
3255
3256 mutex_exit(&ump->ihash_lock);
3257 }
3258
3259 /* --------------------------------------------------------------------- */
3260
3261 int
3262 udf_open_logvol(struct udf_mount *ump)
3263 {
3264 int logvol_integrity;
3265 int error;
3266
3267 /* already/still open? */
3268 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
3269 if (logvol_integrity == UDF_INTEGRITY_OPEN)
3270 return 0;
3271
3272 /* can we open it ? */
3273 if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
3274 return EROFS;
3275
3276 /* setup write parameters */
3277 DPRINTF(VOLUMES, ("Setting up write parameters\n"));
3278 if ((error = udf_setup_writeparams(ump)) != 0)
3279 return error;
3280
3281 /* determine data and metadata tracks (most likely same) */
3282 error = udf_search_writing_tracks(ump);
3283 if (error) {
3284 /* most likely lack of space */
3285 printf("udf_open_logvol: error searching writing tracks\n");
3286 return EROFS;
3287 }
3288
3289 /* writeout/update lvint on disc or only in memory */
3290 DPRINTF(VOLUMES, ("Opening logical volume\n"));
3291 if (ump->lvopen & UDF_OPEN_SESSION) {
3292 /* TODO implement writeout of VRS + VDS */
3293 printf( "udf_open_logvol: Opening a closed session not yet "
3294 "implemented\n");
3295 return EROFS;
3296
3297 /* determine data and metadata tracks again */
3298 error = udf_search_writing_tracks(ump);
3299 }
3300
3301 /* mark it open */
3302 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_OPEN);
3303
3304 /* do we need to write it out? */
3305 if (ump->lvopen & UDF_WRITE_LVINT) {
3306 error = udf_writeout_lvint(ump, ump->lvopen);
3307 /* if we couldn't write it mark it closed again */
3308 if (error) {
3309 ump->logvol_integrity->integrity_type =
3310 udf_rw32(UDF_INTEGRITY_CLOSED);
3311 return error;
3312 }
3313 }
3314
3315 return 0;
3316 }
3317
3318
3319 int
3320 udf_close_logvol(struct udf_mount *ump, int mntflags)
3321 {
3322 int logvol_integrity;
3323 int error = 0;
3324 int n;
3325
3326 /* already/still closed? */
3327 logvol_integrity = udf_rw32(ump->logvol_integrity->integrity_type);
3328 if (logvol_integrity == UDF_INTEGRITY_CLOSED)
3329 return 0;
3330
3331 /* writeout/update lvint or write out VAT */
3332 DPRINTF(VOLUMES, ("Closing logical volume\n"));
3333 if (ump->lvclose & UDF_WRITE_VAT) {
3334 DPRINTF(VOLUMES, ("lvclose & UDF_WRITE_VAT\n"));
3335
3336 /* preprocess the VAT node; it's modified on every writeout */
3337 DPRINTF(VOLUMES, ("writeout vat_node\n"));
3338 udf_update_vat_descriptor(ump->vat_node->ump);
3339
3340 /* write out the VAT node */
3341 vflushbuf(ump->vat_node->vnode, 1 /* sync */);
3342 for (n = 0; n < 16; n++) {
3343 ump->vat_node->i_flags |= IN_MODIFIED;
3344 error = VOP_FSYNC(ump->vat_node->vnode,
3345 FSCRED, FSYNC_WAIT, 0, 0);
3346 }
3347 if (error) {
3348 printf("udf_close_logvol: writeout of VAT failed\n");
3349 return error;
3350 }
3351 }
3352
3353 if (ump->lvclose & UDF_WRITE_PART_BITMAPS) {
3354 /* sync writeout partition spacetables */
3355 error = udf_write_physical_partition_spacetables(ump, true);
3356 if (error) {
3357 printf( "udf_close_logvol: writeout of space tables "
3358 "failed\n");
3359 return error;
3360 }
3361 ump->lvclose &= ~UDF_WRITE_PART_BITMAPS;
3362 }
3363
3364 if (ump->lvclose & UDF_CLOSE_SESSION) {
3365 printf("TODO: Closing a session is not yet implemented\n");
3366 return EROFS;
3367 ump->lvopen |= UDF_OPEN_SESSION;
3368 }
3369
3370 /* mark it closed */
3371 ump->logvol_integrity->integrity_type = udf_rw32(UDF_INTEGRITY_CLOSED);
3372
3373 /* do we need to write out the logical volume integrity */
3374 if (ump->lvclose & UDF_WRITE_LVINT)
3375 error = udf_writeout_lvint(ump, ump->lvopen);
3376 if (error) {
3377 /* HELP now what? mark it open again for now */
3378 ump->logvol_integrity->integrity_type =
3379 udf_rw32(UDF_INTEGRITY_OPEN);
3380 return error;
3381 }
3382
3383 (void) udf_synchronise_caches(ump);
3384
3385 return 0;
3386 }
3387
3388 /* --------------------------------------------------------------------- */
3389
3390 /*
3391 * Genfs interfacing
3392 *
3393 * static const struct genfs_ops udf_genfsops = {
3394 * .gop_size = genfs_size,
3395 * size of transfers
3396 * .gop_alloc = udf_gop_alloc,
3397 * allocate len bytes at offset
3398 * .gop_write = genfs_gop_write,
3399 * putpages interface code
3400 * .gop_markupdate = udf_gop_markupdate,
3401 * set update/modify flags etc.
3402 * }
3403 */
3404
3405 /*
3406 * Genfs interface. These four functions are the only ones defined though not
3407 * documented... great....
3408 */
3409
3410 /*
3411 * Callback from genfs to allocate len bytes at offset off; only called when
3412 * filling up gaps in the allocation.
3413 */
3414 /* XXX should we check if there is space enough in udf_gop_alloc? */
3415 static int
3416 udf_gop_alloc(struct vnode *vp, off_t off,
3417 off_t len, int flags, kauth_cred_t cred)
3418 {
3419 #if 0
3420 struct udf_node *udf_node = VTOI(vp);
3421 struct udf_mount *ump = udf_node->ump;
3422 uint32_t lb_size, num_lb;
3423 #endif
3424
3425 DPRINTF(NOTIMPL, ("udf_gop_alloc not implemented\n"));
3426 DPRINTF(ALLOC, ("udf_gop_alloc called for %"PRIu64" bytes\n", len));
3427
3428 return 0;
3429 }
3430
3431
3432 /*
3433 * callback from genfs to update our flags
3434 */
3435 static void
3436 udf_gop_markupdate(struct vnode *vp, int flags)
3437 {
3438 struct udf_node *udf_node = VTOI(vp);
3439 u_long mask = 0;
3440
3441 if ((flags & GOP_UPDATE_ACCESSED) != 0) {
3442 mask = IN_ACCESS;
3443 }
3444 if ((flags & GOP_UPDATE_MODIFIED) != 0) {
3445 if (vp->v_type == VREG) {
3446 mask |= IN_CHANGE | IN_UPDATE;
3447 } else {
3448 mask |= IN_MODIFY;
3449 }
3450 }
3451 if (mask) {
3452 udf_node->i_flags |= mask;
3453 }
3454 }
3455
3456
3457 static const struct genfs_ops udf_genfsops = {
3458 .gop_size = genfs_size,
3459 .gop_alloc = udf_gop_alloc,
3460 .gop_write = genfs_gop_write_rwmap,
3461 .gop_markupdate = udf_gop_markupdate,
3462 };
3463
3464
3465 /* --------------------------------------------------------------------- */
3466
3467 int
3468 udf_write_terminator(struct udf_mount *ump, uint32_t sector)
3469 {
3470 union dscrptr *dscr;
3471 int error;
3472
3473 dscr = malloc(ump->discinfo.sector_size, M_TEMP, M_WAITOK);
3474 bzero(dscr, ump->discinfo.sector_size);
3475 udf_inittag(ump, &dscr->tag, TAGID_TERM, sector);
3476
3477 /* CRC length for an anchor is 512 - tag length; defined in Ecma 167 */
3478 dscr->tag.desc_crc_len = udf_rw16(512-UDF_DESC_TAG_LENGTH);
3479 (void) udf_validate_tag_and_crc_sums(dscr);
3480
3481 error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
3482 dscr, sector, sector);
3483
3484 free(dscr, M_TEMP);
3485
3486 return error;
3487 }
3488
3489
3490 /* --------------------------------------------------------------------- */
3491
3492 /* UDF<->unix converters */
3493
3494 /* --------------------------------------------------------------------- */
3495
3496 static mode_t
3497 udf_perm_to_unix_mode(uint32_t perm)
3498 {
3499 mode_t mode;
3500
3501 mode = ((perm & UDF_FENTRY_PERM_USER_MASK) );
3502 mode |= ((perm & UDF_FENTRY_PERM_GRP_MASK ) >> 2);
3503 mode |= ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4);
3504
3505 return mode;
3506 }
3507
3508 /* --------------------------------------------------------------------- */
3509
3510 static uint32_t
3511 unix_mode_to_udf_perm(mode_t mode)
3512 {
3513 uint32_t perm;
3514
3515 perm = ((mode & S_IRWXO) );
3516 perm |= ((mode & S_IRWXG) << 2);
3517 perm |= ((mode & S_IRWXU) << 4);
3518 perm |= ((mode & S_IWOTH) << 3);
3519 perm |= ((mode & S_IWGRP) << 5);
3520 perm |= ((mode & S_IWUSR) << 7);
3521
3522 return perm;
3523 }
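/*
 * The shifts above follow the Ecma 167 permission layout: five bits per
 * class (execute, write, read, change attributes, delete) with `other' in
 * bits 0-4, `group' in bits 5-9 and `owner' in bits 10-14; a unix write bit
 * also grants the matching delete bit. Worked example, assuming that layout,
 * for mode 0644 (rw-r--r--):
 *
 *	(0644 & S_IRWXO)      = 0x0004	other read
 *	(0644 & S_IRWXG) << 2 = 0x0080	group read
 *	(0644 & S_IRWXU) << 4 = 0x1800	owner read + write
 *	(0644 & S_IWUSR) << 7 = 0x4000	owner delete
 *	                        ------
 *	            udf perm  = 0x5884
 */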
3524
3525 /* --------------------------------------------------------------------- */
3526
3527 static uint32_t
3528 udf_icb_to_unix_filetype(uint32_t icbftype)
3529 {
3530 switch (icbftype) {
3531 case UDF_ICB_FILETYPE_DIRECTORY :
3532 case UDF_ICB_FILETYPE_STREAMDIR :
3533 return S_IFDIR;
3534 case UDF_ICB_FILETYPE_FIFO :
3535 return S_IFIFO;
3536 case UDF_ICB_FILETYPE_CHARDEVICE :
3537 return S_IFCHR;
3538 case UDF_ICB_FILETYPE_BLOCKDEVICE :
3539 return S_IFBLK;
3540 case UDF_ICB_FILETYPE_RANDOMACCESS :
3541 case UDF_ICB_FILETYPE_REALTIME :
3542 return S_IFREG;
3543 case UDF_ICB_FILETYPE_SYMLINK :
3544 return S_IFLNK;
3545 case UDF_ICB_FILETYPE_SOCKET :
3546 return S_IFSOCK;
3547 }
3548 /* no idea what this is */
3549 return 0;
3550 }
3551
3552 /* --------------------------------------------------------------------- */
3553
3554 void
3555 udf_to_unix_name(char *result, int result_len, char *id, int len,
3556 struct charspec *chsp)
3557 {
3558 uint16_t *raw_name, *unix_name;
3559 uint16_t *inchp, ch;
3560 uint8_t *outchp;
3561 const char *osta_id = "OSTA Compressed Unicode";
3562 int ucode_chars, nice_uchars, is_osta_typ0, nout;
3563
3564 raw_name = malloc(2048 * sizeof(uint16_t), M_UDFTEMP, M_WAITOK);
3565 unix_name = raw_name + 1024; /* split space in half */
3566 assert(sizeof(char) == sizeof(uint8_t));
3567 outchp = (uint8_t *) result;
3568
3569 is_osta_typ0 = (chsp->type == 0);
3570 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
3571 if (is_osta_typ0) {
3572 /* TODO clean up */
3573 *raw_name = *unix_name = 0;
3574 ucode_chars = udf_UncompressUnicode(len, (uint8_t *) id, raw_name);
3575 ucode_chars = MIN(ucode_chars, UnicodeLength((unicode_t *) raw_name));
3576 nice_uchars = UDFTransName(unix_name, raw_name, ucode_chars);
3577 /* output UTF8 */
3578 for (inchp = unix_name; nice_uchars>0; inchp++, nice_uchars--) {
3579 ch = *inchp;
3580 nout = wput_utf8(outchp, result_len, ch);
3581 outchp += nout; result_len -= nout;
3582 if (!ch) break;
3583 }
3584 *outchp++ = 0;
3585 } else {
3586 /* assume 8bit char length byte latin-1 */
3587 assert(*id == 8);
3588 assert(strlen((char *) (id+1)) <= MAXNAMLEN);
3589 strncpy((char *) result, (char *) (id+1), strlen((char *) (id+1)));
3590 }
3591 free(raw_name, M_UDFTEMP);
3592 }
3593
3594 /* --------------------------------------------------------------------- */
3595
3596 void
3597 unix_to_udf_name(char *result, uint8_t *result_len, char const *name, int name_len,
3598 struct charspec *chsp)
3599 {
3600 uint16_t *raw_name;
3601 uint16_t *outchp;
3602 const char *inchp;
3603 const char *osta_id = "OSTA Compressed Unicode";
3604 int udf_chars, is_osta_typ0, bits;
3605 size_t cnt;
3606
3607 /* allocate temporary unicode-16 buffer */
3608 raw_name = malloc(1024, M_UDFTEMP, M_WAITOK);
3609
3610 /* convert utf8 to unicode-16 */
3611 *raw_name = 0;
3612 inchp = name;
3613 outchp = raw_name;
3614 bits = 8;
3615 for (cnt = name_len, udf_chars = 0; cnt;) {
3616 /* XXX cc warning: passing argument 2 of 'wget_utf8' from incompatible pointer type */
3617 *outchp = wget_utf8(&inchp, &cnt);
3618 if (*outchp > 0xff)
3619 bits=16;
3620 outchp++;
3621 udf_chars++;
3622 }
3623 /* null terminate just in case */
3624 *outchp++ = 0;
3625
3626 is_osta_typ0 = (chsp->type == 0);
3627 is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
3628 if (is_osta_typ0) {
3629 udf_chars = udf_CompressUnicode(udf_chars, bits,
3630 (unicode_t *) raw_name,
3631 (byte *) result);
3632 } else {
3633 printf("unix to udf name: no CHSP0 ?\n");
3634 /* XXX assume 8bit char length byte latin-1 */
3635 *result++ = 8; udf_chars = 1;
3636 strncpy(result, name + 1, name_len);
3637 udf_chars += name_len;
3638 }
3639 *result_len = udf_chars;
3640 free(raw_name, M_UDFTEMP);
3641 }
3642
3643 /* --------------------------------------------------------------------- */
3644
3645 void
3646 udf_timestamp_to_timespec(struct udf_mount *ump,
3647 struct timestamp *timestamp,
3648 struct timespec *timespec)
3649 {
3650 struct clock_ymdhms ymdhms;
3651 uint32_t usecs, secs, nsecs;
3652 uint16_t tz;
3653
3654 /* fill in ymdhms structure from timestamp */
3655 memset(&ymdhms, 0, sizeof(ymdhms));
3656 ymdhms.dt_year = udf_rw16(timestamp->year);
3657 ymdhms.dt_mon = timestamp->month;
3658 ymdhms.dt_day = timestamp->day;
3659 ymdhms.dt_wday = 0; /* ? */
3660 ymdhms.dt_hour = timestamp->hour;
3661 ymdhms.dt_min = timestamp->minute;
3662 ymdhms.dt_sec = timestamp->second;
3663
3664 secs = clock_ymdhms_to_secs(&ymdhms);
3665 usecs = timestamp->usec +
3666 100*timestamp->hund_usec + 10000*timestamp->centisec;
3667 nsecs = usecs * 1000;
3668
3669 /*
3670  * Calculate the time zone. The timezone is 12 bit signed 2's
3671  * complement, so we have to do some extra magic to handle it right.
3672 */
3673 tz = udf_rw16(timestamp->type_tz);
3674 tz &= 0x0fff; /* only lower 12 bits are significant */
3675 if (tz & 0x0800) /* sign extension */
3676 tz |= 0xf000;
3677
3678 /* TODO check timezone conversion */
3679 /* check if we are specified a timezone to convert */
3680 if (udf_rw16(timestamp->type_tz) & 0x1000) {
3681 if ((int16_t) tz != -2047)
3682 secs -= (int16_t) tz * 60;
3683 } else {
3684 secs -= ump->mount_args.gmtoff;
3685 }
3686
3687 timespec->tv_sec = secs;
3688 timespec->tv_nsec = nsecs;
3689 }
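/*
 * Example of the timezone handling above: an offset of -120 minutes is
 * recorded as the 12 bit two's complement value 0xf88; after masking and
 * sign extension tz reads back as (int16_t) -120, so two hours get added to
 * `secs' to arrive back at UTC.
 */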
3690
3691
3692 void
3693 udf_timespec_to_timestamp(struct timespec *timespec, struct timestamp *timestamp)
3694 {
3695 struct clock_ymdhms ymdhms;
3696 uint32_t husec, usec, csec;
3697
3698 (void) clock_secs_to_ymdhms(timespec->tv_sec, &ymdhms);
3699
3700 usec = timespec->tv_nsec / 1000;
3701 husec = usec / 100;
3702 usec -= husec * 100; /* only 0-99 in usec */
3703 csec = husec / 100; /* only 0-99 in csec */
3704 husec -= csec * 100; /* only 0-99 in husec */
3705
3706 /* set method 1 for CUT/GMT */
3707 timestamp->type_tz = udf_rw16((1<<12) + 0);
3708 timestamp->year = udf_rw16(ymdhms.dt_year);
3709 timestamp->month = ymdhms.dt_mon;
3710 timestamp->day = ymdhms.dt_day;
3711 timestamp->hour = ymdhms.dt_hour;
3712 timestamp->minute = ymdhms.dt_min;
3713 timestamp->second = ymdhms.dt_sec;
3714 timestamp->centisec = csec;
3715 timestamp->hund_usec = husec;
3716 timestamp->usec = usec;
3717 }
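/*
 * Worked example of the subsecond split above: tv_nsec = 123456789 gives
 * usec = 123456, then husec = 1234 -> usec = 56, csec = 12 -> husec = 34,
 * so the timestamp records centisec = 12, hund_usec = 34 and usec = 56,
 * i.e. 0.123456 seconds; the nanosecond remainder is dropped.
 */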
3718
3719 /* --------------------------------------------------------------------- */
3720
3721 /*
3722 * Attribute and filetypes converters with get/set pairs
3723 */
3724
3725 uint32_t
3726 udf_getaccessmode(struct udf_node *udf_node)
3727 {
3728 struct file_entry *fe = udf_node->fe;
3729 struct extfile_entry *efe = udf_node->efe;
3730 uint32_t udf_perm, icbftype;
3731 uint32_t mode, ftype;
3732 uint16_t icbflags;
3733
3734 UDF_LOCK_NODE(udf_node, 0);
3735 if (fe) {
3736 udf_perm = udf_rw32(fe->perm);
3737 icbftype = fe->icbtag.file_type;
3738 icbflags = udf_rw16(fe->icbtag.flags);
3739 } else {
3740 assert(udf_node->efe);
3741 udf_perm = udf_rw32(efe->perm);
3742 icbftype = efe->icbtag.file_type;
3743 icbflags = udf_rw16(efe->icbtag.flags);
3744 }
3745
3746 mode = udf_perm_to_unix_mode(udf_perm);
3747 ftype = udf_icb_to_unix_filetype(icbftype);
3748
3749 /* set suid, sgid, sticky from flags in fe/efe */
3750 if (icbflags & UDF_ICB_TAG_FLAGS_SETUID)
3751 mode |= S_ISUID;
3752 if (icbflags & UDF_ICB_TAG_FLAGS_SETGID)
3753 mode |= S_ISGID;
3754 if (icbflags & UDF_ICB_TAG_FLAGS_STICKY)
3755 mode |= S_ISVTX;
3756
3757 UDF_UNLOCK_NODE(udf_node, 0);
3758
3759 return mode | ftype;
3760 }
3761
3762
3763 void
3764 udf_setaccessmode(struct udf_node *udf_node, mode_t mode)
3765 {
3766 struct file_entry *fe = udf_node->fe;
3767 struct extfile_entry *efe = udf_node->efe;
3768 uint32_t udf_perm;
3769 uint16_t icbflags;
3770
3771 UDF_LOCK_NODE(udf_node, 0);
3772 udf_perm = unix_mode_to_udf_perm(mode & ALLPERMS);
3773 if (fe) {
3774 icbflags = udf_rw16(fe->icbtag.flags);
3775 } else {
3776 icbflags = udf_rw16(efe->icbtag.flags);
3777 }
3778
3779 icbflags &= ~UDF_ICB_TAG_FLAGS_SETUID;
3780 icbflags &= ~UDF_ICB_TAG_FLAGS_SETGID;
3781 icbflags &= ~UDF_ICB_TAG_FLAGS_STICKY;
3782 if (mode & S_ISUID)
3783 icbflags |= UDF_ICB_TAG_FLAGS_SETUID;
3784 if (mode & S_ISGID)
3785 icbflags |= UDF_ICB_TAG_FLAGS_SETGID;
3786 if (mode & S_ISVTX)
3787 icbflags |= UDF_ICB_TAG_FLAGS_STICKY;
3788
3789 if (fe) {
3790 fe->perm = udf_rw32(udf_perm);
3791 fe->icbtag.flags = udf_rw16(icbflags);
3792 } else {
3793 efe->perm = udf_rw32(udf_perm);
3794 efe->icbtag.flags = udf_rw16(icbflags);
3795 }
3796
3797 UDF_UNLOCK_NODE(udf_node, 0);
3798 }
3799
3800
3801 void
3802 udf_getownership(struct udf_node *udf_node, uid_t *uidp, gid_t *gidp)
3803 {
3804 struct udf_mount *ump = udf_node->ump;
3805 struct file_entry *fe = udf_node->fe;
3806 struct extfile_entry *efe = udf_node->efe;
3807 uid_t uid;
3808 gid_t gid;
3809
3810 UDF_LOCK_NODE(udf_node, 0);
3811 if (fe) {
3812 uid = (uid_t)udf_rw32(fe->uid);
3813 gid = (gid_t)udf_rw32(fe->gid);
3814 } else {
3815 assert(udf_node->efe);
3816 uid = (uid_t)udf_rw32(efe->uid);
3817 gid = (gid_t)udf_rw32(efe->gid);
3818 }
3819
3820 /* do the uid/gid translation game */
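	/* a uid/gid of -1 (0xffffffff) on disc means `not specified' */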
3821 if ((uid == (uid_t) -1) && (gid == (gid_t) -1)) {
3822 uid = ump->mount_args.anon_uid;
3823 gid = ump->mount_args.anon_gid;
3824 }
3825 *uidp = uid;
3826 *gidp = gid;
3827
3828 UDF_UNLOCK_NODE(udf_node, 0);
3829 }
3830
3831
3832 void
3833 udf_setownership(struct udf_node *udf_node, uid_t uid, gid_t gid)
3834 {
3835 struct udf_mount *ump = udf_node->ump;
3836 struct file_entry *fe = udf_node->fe;
3837 struct extfile_entry *efe = udf_node->efe;
3838 uid_t nobody_uid;
3839 gid_t nobody_gid;
3840
3841 UDF_LOCK_NODE(udf_node, 0);
3842
3843 /* do the uid/gid translation game */
3844 nobody_uid = ump->mount_args.nobody_uid;
3845 nobody_gid = ump->mount_args.nobody_gid;
3846 if ((uid == nobody_uid) && (gid == nobody_gid)) {
3847 uid = (uid_t) -1;
3848 gid = (gid_t) -1;
3849 }
3850
3851 if (fe) {
3852 fe->uid = udf_rw32((uint32_t) uid);
3853 fe->gid = udf_rw32((uint32_t) gid);
3854 } else {
3855 efe->uid = udf_rw32((uint32_t) uid);
3856 efe->gid = udf_rw32((uint32_t) gid);
3857 }
3858
3859 UDF_UNLOCK_NODE(udf_node, 0);
3860 }
3861
3862
3863 /* --------------------------------------------------------------------- */
3864
3865 /*
3866 * UDF dirhash implementation
3867 */
3868
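/*
 * Each directory node can carry a `dirhash': a cache mapping the djb2 hash
 * of a file name (hash * 33 + c, seeded with 5381) to the directory offset
 * and fid size of its file identifier descriptor. Deleted entries are kept
 * on a separate free_entries list so their slots can be recycled. All
 * dirhashes live on a global LRU queue (udf_dirhash_queue) and the total
 * memory accounted in udf_dirhashsize is bounded by udf_maxdirhashsize;
 * unreferenced dirhashes are purged from the LRU tail when the limit would
 * be exceeded.
 */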
3869 static uint32_t
3870 udf_dirhash_hash(const char *str, int namelen)
3871 {
3872 uint32_t hash = 5381;
3873 int i, c;
3874
3875 for (i = 0; i < namelen; i++) {
3876 c = *str++;
3877 hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
3878 }
3879 return hash;
3880 }
3881
3882
3883 static void
3884 udf_dirhash_purge(struct udf_dirhash *dirh)
3885 {
3886 struct udf_dirhash_entry *dirh_e;
3887 uint32_t hashline;
3888
3889 if (dirh == NULL)
3890 return;
3891
3892 if (dirh->size == 0)
3893 return;
3894
3895 for (hashline = 0; hashline < UDF_DIRHASH_HASHSIZE; hashline++) {
3896 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
3897 while (dirh_e) {
3898 LIST_REMOVE(dirh_e, next);
3899 pool_put(&udf_dirhash_entry_pool, dirh_e);
3900 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
3901 }
3902 }
3903 dirh_e = LIST_FIRST(&dirh->free_entries);
3904
3905 while (dirh_e) {
3906 LIST_REMOVE(dirh_e, next);
3907 pool_put(&udf_dirhash_entry_pool, dirh_e);
3908 		dirh_e = LIST_FIRST(&dirh->free_entries);
3909 }
3910
3911 dirh->flags &= ~UDF_DIRH_COMPLETE;
3912 dirh->flags |= UDF_DIRH_PURGED;
3913
3914 udf_dirhashsize -= dirh->size;
3915 dirh->size = 0;
3916 }
3917
3918
3919 static void
3920 udf_dirhash_destroy(struct udf_dirhash **dirhp)
3921 {
3922 struct udf_dirhash *dirh = *dirhp;
3923
3924 if (dirh == NULL)
3925 return;
3926
3927 mutex_enter(&udf_dirhashmutex);
3928
3929 udf_dirhash_purge(dirh);
3930 TAILQ_REMOVE(&udf_dirhash_queue, dirh, next);
3931 pool_put(&udf_dirhash_pool, dirh);
3932
3933 *dirhp = NULL;
3934
3935 mutex_exit(&udf_dirhashmutex);
3936 }
3937
3938
3939 static void
3940 udf_dirhash_get(struct udf_dirhash **dirhp)
3941 {
3942 struct udf_dirhash *dirh;
3943 uint32_t hashline;
3944
3945 mutex_enter(&udf_dirhashmutex);
3946
3947 dirh = *dirhp;
3948 if (*dirhp == NULL) {
3949 dirh = pool_get(&udf_dirhash_pool, PR_WAITOK);
3950 *dirhp = dirh;
3951 memset(dirh, 0, sizeof(struct udf_dirhash));
3952 for (hashline = 0; hashline < UDF_DIRHASH_HASHSIZE; hashline++)
3953 LIST_INIT(&dirh->entries[hashline]);
3954 dirh->size = 0;
3955 dirh->refcnt = 0;
3956 dirh->flags = 0;
3957 } else {
3958 TAILQ_REMOVE(&udf_dirhash_queue, dirh, next);
3959 }
3960
3961 dirh->refcnt++;
3962 TAILQ_INSERT_HEAD(&udf_dirhash_queue, dirh, next);
3963
3964 mutex_exit(&udf_dirhashmutex);
3965 }
3966
3967
3968 static void
3969 udf_dirhash_put(struct udf_dirhash *dirh)
3970 {
3971 mutex_enter(&udf_dirhashmutex);
3972 dirh->refcnt--;
3973 mutex_exit(&udf_dirhashmutex);
3974 }
3975
3976
3977 static void
3978 udf_dirhash_enter(struct udf_node *dir_node, struct fileid_desc *fid,
3979 struct dirent *dirent, uint64_t offset, uint32_t fid_size, int new)
3980 {
3981 struct udf_dirhash *dirh, *del_dirh, *prev_dirh;
3982 struct udf_dirhash_entry *dirh_e;
3983 uint32_t hashvalue, hashline;
3984 int entrysize;
3985
3986 /* make sure we have a dirhash to work on */
3987 dirh = dir_node->dir_hash;
3988 KASSERT(dirh);
3989 KASSERT(dirh->refcnt > 0);
3990
3991 	/* nothing to do if this isn't a new entry and the dirhash is complete */
3992 if (!new && (dirh->flags & UDF_DIRH_COMPLETE))
3993 return;
3994
3995 /* calculate our hash */
3996 hashvalue = udf_dirhash_hash(dirent->d_name, dirent->d_namlen);
3997 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
3998
3999 /* lookup and insert entry if not there yet */
4000 LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) {
4001 /* check for hash collision */
4002 if (dirh_e->hashvalue != hashvalue)
4003 continue;
4004 if (dirh_e->offset != offset)
4005 continue;
4006 /* got it already */
4007 KASSERT(dirh_e->d_namlen == dirent->d_namlen);
4008 KASSERT(dirh_e->fid_size == fid_size);
4009 return;
4010 }
4011
4012 DPRINTF(DIRHASH, ("dirhash enter %"PRIu64", %d, %d for `%*.*s`\n",
4013 offset, fid_size, dirent->d_namlen,
4014 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4015
4016 /* check if entry is in free space list */
4017 LIST_FOREACH(dirh_e, &dirh->free_entries, next) {
4018 if (dirh_e->offset == offset) {
4019 DPRINTF(DIRHASH, ("\tremoving free entry\n"));
4020 LIST_REMOVE(dirh_e, next);
4021 break;
4022 }
4023 }
4024
4025 /* ensure we are not passing the dirhash limit */
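	/* if so, purge unreferenced dirhashes from the LRU tail of
	 * udf_dirhash_queue until the new entry fits; never purge ourselves */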
4026 entrysize = sizeof(struct udf_dirhash_entry);
4027 if (udf_dirhashsize + entrysize > udf_maxdirhashsize) {
4028 del_dirh = TAILQ_LAST(&udf_dirhash_queue, _udf_dirhash);
4029 KASSERT(del_dirh);
4030 while (udf_dirhashsize + entrysize > udf_maxdirhashsize) {
4031 /* no use trying to delete myself */
4032 if (del_dirh == dirh)
4033 break;
4034 prev_dirh = TAILQ_PREV(del_dirh, _udf_dirhash, next);
4035 if (del_dirh->refcnt == 0)
4036 udf_dirhash_purge(del_dirh);
4037 del_dirh = prev_dirh;
4038 }
4039 }
4040
4041 /* add to the hashline */
4042 dirh_e = pool_get(&udf_dirhash_entry_pool, PR_WAITOK);
4043 memset(dirh_e, 0, sizeof(struct udf_dirhash_entry));
4044
4045 dirh_e->hashvalue = hashvalue;
4046 dirh_e->offset = offset;
4047 dirh_e->d_namlen = dirent->d_namlen;
4048 dirh_e->fid_size = fid_size;
4049
4050 dirh->size += sizeof(struct udf_dirhash_entry);
4051 udf_dirhashsize += sizeof(struct udf_dirhash_entry);
4052 LIST_INSERT_HEAD(&dirh->entries[hashline], dirh_e, next);
4053 }
4054
4055
4056 static void
4057 udf_dirhash_enter_freed(struct udf_node *dir_node, uint64_t offset,
4058 uint32_t fid_size)
4059 {
4060 struct udf_dirhash *dirh;
4061 struct udf_dirhash_entry *dirh_e;
4062
4063 /* make sure we have a dirhash to work on */
4064 dirh = dir_node->dir_hash;
4065 KASSERT(dirh);
4066 KASSERT(dirh->refcnt > 0);
4067
4068 #ifdef DEBUG
4069 /* check for double entry of free space */
4070 LIST_FOREACH(dirh_e, &dirh->free_entries, next)
4071 KASSERT(dirh_e->offset != offset);
4072 #endif
4073
4074 DPRINTF(DIRHASH, ("dirhash enter FREED %"PRIu64", %d\n",
4075 offset, fid_size));
4076 dirh_e = pool_get(&udf_dirhash_entry_pool, PR_WAITOK);
4077 memset(dirh_e, 0, sizeof(struct udf_dirhash_entry));
4078
4079 dirh_e->hashvalue = 0; /* not relevant */
4080 dirh_e->offset = offset;
4081 dirh_e->d_namlen = 0; /* not relevant */
4082 dirh_e->fid_size = fid_size;
4083
4084 /* XXX it might be preferable to append them at the tail */
4085 LIST_INSERT_HEAD(&dirh->free_entries, dirh_e, next);
4086 dirh->size += sizeof(struct udf_dirhash_entry);
4087 udf_dirhashsize += sizeof(struct udf_dirhash_entry);
4088 }
4089
4090
4091 static void
4092 udf_dirhash_remove(struct udf_node *dir_node, struct dirent *dirent,
4093 uint64_t offset, uint32_t fid_size)
4094 {
4095 struct udf_dirhash *dirh;
4096 struct udf_dirhash_entry *dirh_e;
4097 uint32_t hashvalue, hashline;
4098
4099 DPRINTF(DIRHASH, ("dirhash remove %"PRIu64", %d for `%*.*s`\n",
4100 offset, fid_size,
4101 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4102
4103 /* make sure we have a dirhash to work on */
4104 dirh = dir_node->dir_hash;
4105 KASSERT(dirh);
4106 KASSERT(dirh->refcnt > 0);
4107
4108 /* calculate our hash */
4109 hashvalue = udf_dirhash_hash(dirent->d_name, dirent->d_namlen);
4110 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
4111
4112 /* lookup entry */
4113 LIST_FOREACH(dirh_e, &dirh->entries[hashline], next) {
4114 /* check for hash collision */
4115 if (dirh_e->hashvalue != hashvalue)
4116 continue;
4117 if (dirh_e->offset != offset)
4118 continue;
4119
4120 /* got it! */
4121 KASSERT(dirh_e->d_namlen == dirent->d_namlen);
4122 KASSERT(dirh_e->fid_size == fid_size);
4123 LIST_REMOVE(dirh_e, next);
4124 dirh->size -= sizeof(struct udf_dirhash_entry);
4125 udf_dirhashsize -= sizeof(struct udf_dirhash_entry);
4126
4127 udf_dirhash_enter_freed(dir_node, offset, fid_size);
4128 return;
4129 }
4130
4131 /* not found! */
4132 panic("dirhash_remove couldn't find entry in hash table\n");
4133 }
4134
4135
4136 /* BUGALERT: don't use the result longer than needed, and never past the node lock */
4137 /* call with *result == NULL initially; returns nonzero while more candidates exist */
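/*
 * Typical call pattern (cf. udf_lookup_name_in_dir() below):
 *
 *	dirh_e = NULL;
 *	while (udf_dirhash_lookup(dir_node, name, namelen, &dirh_e)) {
 *		... dirh_e->offset is a candidate; verify the on-disc name ...
 *	}
 */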
4138 static int
4139 udf_dirhash_lookup(struct udf_node *dir_node, const char *d_name, int d_namlen,
4140 struct udf_dirhash_entry **result)
4141 {
4142 struct udf_dirhash *dirh;
4143 struct udf_dirhash_entry *dirh_e;
4144 uint32_t hashvalue, hashline;
4145
4146 KASSERT(VOP_ISLOCKED(dir_node->vnode));
4147
4148 /* make sure we have a dirhash to work on */
4149 dirh = dir_node->dir_hash;
4150 KASSERT(dirh);
4151 KASSERT(dirh->refcnt > 0);
4152
4153 /* start where we were */
4154 if (*result) {
4155 KASSERT(dir_node->dir_hash);
4156 dirh_e = *result;
4157
4158 /* retrieve information to avoid recalculation and advance */
4159 hashvalue = dirh_e->hashvalue;
4160 dirh_e = LIST_NEXT(*result, next);
4161 } else {
4162 /* calculate our hash and lookup all entries in hashline */
4163 hashvalue = udf_dirhash_hash(d_name, d_namlen);
4164 hashline = hashvalue & UDF_DIRHASH_HASHMASK;
4165 dirh_e = LIST_FIRST(&dirh->entries[hashline]);
4166 }
4167
4168 for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) {
4169 /* check for hash collision */
4170 if (dirh_e->hashvalue != hashvalue)
4171 continue;
4172 if (dirh_e->d_namlen != d_namlen)
4173 continue;
4174 /* might have an entry in the cache */
4175 *result = dirh_e;
4176 return 1;
4177 }
4178
4179 *result = NULL;
4180 return 0;
4181 }
4182
4183
4184 /* BUGALERT: don't use the result longer than needed, and never past the node lock */
4185 /* call with *result == NULL initially; returns nonzero while more candidates exist */
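/* see the free-slot scan in udf_dir_attach() below for the intended call pattern */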
4186 static int
4187 udf_dirhash_lookup_freed(struct udf_node *dir_node, uint32_t min_fidsize,
4188 struct udf_dirhash_entry **result)
4189 {
4190 struct udf_dirhash *dirh;
4191 struct udf_dirhash_entry *dirh_e;
4192
4193 KASSERT(VOP_ISLOCKED(dir_node->vnode));
4194
4195 /* make sure we have a dirhash to work on */
4196 dirh = dir_node->dir_hash;
4197 KASSERT(dirh);
4198 KASSERT(dirh->refcnt > 0);
4199
4200 /* start where we were */
4201 if (*result) {
4202 KASSERT(dir_node->dir_hash);
4203 dirh_e = LIST_NEXT(*result, next);
4204 } else {
4205 /* lookup all entries that match */
4206 dirh_e = LIST_FIRST(&dirh->free_entries);
4207 }
4208
4209 for (; dirh_e; dirh_e = LIST_NEXT(dirh_e, next)) {
4210 /* check for minimum size */
4211 if (dirh_e->fid_size < min_fidsize)
4212 continue;
4213 /* might be a candidate */
4214 *result = dirh_e;
4215 return 1;
4216 }
4217
4218 *result = NULL;
4219 return 0;
4220 }
4221
4222
4223 static int
4224 udf_dirhash_fill(struct udf_node *dir_node)
4225 {
4226 struct vnode *dvp = dir_node->vnode;
4227 struct udf_dirhash *dirh;
4228 struct file_entry *fe = dir_node->fe;
4229 struct extfile_entry *efe = dir_node->efe;
4230 struct fileid_desc *fid;
4231 struct dirent *dirent;
4232 uint64_t file_size, pre_diroffset, diroffset;
4233 uint32_t lb_size;
4234 int error;
4235
4236 /* make sure we have a dirhash to work on */
4237 dirh = dir_node->dir_hash;
4238 KASSERT(dirh);
4239 KASSERT(dirh->refcnt > 0);
4240
4241 if (dirh->flags & UDF_DIRH_BROKEN)
4242 return EIO;
4243 if (dirh->flags & UDF_DIRH_COMPLETE)
4244 return 0;
4245
4246 /* make sure we have a clean dirhash to add to */
4247 udf_dirhash_purge(dirh);
4248
4249 /* get directory filesize */
4250 if (fe) {
4251 file_size = udf_rw64(fe->inf_len);
4252 } else {
4253 assert(efe);
4254 file_size = udf_rw64(efe->inf_len);
4255 }
4256
4257 /* allocate temporary space for fid */
4258 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4259 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4260
4261 /* allocate temporary space for dirent */
4262 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4263
4264 error = 0;
4265 diroffset = 0;
4266 while (diroffset < file_size) {
4267 /* transfer a new fid/dirent */
4268 pre_diroffset = diroffset;
4269 error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
4270 if (error) {
4271 /* TODO what to do? continue but not add? */
4272 dirh->flags |= UDF_DIRH_BROKEN;
4273 udf_dirhash_purge(dirh);
4274 break;
4275 }
4276
4277 if ((fid->file_char & UDF_FILE_CHAR_DEL)) {
4278 /* register deleted extent for reuse */
4279 udf_dirhash_enter_freed(dir_node, pre_diroffset,
4280 udf_fidsize(fid));
4281 } else {
4282 /* append to the dirhash */
4283 udf_dirhash_enter(dir_node, fid, dirent, pre_diroffset,
4284 udf_fidsize(fid), 0);
4285 }
4286 }
4287 dirh->flags |= UDF_DIRH_COMPLETE;
4288
4289 free(fid, M_UDFTEMP);
4290 free(dirent, M_UDFTEMP);
4291
4292 return error;
4293 }
4294
4295
4296 /* --------------------------------------------------------------------- */
4297
4298 /*
4299 * Directory read and manipulation functions.
4300 *
4301  * Note that if the file is found, the cached diroffset position *before* the
4302  * advance is remembered. Thus if the same filename is looked up again right
4303  * after this lookup, it is found immediately.
4304 */
4305
4306 int
4307 udf_lookup_name_in_dir(struct vnode *vp, const char *name, int namelen,
4308 struct long_ad *icb_loc, int *found)
4309 {
4310 struct udf_node *dir_node = VTOI(vp);
4311 struct udf_dirhash_entry *dirh_ep;
4312 struct fileid_desc *fid;
4313 struct dirent *dirent;
4314 uint64_t diroffset;
4315 uint32_t lb_size;
4316 int hit, error;
4317
4318 /* set default return */
4319 *found = 0;
4320
4321 	/* get our dirhash and make sure it's read in */
4322 udf_dirhash_get(&dir_node->dir_hash);
4323 error = udf_dirhash_fill(dir_node);
4324 if (error) {
4325 udf_dirhash_put(dir_node->dir_hash);
4326 return error;
4327 }
4328
4329 /* allocate temporary space for fid */
4330 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4331 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4332 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4333
4334 DPRINTF(DIRHASH, ("dirhash_lookup looking for `%*.*s`\n",
4335 namelen, namelen, name));
4336
4337 /* search our dirhash hits */
4338 memset(icb_loc, 0, sizeof(*icb_loc));
4339 dirh_ep = NULL;
4340 for (;;) {
4341 hit = udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep);
4342 /* if no hit, abort the search */
4343 if (!hit)
4344 break;
4345
4346 /* check this hit */
4347 diroffset = dirh_ep->offset;
4348
4349 /* transfer a new fid/dirent */
4350 error = udf_read_fid_stream(vp, &diroffset, fid, dirent);
4351 if (error)
4352 break;
4353
4354 DPRINTF(DIRHASH, ("dirhash_lookup\tchecking `%*.*s`\n",
4355 dirent->d_namlen, dirent->d_namlen, dirent->d_name));
4356
4357 		/* see if it's our entry */
4358 KASSERT(dirent->d_namlen == namelen);
4359 if (strncmp(dirent->d_name, name, namelen) == 0) {
4360 *found = 1;
4361 *icb_loc = fid->icb;
4362 break;
4363 }
4364 }
4365 free(fid, M_UDFTEMP);
4366 free(dirent, M_UDFTEMP);
4367
4368 udf_dirhash_put(dir_node->dir_hash);
4369
4370 return error;
4371 }
4372
4373 /* --------------------------------------------------------------------- */
4374
4375 static int
4376 udf_create_new_fe(struct udf_mount *ump, struct file_entry *fe, int file_type,
4377 struct long_ad *node_icb, struct long_ad *parent_icb,
4378 uint64_t parent_unique_id)
4379 {
4380 struct timespec now;
4381 struct icb_tag *icb;
4382 struct filetimes_extattr_entry *ft_extattr;
4383 uint64_t unique_id;
4384 uint32_t fidsize, lb_num;
4385 uint8_t *bpos;
4386 int crclen, attrlen;
4387
4388 lb_num = udf_rw32(node_icb->loc.lb_num);
4389 udf_inittag(ump, &fe->tag, TAGID_FENTRY, lb_num);
4390 icb = &fe->icbtag;
4391
4392 /*
4393 	 * Always use strategy type 4 unless on WORM which we don't support
4394 * (yet). Fill in defaults and set for internal allocation of data.
4395 */
4396 icb->strat_type = udf_rw16(4);
4397 icb->max_num_entries = udf_rw16(1);
4398 icb->file_type = file_type; /* 8 bit */
4399 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
4400
4401 fe->perm = udf_rw32(0x7fff); /* all is allowed */
4402 fe->link_cnt = udf_rw16(0); /* explicit setting */
4403
4404 fe->ckpoint = udf_rw32(1); /* user supplied file version */
4405
4406 vfs_timestamp(&now);
4407 udf_timespec_to_timestamp(&now, &fe->atime);
4408 udf_timespec_to_timestamp(&now, &fe->attrtime);
4409 udf_timespec_to_timestamp(&now, &fe->mtime);
4410
4411 udf_set_regid(&fe->imp_id, IMPL_NAME);
4412 udf_add_impl_regid(ump, &fe->imp_id);
4413
4414 unique_id = udf_advance_uniqueid(ump);
4415 fe->unique_id = udf_rw64(unique_id);
4416 fe->l_ea = udf_rw32(0);
4417
4418 /* create extended attribute to record our creation time */
4419 attrlen = UDF_FILETIMES_ATTR_SIZE(1);
4420 ft_extattr = malloc(attrlen, M_UDFTEMP, M_WAITOK);
4421 memset(ft_extattr, 0, attrlen);
4422 ft_extattr->hdr.type = udf_rw32(UDF_FILETIMES_ATTR_NO);
4423 ft_extattr->hdr.subtype = 1; /* [4/48.10.5] */
4424 ft_extattr->hdr.a_l = udf_rw32(UDF_FILETIMES_ATTR_SIZE(1));
4425 ft_extattr->d_l = udf_rw32(UDF_TIMESTAMP_SIZE); /* one item */
4426 ft_extattr->existence = UDF_FILETIMES_FILE_CREATION;
4427 udf_timespec_to_timestamp(&now, &ft_extattr->times[0]);
4428
4429 udf_extattr_insert_internal(ump, (union dscrptr *) fe,
4430 (struct extattr_entry *) ft_extattr);
4431 free(ft_extattr, M_UDFTEMP);
4432
4433 	/* if it's a directory, create '..' */
4434 bpos = (uint8_t *) fe->data + udf_rw32(fe->l_ea);
4435 fidsize = 0;
4436 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
4437 fidsize = udf_create_parentfid(ump,
4438 (struct fileid_desc *) bpos, parent_icb,
4439 parent_unique_id);
4440 }
4441
4442 /* record fidlength information */
4443 fe->inf_len = udf_rw64(fidsize);
4444 fe->l_ad = udf_rw32(fidsize);
4445 fe->logblks_rec = udf_rw64(0); /* intern */
4446
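	/*
	 * The CRC covers everything after the descriptor tag: the fixed fe
	 * fields, the extended attribute area and the embedded parent fid,
	 * if any. The -1 presumably discounts the one-byte data[] placeholder
	 * that sizeof(struct file_entry) includes.
	 */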
4447 crclen = sizeof(struct file_entry) - 1 - UDF_DESC_TAG_LENGTH;
4448 crclen += udf_rw32(fe->l_ea) + fidsize;
4449 fe->tag.desc_crc_len = udf_rw16(crclen);
4450
4451 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fe);
4452
4453 return fidsize;
4454 }
4455
4456 /* --------------------------------------------------------------------- */
4457
4458 static int
4459 udf_create_new_efe(struct udf_mount *ump, struct extfile_entry *efe,
4460 int file_type, struct long_ad *node_icb, struct long_ad *parent_icb,
4461 uint64_t parent_unique_id)
4462 {
4463 struct timespec now;
4464 struct icb_tag *icb;
4465 uint64_t unique_id;
4466 uint32_t fidsize, lb_num;
4467 uint8_t *bpos;
4468 int crclen;
4469
4470 lb_num = udf_rw32(node_icb->loc.lb_num);
4471 udf_inittag(ump, &efe->tag, TAGID_EXTFENTRY, lb_num);
4472 icb = &efe->icbtag;
4473
4474 /*
4475 	 * Always use strategy type 4 unless on WORM which we don't support
4476 * (yet). Fill in defaults and set for internal allocation of data.
4477 */
4478 icb->strat_type = udf_rw16(4);
4479 icb->max_num_entries = udf_rw16(1);
4480 icb->file_type = file_type; /* 8 bit */
4481 icb->flags = udf_rw16(UDF_ICB_INTERN_ALLOC);
4482
4483 efe->perm = udf_rw32(0x7fff); /* all is allowed */
4484 efe->link_cnt = udf_rw16(0); /* explicit setting */
4485
4486 efe->ckpoint = udf_rw32(1); /* user supplied file version */
4487
4488 vfs_timestamp(&now);
4489 udf_timespec_to_timestamp(&now, &efe->ctime);
4490 udf_timespec_to_timestamp(&now, &efe->atime);
4491 udf_timespec_to_timestamp(&now, &efe->attrtime);
4492 udf_timespec_to_timestamp(&now, &efe->mtime);
4493
4494 udf_set_regid(&efe->imp_id, IMPL_NAME);
4495 udf_add_impl_regid(ump, &efe->imp_id);
4496
4497 unique_id = udf_advance_uniqueid(ump);
4498 efe->unique_id = udf_rw64(unique_id);
4499 efe->l_ea = udf_rw32(0);
4500
4501 	/* if it's a directory, create '..' */
4502 bpos = (uint8_t *) efe->data + udf_rw32(efe->l_ea);
4503 fidsize = 0;
4504 if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
4505 fidsize = udf_create_parentfid(ump,
4506 (struct fileid_desc *) bpos, parent_icb,
4507 parent_unique_id);
4508 }
4509
4510 /* record fidlength information */
4511 efe->obj_size = udf_rw64(fidsize);
4512 efe->inf_len = udf_rw64(fidsize);
4513 efe->l_ad = udf_rw32(fidsize);
4514 efe->logblks_rec = udf_rw64(0); /* intern */
4515
4516 crclen = sizeof(struct extfile_entry) - 1 - UDF_DESC_TAG_LENGTH;
4517 crclen += udf_rw32(efe->l_ea) + fidsize;
4518 efe->tag.desc_crc_len = udf_rw16(crclen);
4519
4520 (void) udf_validate_tag_and_crc_sums((union dscrptr *) efe);
4521
4522 return fidsize;
4523 }
4524
4525 /* --------------------------------------------------------------------- */
4526
4527 int
4528 udf_dir_detach(struct udf_mount *ump, struct udf_node *dir_node,
4529 struct udf_node *udf_node, struct componentname *cnp)
4530 {
4531 struct vnode *dvp = dir_node->vnode;
4532 struct udf_dirhash_entry *dirh_ep;
4533 struct file_entry *fe = dir_node->fe;
4534 struct extfile_entry *efe = dir_node->efe;
4535 struct fileid_desc *fid;
4536 struct dirent *dirent;
4537 uint64_t file_size, diroffset;
4538 uint32_t lb_size, fidsize;
4539 int found, error;
4540 char const *name = cnp->cn_nameptr;
4541 int namelen = cnp->cn_namelen;
4542 int hit, refcnt;
4543
4544 	/* get our dirhash and make sure it's read in */
4545 udf_dirhash_get(&dir_node->dir_hash);
4546 error = udf_dirhash_fill(dir_node);
4547 if (error) {
4548 udf_dirhash_put(dir_node->dir_hash);
4549 return error;
4550 }
4551
4552 /* get directory filesize */
4553 if (fe) {
4554 file_size = udf_rw64(fe->inf_len);
4555 } else {
4556 assert(efe);
4557 file_size = udf_rw64(efe->inf_len);
4558 }
4559
4560 /* allocate temporary space for fid */
4561 lb_size = udf_rw32(dir_node->ump->logical_vol->lb_size);
4562 fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
4563 dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
4564
4565 /* search our dirhash hits */
4566 found = 0;
4567 dirh_ep = NULL;
4568 for (;;) {
4569 hit = udf_dirhash_lookup(dir_node, name, namelen, &dirh_ep);
4570 /* if no hit, abort the search */
4571 if (!hit)
4572 break;
4573
4574 /* check this hit */
4575 diroffset = dirh_ep->offset;
4576
4577 /* transfer a new fid/dirent */
4578 error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
4579 if (error)
4580 break;
4581
4582 		/* see if it's our entry */
4583 KASSERT(dirent->d_namlen == namelen);
4584 if (strncmp(dirent->d_name, name, namelen) == 0) {
4585 found = 1;
4586 break;
4587 }
4588 }
4589
4590 if (!found)
4591 error = ENOENT;
4592 if (error)
4593 goto error_out;
4594
4595 /* mark deleted */
4596 fid->file_char |= UDF_FILE_CHAR_DEL;
4597 #ifdef UDF_COMPLETE_DELETE
4598 memset(&fid->icb, 0, sizeof(fid->icb));
4599 #endif
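	/*
	 * With UDF_COMPLETE_DELETE the icb reference in the FID is wiped as
	 * well, so a later udf_dir_attach() can reuse the slot without
	 * re-reading it (see its #ifndef UDF_COMPLETE_DELETE block).
	 */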
4600 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
4601
4602 /* get size of fid and compensate for the read_fid_stream advance */
4603 fidsize = udf_fidsize(fid);
4604 diroffset -= fidsize;
4605
4606 /* write out */
4607 error = vn_rdwr(UIO_WRITE, dir_node->vnode,
4608 fid, fidsize, diroffset,
4609 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
4610 FSCRED, NULL, NULL);
4611 if (error)
4612 goto error_out;
4613
4614 /* get reference count of attached node */
4615 if (udf_node->fe) {
4616 refcnt = udf_rw16(udf_node->fe->link_cnt);
4617 } else {
4618 KASSERT(udf_node->efe);
4619 refcnt = udf_rw16(udf_node->efe->link_cnt);
4620 }
4621 #ifdef UDF_COMPLETE_DELETE
4622 	/* subtract reference counter in attached node */
4623 refcnt -= 1;
4624 if (udf_node->fe) {
4625 udf_node->fe->link_cnt = udf_rw16(refcnt);
4626 } else {
4627 udf_node->efe->link_cnt = udf_rw16(refcnt);
4628 }
4629
4630 /* prevent writeout when refcnt == 0 */
4631 if (refcnt == 0)
4632 udf_node->i_flags |= IN_DELETED;
4633
4634 if (fid->file_char & UDF_FILE_CHAR_DIR) {
4635 int drefcnt;
4636
4637 		/* subtract reference counter in directory node */
4638 		/* note: subtract 2 (?) since it was also backreferenced */
4639 if (dir_node->fe) {
4640 drefcnt = udf_rw16(dir_node->fe->link_cnt);
4641 drefcnt -= 1;
4642 dir_node->fe->link_cnt = udf_rw16(drefcnt);
4643 } else {
4644 KASSERT(dir_node->efe);
4645 drefcnt = udf_rw16(dir_node->efe->link_cnt);
4646 drefcnt -= 1;
4647 dir_node->efe->link_cnt = udf_rw16(drefcnt);
4648 }
4649 }
4650
4651 udf_node->i_flags |= IN_MODIFIED;
4652 dir_node->i_flags |= IN_MODIFIED;
4653 #endif
4654 /* if it is/was a hardlink adjust the file count */
4655 if (refcnt > 0)
4656 udf_adjust_filecount(udf_node, -1);
4657
4658 /* remove from the dirhash */
4659 udf_dirhash_remove(dir_node, dirent, diroffset,
4660 udf_fidsize(fid));
4661
4662 error_out:
4663 free(fid, M_UDFTEMP);
4664 free(dirent, M_UDFTEMP);
4665
4666 udf_dirhash_put(dir_node->dir_hash);
4667
4668 return error;
4669 }
4670
4671 /* --------------------------------------------------------------------- */
4672
4673 /*
4674  * We are not allowed to split the fid tag itself over a logical block, so
4675  * check the space remaining in the logical block.
4676  *
4677  * We try to select the smallest candidate for recycling or, when none is
4678  * found, append a new one at the end of the directory.
4679 */
4680
4681 int
4682 udf_dir_attach(struct udf_mount *ump, struct udf_node *dir_node,
4683 struct udf_node *udf_node, struct vattr *vap, struct componentname *cnp)
4684 {
4685 struct vnode *dvp = dir_node->vnode;
4686 struct udf_dirhash_entry *dirh_ep;
4687 struct fileid_desc *fid;
4688 struct icb_tag *icbtag;
4689 struct charspec osta_charspec;
4690 struct dirent dirent;
4691 uint64_t unique_id, dir_size, diroffset;
4692 uint64_t fid_pos, end_fid_pos, chosen_fid_pos;
4693 uint32_t chosen_size, chosen_size_diff;
4694 int lb_size, lb_rest, fidsize, this_fidsize, size_diff;
4695 int file_char, refcnt, icbflags, addr_type, hit, error;
4696
4697 	/* get our dirhash and make sure it's read in */
4698 udf_dirhash_get(&dir_node->dir_hash);
4699 error = udf_dirhash_fill(dir_node);
4700 if (error) {
4701 udf_dirhash_put(dir_node->dir_hash);
4702 return error;
4703 }
4704
4705 /* get info */
4706 lb_size = udf_rw32(ump->logical_vol->lb_size);
4707 udf_osta_charset(&osta_charspec);
4708
4709 if (dir_node->fe) {
4710 dir_size = udf_rw64(dir_node->fe->inf_len);
4711 icbtag = &dir_node->fe->icbtag;
4712 } else {
4713 dir_size = udf_rw64(dir_node->efe->inf_len);
4714 icbtag = &dir_node->efe->icbtag;
4715 }
4716
4717 icbflags = udf_rw16(icbtag->flags);
4718 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
4719
4720 if (udf_node->fe) {
4721 unique_id = udf_rw64(udf_node->fe->unique_id);
4722 refcnt = udf_rw16(udf_node->fe->link_cnt);
4723 } else {
4724 unique_id = udf_rw64(udf_node->efe->unique_id);
4725 refcnt = udf_rw16(udf_node->efe->link_cnt);
4726 }
4727
4728 if (refcnt > 0) {
4729 unique_id = udf_advance_uniqueid(ump);
4730 udf_adjust_filecount(udf_node, 1);
4731 }
4732
4733 /* determine file characteristics */
4734 file_char = 0; /* visible non deleted file and not stream metadata */
4735 if (vap->va_type == VDIR)
4736 file_char = UDF_FILE_CHAR_DIR;
4737
4738 /* malloc scrap buffer */
4739 fid = malloc(lb_size, M_TEMP, M_WAITOK);
4740 bzero(fid, lb_size);
4741
4742 /* calculate _minimum_ fid size */
4743 unix_to_udf_name((char *) fid->data, &fid->l_fi,
4744 cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
4745 fidsize = UDF_FID_SIZE + fid->l_fi;
4746 fidsize = (fidsize + 3) & ~3; /* multiple of 4 */
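	/* (x + 3) & ~3 rounds up to the next multiple of 4: e.g. 45 -> 48,
	 * while 44 is left as is */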
4747
4748 /* find position that will fit the FID */
4749 chosen_fid_pos = dir_size;
4750 chosen_size = 0;
4751 chosen_size_diff = UINT_MAX;
4752
4753 /* shut up gcc */
4754 dirent.d_namlen = 0;
4755
4756 /* search our dirhash hits */
4757 error = 0;
4758 dirh_ep = NULL;
4759 for (;;) {
4760 hit = udf_dirhash_lookup_freed(dir_node, fidsize, &dirh_ep);
4761 /* if no hit, abort the search */
4762 if (!hit)
4763 break;
4764
4765 /* check this hit for size */
4766 this_fidsize = dirh_ep->fid_size;
4767
4768 /* check this hit */
4769 fid_pos = dirh_ep->offset;
4770 end_fid_pos = fid_pos + this_fidsize;
4771 size_diff = this_fidsize - fidsize;
4772 lb_rest = lb_size - (end_fid_pos % lb_size);
4773
4774 #ifndef UDF_COMPLETE_DELETE
4775 /* transfer a new fid/dirent */
4776 		error = udf_read_fid_stream(dvp, &fid_pos, fid, &dirent);
4777 if (error)
4778 goto error_out;
4779
4780 /* only reuse entries that are wiped */
4781 /* check if the len + loc are marked zero */
4782 		if (udf_rw32(fid->icb.len) != 0)
4783 continue;
4784 if (udf_rw32(fid->icb.loc.lb_num) != 0)
4785 continue;
4786 		if (udf_rw16(fid->icb.loc.part_num) != 0)
4787 continue;
4788 #endif /* UDF_COMPLETE_DELETE */
4789
4790 		/* select if not splitting the tag and it's smaller */
4791 if ((size_diff >= 0) &&
4792 (size_diff < chosen_size_diff) &&
4793 (lb_rest >= sizeof(struct desc_tag)))
4794 {
4795 /* UDF 2.3.4.2+3 specifies rules for iu size */
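			/* i.e. reuse a slot only when it fits exactly or
			 * leaves at least 32 bytes of slack, enough for the
			 * implementation-use regid used as padding below */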
4796 if ((size_diff == 0) || (size_diff >= 32)) {
4797 chosen_fid_pos = fid_pos;
4798 chosen_size = this_fidsize;
4799 chosen_size_diff = size_diff;
4800 }
4801 }
4802 }
4803
4804
4805 /* extend directory if no other candidate found */
4806 if (chosen_size == 0) {
4807 chosen_fid_pos = dir_size;
4808 chosen_size = fidsize;
4809 chosen_size_diff = 0;
4810
4811 /* special case UDF 2.00+ 2.3.4.4, no splitting up fid tag */
4812 if (addr_type == UDF_ICB_INTERN_ALLOC) {
4813 			/* pre-grow directory to see if we'll switch away from internal allocation */
4814 udf_grow_node(dir_node, dir_size + chosen_size);
4815
4816 icbflags = udf_rw16(icbtag->flags);
4817 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
4818 }
4819
4820 		/* make sure the next fid desc_tag won't be split */
4821 if (addr_type != UDF_ICB_INTERN_ALLOC) {
4822 end_fid_pos = chosen_fid_pos + chosen_size;
4823 lb_rest = lb_size - (end_fid_pos % lb_size);
4824
4825 /* pad with implementation use regid if needed */
4826 if (lb_rest < sizeof(struct desc_tag))
4827 chosen_size += 32;
4828 }
4829 }
4830 chosen_size_diff = chosen_size - fidsize;
4831 diroffset = chosen_fid_pos + chosen_size;
4832
4833 /* populate the FID */
4834 memset(fid, 0, lb_size);
4835 udf_inittag(ump, &fid->tag, TAGID_FID, 0);
4836 fid->file_version_num = udf_rw16(1); /* UDF 2.3.4.1 */
4837 fid->file_char = file_char;
4838 fid->icb = udf_node->loc;
4839 fid->icb.longad_uniqueid = udf_rw32((uint32_t) unique_id);
4840 fid->l_iu = udf_rw16(0);
4841
4842 if (chosen_size > fidsize) {
4843 /* insert implementation-use regid to space it correctly */
4844 fid->l_iu = udf_rw16(chosen_size_diff);
4845
4846 /* set implementation use */
4847 udf_set_regid((struct regid *) fid->data, IMPL_NAME);
4848 udf_add_impl_regid(ump, (struct regid *) fid->data);
4849 }
4850
4851 /* fill in name */
4852 unix_to_udf_name((char *) fid->data + udf_rw16(fid->l_iu),
4853 &fid->l_fi, cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
4854
4855 	fid->tag.desc_crc_len = udf_rw16(chosen_size - UDF_DESC_TAG_LENGTH);
4856 (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
4857
4858 /* writeout FID/update parent directory */
4859 error = vn_rdwr(UIO_WRITE, dvp,
4860 fid, chosen_size, chosen_fid_pos,
4861 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
4862 FSCRED, NULL, NULL);
4863
4864 if (error)
4865 goto error_out;
4866
4867 /* add reference counter in attached node */
4868 if (udf_node->fe) {
4869 refcnt = udf_rw16(udf_node->fe->link_cnt);
4870 udf_node->fe->link_cnt = udf_rw16(refcnt+1);
4871 } else {
4872 KASSERT(udf_node->efe);
4873 refcnt = udf_rw16(udf_node->efe->link_cnt);
4874 udf_node->efe->link_cnt = udf_rw16(refcnt+1);
4875 }
4876
4877 /* mark not deleted if it was... just in case, but do warn */
4878 if (udf_node->i_flags & IN_DELETED) {
4879 printf("udf: warning, marking a file undeleted\n");
4880 udf_node->i_flags &= ~IN_DELETED;
4881 }
4882
4883 if (file_char & UDF_FILE_CHAR_DIR) {
4884 /* add reference counter in directory node for '..' */
4885 if (dir_node->fe) {
4886 refcnt = udf_rw16(dir_node->fe->link_cnt);
4887 refcnt++;
4888 dir_node->fe->link_cnt = udf_rw16(refcnt);
4889 } else {
4890 KASSERT(dir_node->efe);
4891 refcnt = udf_rw16(dir_node->efe->link_cnt);
4892 refcnt++;
4893 dir_node->efe->link_cnt = udf_rw16(refcnt);
4894 }
4895 }
4896
4897 /* append to the dirhash */
4898 dirent.d_namlen = cnp->cn_namelen;
4899 memcpy(dirent.d_name, cnp->cn_nameptr, cnp->cn_namelen);
4900 udf_dirhash_enter(dir_node, fid, &dirent, chosen_fid_pos,
4901 udf_fidsize(fid), 1);
4902
4903 /* note updates */
4904 udf_node->i_flags |= IN_CHANGE | IN_MODIFY; /* | IN_CREATE? */
4905 /* VN_KNOTE(udf_node, ...) */
4906 udf_update(udf_node->vnode, NULL, NULL, NULL, 0);
4907
4908 error_out:
4909 free(fid, M_TEMP);
4910
4911 udf_dirhash_put(dir_node->dir_hash);
4912
4913 return error;
4914 }
4915
4916 /* --------------------------------------------------------------------- */
4917
4918 /*
4919 * Each node can have an attached streamdir node though not recursively. These
4920 * are otherwise known as named substreams/named extended attributes that have
4921 * no size limitations.
4922 *
4923 * `Normal' extended attributes are indicated with a number and are recorded
4924 * in either the fe/efe descriptor itself for small descriptors or recorded in
4925 * the attached extended attribute file. Since these spaces can get
4926 * fragmented, care ought to be taken.
4927 *
4928 * Since the size of the space reserved for allocation descriptors is limited,
4929  * there is a mechanism provided for extending this space; this is done by a
4930  * special extent to allow shrinking of the allocations without breaking the
4931 * linkage to the allocation extent descriptor.
4932 */
4933
4934 int
4935 udf_get_node(struct udf_mount *ump, struct long_ad *node_icb_loc,
4936 struct udf_node **udf_noderes)
4937 {
4938 union dscrptr *dscr;
4939 struct udf_node *udf_node;
4940 struct vnode *nvp;
4941 struct long_ad icb_loc, last_fe_icb_loc;
4942 uint64_t file_size;
4943 uint32_t lb_size, sector, dummy;
4944 uint8_t *file_data;
4945 int udf_file_type, dscr_type, strat, strat4096, needs_indirect;
4946 int slot, eof, error;
4947
4948 DPRINTF(NODE, ("udf_get_node called\n"));
4949 *udf_noderes = udf_node = NULL;
4950
4951 	/* lock to disallow simultaneous creation of the same udf_node */
4952 mutex_enter(&ump->get_node_lock);
4953
4954 DPRINTF(NODE, ("\tlookup in hash table\n"));
4955 /* lookup in hash table */
4956 assert(ump);
4957 assert(node_icb_loc);
4958 udf_node = udf_hash_lookup(ump, node_icb_loc);
4959 if (udf_node) {
4960 DPRINTF(NODE, ("\tgot it from the hash!\n"));
4961 /* vnode is returned locked */
4962 *udf_noderes = udf_node;
4963 mutex_exit(&ump->get_node_lock);
4964 return 0;
4965 }
4966
4967 /* garbage check: translate udf_node_icb_loc to sectornr */
4968 	error = udf_translate_vtop(ump, node_icb_loc, &sector, &dummy);
4969 if (error) {
4970 /* no use, this will fail anyway */
4971 mutex_exit(&ump->get_node_lock);
4972 return EINVAL;
4973 }
4974
4975 /* build udf_node (do initialise!) */
4976 udf_node = pool_get(&udf_node_pool, PR_WAITOK);
4977 memset(udf_node, 0, sizeof(struct udf_node));
4978
4979 DPRINTF(NODE, ("\tget new vnode\n"));
4980 /* give it a vnode */
4981 error = getnewvnode(VT_UDF, ump->vfs_mountp, udf_vnodeop_p, &nvp);
4982 if (error) {
4983 pool_put(&udf_node_pool, udf_node);
4984 mutex_exit(&ump->get_node_lock);
4985 return error;
4986 }
4987
4988 /* always return locked vnode */
4989 if ((error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY))) {
4990 		/* recycle vnode and unlock; simultaneous creations will fail too */
4991 ungetnewvnode(nvp);
4992 mutex_exit(&ump->get_node_lock);
4993 return error;
4994 }
4995
4996 /* initialise crosslinks, note location of fe/efe for hashing */
4997 udf_node->ump = ump;
4998 udf_node->vnode = nvp;
4999 nvp->v_data = udf_node;
5000 udf_node->loc = *node_icb_loc;
5001 udf_node->lockf = 0;
5002 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
5003 cv_init(&udf_node->node_lock, "udf_nlk");
5004 	genfs_node_init(nvp, &udf_genfsops);	/* initialise genfs */
5005 udf_node->outstanding_bufs = 0;
5006 udf_node->outstanding_nodedscr = 0;
5007
5008 /* insert into the hash lookup */
5009 udf_register_node(udf_node);
5010
5011 /* safe to unlock, the entry is in the hash table, vnode is locked */
5012 mutex_exit(&ump->get_node_lock);
5013
5014 icb_loc = *node_icb_loc;
5015 needs_indirect = 0;
5016 strat4096 = 0;
5017 udf_file_type = UDF_ICB_FILETYPE_UNKNOWN;
5018 file_size = 0;
5019 file_data = NULL;
5020 lb_size = udf_rw32(ump->logical_vol->lb_size);
5021
5022 DPRINTF(NODE, ("\tstart reading descriptors\n"));
5023 do {
5024 /* try to read in fe/efe */
5025 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
5026
5027 /* blank sector marks end of sequence, check this */
5028 if ((dscr == NULL) && (!strat4096))
5029 error = ENOENT;
5030
5031 /* break if read error or blank sector */
5032 if (error || (dscr == NULL))
5033 break;
5034
5035 /* process descriptor based on the descriptor type */
5036 dscr_type = udf_rw16(dscr->tag.id);
5037 DPRINTF(NODE, ("\tread descriptor %d\n", dscr_type));
5038
5039 /* if dealing with an indirect entry, follow the link */
5040 if (dscr_type == TAGID_INDIRECTENTRY) {
5041 needs_indirect = 0;
5042 udf_free_logvol_dscr(ump, &icb_loc, dscr);
5043 icb_loc = dscr->inde.indirect_icb;
5044 continue;
5045 }
5046
5047 /* only file entries and extended file entries allowed here */
5048 if ((dscr_type != TAGID_FENTRY) &&
5049 (dscr_type != TAGID_EXTFENTRY)) {
5050 udf_free_logvol_dscr(ump, &icb_loc, dscr);
5051 error = ENOENT;
5052 break;
5053 }
5054
5055 KASSERT(udf_tagsize(dscr, lb_size) == lb_size);
5056
5057 /* choose this one */
5058 last_fe_icb_loc = icb_loc;
5059
5060 /* record and process/update (ext)fentry */
5061 file_data = NULL;
5062 if (dscr_type == TAGID_FENTRY) {
5063 if (udf_node->fe)
5064 udf_free_logvol_dscr(ump, &last_fe_icb_loc,
5065 udf_node->fe);
5066 udf_node->fe = &dscr->fe;
5067 strat = udf_rw16(udf_node->fe->icbtag.strat_type);
5068 udf_file_type = udf_node->fe->icbtag.file_type;
5069 file_size = udf_rw64(udf_node->fe->inf_len);
5070 file_data = udf_node->fe->data;
5071 } else {
5072 if (udf_node->efe)
5073 udf_free_logvol_dscr(ump, &last_fe_icb_loc,
5074 udf_node->efe);
5075 udf_node->efe = &dscr->efe;
5076 strat = udf_rw16(udf_node->efe->icbtag.strat_type);
5077 udf_file_type = udf_node->efe->icbtag.file_type;
5078 file_size = udf_rw64(udf_node->efe->inf_len);
5079 file_data = udf_node->efe->data;
5080 }
5081
5082 /* check recording strategy (structure) */
5083
5084 /*
5085 * Strategy 4096 is a daisy linked chain terminating with an
5086 * unrecorded sector or a TERM descriptor. The next
5087 * descriptor is to be found in the sector that follows the
5088 * current sector.
5089 */
5090 if (strat == 4096) {
5091 strat4096 = 1;
5092 needs_indirect = 1;
5093
5094 			icb_loc.loc.lb_num = udf_rw32(udf_rw32(icb_loc.loc.lb_num) + 1);
5095 }
5096
5097 /*
5098 * Strategy 4 is the normal strategy and terminates, but if
5099 * we're in strategy 4096, we can't have strategy 4 mixed in
5100 */
5101
5102 if (strat == 4) {
5103 if (strat4096) {
5104 error = EINVAL;
5105 break;
5106 }
5107 break; /* done */
5108 }
5109 } while (!error);
5110
5111 /* first round of cleanup code */
5112 if (error) {
5113 DPRINTF(NODE, ("\tnode fe/efe failed!\n"));
5114 /* recycle udf_node */
5115 udf_dispose_node(udf_node);
5116
5117 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5118 nvp->v_data = NULL;
5119 ungetnewvnode(nvp);
5120
5121 return EINVAL; /* error code ok? */
5122 }
5123 DPRINTF(NODE, ("\tnode fe/efe read in fine\n"));
5124
5125 	/* assert no references to dscr anymore beyond this point */
5126 assert((udf_node->fe) || (udf_node->efe));
5127 dscr = NULL;
5128
5129 /*
5130 * Remember where to record an updated version of the descriptor. If
5131 * there is a sequence of indirect entries, icb_loc will have been
5132  * updated. It's the write discipline to allocate new space and to make
5133 * sure the chain is maintained.
5134 *
5135  * `needs_indirect' flags if the next location is to be filled with an
5136  * indirect entry.
5137 */
5138 udf_node->write_loc = icb_loc;
5139 udf_node->needs_indirect = needs_indirect;
5140
5141 /*
5142  * Go through all allocation extents of this descriptor and, when
5143  * encountering a redirect, read in the allocation extension. These are
5144 * daisy-chained.
5145 */
5146 UDF_LOCK_NODE(udf_node, 0);
5147 udf_node->num_extensions = 0;
5148
5149 error = 0;
5150 slot = 0;
5151 for (;;) {
5152 udf_get_adslot(udf_node, slot, &icb_loc, &eof);
5153 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
5154 "lb_num = %d, part = %d\n", slot, eof,
5155 UDF_EXT_FLAGS(udf_rw32(icb_loc.len)),
5156 UDF_EXT_LEN(udf_rw32(icb_loc.len)),
5157 udf_rw32(icb_loc.loc.lb_num),
5158 udf_rw16(icb_loc.loc.part_num)));
5159 if (eof)
5160 break;
5161 slot++;
5162
5163 if (UDF_EXT_FLAGS(udf_rw32(icb_loc.len)) != UDF_EXT_REDIRECT)
5164 continue;
5165
5166 DPRINTF(NODE, ("\tgot redirect extent\n"));
5167 if (udf_node->num_extensions >= UDF_MAX_ALLOC_EXTENTS) {
5168 DPRINTF(ALLOC, ("udf_get_node: implementation limit, "
5169 "too many allocation extensions on "
5170 "udf_node\n"));
5171 error = EINVAL;
5172 break;
5173 }
5174
5175 /* length can only be *one* lb : UDF 2.50/2.3.7.1 */
5176 if (UDF_EXT_LEN(udf_rw32(icb_loc.len)) != lb_size) {
5177 DPRINTF(ALLOC, ("udf_get_node: bad allocation "
5178 "extension size in udf_node\n"));
5179 error = EINVAL;
5180 break;
5181 }
5182
5183 DPRINTF(NODE, ("read allocation extent at lb_num %d\n",
5184 			udf_rw32(icb_loc.loc.lb_num)));
5185 /* load in allocation extent */
5186 error = udf_read_logvol_dscr(ump, &icb_loc, &dscr);
5187 if (error || (dscr == NULL))
5188 break;
5189
5190 /* process read-in descriptor */
5191 dscr_type = udf_rw16(dscr->tag.id);
5192
5193 if (dscr_type != TAGID_ALLOCEXTENT) {
5194 udf_free_logvol_dscr(ump, &icb_loc, dscr);
5195 error = ENOENT;
5196 break;
5197 }
5198
5199 DPRINTF(NODE, ("\trecording redirect extent\n"));
5200 udf_node->ext[udf_node->num_extensions] = &dscr->aee;
5201 udf_node->ext_loc[udf_node->num_extensions] = icb_loc;
5202
5203 udf_node->num_extensions++;
5204
5205 } /* while */
5206 UDF_UNLOCK_NODE(udf_node, 0);
5207
5208 /* second round of cleanup code */
5209 if (error) {
5210 /* recycle udf_node */
5211 udf_dispose_node(udf_node);
5212
5213 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5214 nvp->v_data = NULL;
5215 ungetnewvnode(nvp);
5216
5217 return EINVAL; /* error code ok? */
5218 }
5219
5220 DPRINTF(NODE, ("\tnode read in fine\n"));
5221
5222 /*
5223 * Translate UDF filetypes into vnode types.
5224 *
5225  * System files like the meta main and mirror files are not treated as
5226 * normal files, so we type them as having no type. UDF dictates that
5227 * they are not allowed to be visible.
5228 */
5229
5230 switch (udf_file_type) {
5231 case UDF_ICB_FILETYPE_DIRECTORY :
5232 case UDF_ICB_FILETYPE_STREAMDIR :
5233 nvp->v_type = VDIR;
5234 break;
5235 case UDF_ICB_FILETYPE_BLOCKDEVICE :
5236 nvp->v_type = VBLK;
5237 break;
5238 case UDF_ICB_FILETYPE_CHARDEVICE :
5239 nvp->v_type = VCHR;
5240 break;
5241 case UDF_ICB_FILETYPE_SOCKET :
5242 nvp->v_type = VSOCK;
5243 break;
5244 case UDF_ICB_FILETYPE_FIFO :
5245 nvp->v_type = VFIFO;
5246 break;
5247 case UDF_ICB_FILETYPE_SYMLINK :
5248 nvp->v_type = VLNK;
5249 break;
5250 case UDF_ICB_FILETYPE_VAT :
5251 case UDF_ICB_FILETYPE_META_MAIN :
5252 case UDF_ICB_FILETYPE_META_MIRROR :
5253 nvp->v_type = VNON;
5254 break;
5255 case UDF_ICB_FILETYPE_RANDOMACCESS :
5256 case UDF_ICB_FILETYPE_REALTIME :
5257 nvp->v_type = VREG;
5258 break;
5259 default:
5260 /* YIKES, something else */
5261 nvp->v_type = VNON;
5262 }
5263
5264 /* TODO specfs, fifofs etc etc. vnops setting */
5265
5266 /* don't forget to set vnode's v_size */
5267 uvm_vnp_setsize(nvp, file_size);
5268
5269 /* TODO ext attr and streamdir udf_nodes */
5270
5271 *udf_noderes = udf_node;
5272
5273 return 0;
5274 }
5275
5276 /* --------------------------------------------------------------------- */
5277
5278
5279 int
5280 udf_writeout_node(struct udf_node *udf_node, int waitfor)
5281 {
5282 union dscrptr *dscr;
5283 struct long_ad *loc;
5284 int extnr, flags, error;
5285
5286 DPRINTF(NODE, ("udf_writeout_node called\n"));
5287
5288 KASSERT(udf_node->outstanding_bufs == 0);
5289 KASSERT(udf_node->outstanding_nodedscr == 0);
5290
5291 KASSERT(LIST_EMPTY(&udf_node->vnode->v_dirtyblkhd));
5292
5293 if (udf_node->i_flags & IN_DELETED) {
5294 DPRINTF(NODE, ("\tnode deleted; not writing out\n"));
5295 return 0;
5296 }
5297
5298 /* lock node */
5299 flags = waitfor ? 0 : IN_CALLBACK_ULK;
5300 UDF_LOCK_NODE(udf_node, flags);
5301
5302 /* at least one descriptor writeout */
5303 udf_node->outstanding_nodedscr = 1;
5304
5305 /* we're going to write out the descriptor so clear the flags */
5306 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED);
5307
5308 	/* if we were rebuilt, write out the allocation extents */
5309 if (udf_node->i_flags & IN_NODE_REBUILD) {
5310 		/* mark outstanding node descriptors and issue them */
5311 udf_node->outstanding_nodedscr += udf_node->num_extensions;
5312 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
5313 loc = &udf_node->ext_loc[extnr];
5314 dscr = (union dscrptr *) udf_node->ext[extnr];
5315 error = udf_write_logvol_dscr(udf_node, dscr, loc, 0);
5316 if (error)
5317 return error;
5318 }
5319 /* mark allocation extents written out */
5320 udf_node->i_flags &= ~(IN_NODE_REBUILD);
5321 }
5322
5323 if (udf_node->fe) {
5324 dscr = (union dscrptr *) udf_node->fe;
5325 } else {
5326 KASSERT(udf_node->efe);
5327 dscr = (union dscrptr *) udf_node->efe;
5328 }
5329 KASSERT(dscr);
5330
5331 loc = &udf_node->write_loc;
5332 error = udf_write_logvol_dscr(udf_node, dscr, loc, waitfor);
5333 return error;
5334 }
5335
5336 /* --------------------------------------------------------------------- */
5337
5338 int
5339 udf_dispose_node(struct udf_node *udf_node)
5340 {
5341 struct vnode *vp;
5342 int extnr;
5343
5344 DPRINTF(NODE, ("udf_dispose_node called on node %p\n", udf_node));
5345 if (!udf_node) {
5346 DPRINTF(NODE, ("UDF: Dispose node on node NULL, ignoring\n"));
5347 return 0;
5348 }
5349
5350 vp = udf_node->vnode;
5351 #ifdef DIAGNOSTIC
5352 if (vp->v_numoutput)
5353 panic("disposing UDF node with pending I/O's, udf_node = %p, "
5354 "v_numoutput = %d", udf_node, vp->v_numoutput);
5355 #endif
5356
5357 	/* wait until the node is no longer being synced (just in case we stumble over one) */
5358 KASSERT(!mutex_owned(&mntvnode_lock));
5359 mutex_enter(&mntvnode_lock);
5360 while (udf_node->i_flags & IN_SYNCED) {
5361 cv_timedwait(&udf_node->ump->dirtynodes_cv, &mntvnode_lock,
5362 hz/16);
5363 }
5364 mutex_exit(&mntvnode_lock);
5365
5366 /* TODO extended attributes and streamdir */
5367
5368 /* remove dirhash if present */
5369 udf_dirhash_destroy(&udf_node->dir_hash);
5370
5371 /* remove from our hash lookup table */
5372 udf_deregister_node(udf_node);
5373
5374 /* destroy our lock */
5375 mutex_destroy(&udf_node->node_mutex);
5376 cv_destroy(&udf_node->node_lock);
5377
5378 /* dissociate our udf_node from the vnode */
5379 genfs_node_destroy(udf_node->vnode);
5380 vp->v_data = NULL;
5381
5382 /* free associated memory and the node itself */
5383 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
5384 udf_free_logvol_dscr(udf_node->ump, &udf_node->ext_loc[extnr],
5385 udf_node->ext[extnr]);
5386 udf_node->ext[extnr] = (void *) 0xdeadcccc;
5387 }
5388
5389 if (udf_node->fe)
5390 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
5391 udf_node->fe);
5392 if (udf_node->efe)
5393 udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
5394 udf_node->efe);
5395
5396 udf_node->fe = (void *) 0xdeadaaaa;
5397 udf_node->efe = (void *) 0xdeadbbbb;
5398 udf_node->ump = (void *) 0xdeadbeef;
5399 pool_put(&udf_node_pool, udf_node);
5400
5401 return 0;
5402 }
5403
5404
5405
5406 /*
5407 * create a new node using the specified vnodeops, vap and cnp but with the
5408 * udf_file_type. This allows special files to be created. Use with care.
5409 */
5410
5411 static int
5412 udf_create_node_raw(struct vnode *dvp, struct vnode **vpp, int udf_file_type,
5413 int (**vnodeops)(void *), struct vattr *vap, struct componentname *cnp)
5414 {
5415 union dscrptr *dscr;
5416 	struct udf_node *dir_node = VTOI(dvp);
5417 struct udf_node *udf_node;
5418 struct udf_mount *ump = dir_node->ump;
5419 struct vnode *nvp;
5420 struct long_ad node_icb_loc;
5421 uint64_t parent_unique_id;
5422 uint64_t lmapping, pmapping;
5423 uint32_t lb_size, lb_num;
5424 uint16_t vpart_num;
5425 uid_t uid;
5426 gid_t gid, parent_gid;
5427 int fid_size, error;
5428
5429 lb_size = udf_rw32(ump->logical_vol->lb_size);
5430 *vpp = NULL;
5431
5432 /* allocate vnode */
5433 error = getnewvnode(VT_UDF, ump->vfs_mountp, vnodeops, &nvp);
5434 if (error)
5435 return error;
5436
5437 /* lock node */
5438 error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY);
5439 if (error) {
5440 nvp->v_data = NULL;
5441 ungetnewvnode(nvp);
5442 return error;
5443 }
5444
5445 /* get disc allocation for one logical block */
5446 error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
5447 &vpart_num, &lmapping, &pmapping);
5448 lb_num = lmapping;
5449 if (error) {
5450 vlockmgr(nvp->v_vnlock, LK_RELEASE);
5451 ungetnewvnode(nvp);
5452 return error;
5453 }
5454
5455 /* initialise pointer to location */
5456 memset(&node_icb_loc, 0, sizeof(struct long_ad));
5457 	node_icb_loc.len = udf_rw32(lb_size);
5458 node_icb_loc.loc.lb_num = udf_rw32(lb_num);
5459 node_icb_loc.loc.part_num = udf_rw16(vpart_num);
5460
5461 /* build udf_node (do initialise!) */
5462 udf_node = pool_get(&udf_node_pool, PR_WAITOK);
5463 memset(udf_node, 0, sizeof(struct udf_node));
5464
5465 /* initialise crosslinks, note location of fe/efe for hashing */
5466 /* bugalert: synchronise with udf_get_node() */
5467 udf_node->ump = ump;
5468 udf_node->vnode = nvp;
5469 nvp->v_data = udf_node;
5470 udf_node->loc = node_icb_loc;
5471 udf_node->write_loc = node_icb_loc;
5472 udf_node->lockf = 0;
5473 mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
5474 cv_init(&udf_node->node_lock, "udf_nlk");
5475 udf_node->outstanding_bufs = 0;
5476 udf_node->outstanding_nodedscr = 0;
5477
5478 /* initialise genfs */
5479 genfs_node_init(nvp, &udf_genfsops);
5480
5481 /* insert into the hash lookup */
5482 udf_register_node(udf_node);
5483
5484 	/* get parent's unique ID for referring to '..' if it's a directory */
5485 if (dir_node->fe) {
5486 parent_unique_id = udf_rw64(dir_node->fe->unique_id);
5487 parent_gid = (gid_t) udf_rw32(dir_node->fe->gid);
5488 } else {
5489 parent_unique_id = udf_rw64(dir_node->efe->unique_id);
5490 parent_gid = (gid_t) udf_rw32(dir_node->efe->gid);
5491 }
5492
5493 /* get descriptor */
5494 udf_create_logvol_dscr(ump, udf_node, &node_icb_loc, &dscr);
5495
5496 /* choose a fe or an efe for it */
5497 if (ump->logical_vol->tag.descriptor_ver == 2) {
5498 udf_node->fe = &dscr->fe;
5499 fid_size = udf_create_new_fe(ump, udf_node->fe,
5500 udf_file_type, &udf_node->loc,
5501 &dir_node->loc, parent_unique_id);
5502 /* TODO add extended attribute for creation time */
5503 } else {
5504 udf_node->efe = &dscr->efe;
5505 fid_size = udf_create_new_efe(ump, udf_node->efe,
5506 udf_file_type, &udf_node->loc,
5507 &dir_node->loc, parent_unique_id);
5508 }
5509 KASSERT(dscr->tag.tag_loc == udf_node->loc.loc.lb_num);
5510
5511 /* update vnode's size and type */
5512 nvp->v_type = vap->va_type;
5513 uvm_vnp_setsize(nvp, fid_size);
5514
5515 /* set access mode */
5516 udf_setaccessmode(udf_node, vap->va_mode);
5517
5518 /* set ownership */
5519 uid = kauth_cred_geteuid(cnp->cn_cred);
5520 gid = parent_gid;
5521 udf_setownership(udf_node, uid, gid);
5522
5523 error = udf_dir_attach(ump, dir_node, udf_node, vap, cnp);
5524 if (error) {
5525 /* free disc allocation for node */
5526 udf_free_allocated_space(ump, lb_num, vpart_num, 1);
5527
5528 /* recycle udf_node */
5529 udf_dispose_node(udf_node);
5530 vput(nvp);
5531
5532 *vpp = NULL;
5533 return error;
5534 }
5535
5536 /* adjust file count */
5537 udf_adjust_filecount(udf_node, 1);
5538
5539 /* return result */
5540 *vpp = nvp;
5541
5542 return 0;
5543 }
5544
5545
5546 int
5547 udf_create_node(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
5548 struct componentname *cnp)
5549 {
5550 int (**vnodeops)(void *);
5551 int udf_file_type;
5552
5553 DPRINTF(NODE, ("udf_create_node called\n"));
5554
5555 /* what type are we creating ? */
5556 vnodeops = udf_vnodeop_p;
5557 /* start with a default */
5558 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
5559
5560 *vpp = NULL;
5561
5562 switch (vap->va_type) {
5563 case VREG :
5564 udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
5565 break;
5566 case VDIR :
5567 udf_file_type = UDF_ICB_FILETYPE_DIRECTORY;
5568 break;
5569 case VLNK :
5570 udf_file_type = UDF_ICB_FILETYPE_SYMLINK;
5571 break;
5572 case VBLK :
5573 udf_file_type = UDF_ICB_FILETYPE_BLOCKDEVICE;
5574 /* specfs */
5575 return ENOTSUP;
5576 break;
5577 case VCHR :
5578 udf_file_type = UDF_ICB_FILETYPE_CHARDEVICE;
5579 /* specfs */
5580 return ENOTSUP;
5581 break;
5582 case VFIFO :
5583 udf_file_type = UDF_ICB_FILETYPE_FIFO;
5584 /* specfs */
5585 return ENOTSUP;
5586 break;
5587 case VSOCK :
5588 udf_file_type = UDF_ICB_FILETYPE_SOCKET;
5589 /* specfs */
5590 return ENOTSUP;
5591 break;
5592 case VNON :
5593 case VBAD :
5594 default :
5595 /* nothing; can we even create these? */
5596 return EINVAL;
5597 }
5598
5599 return udf_create_node_raw(dvp, vpp, udf_file_type, vnodeops, vap, cnp);
5600 }
5601
5602 /* --------------------------------------------------------------------- */
5603
5604 static void
5605 udf_free_descriptor_space(struct udf_node *udf_node, struct long_ad *loc, void *mem)
5606 {
5607 struct udf_mount *ump = udf_node->ump;
5608 uint32_t lb_size, lb_num, len, num_lb;
5609 uint16_t vpart_num;
5610
5611 /* is there really one? */
5612 if (mem == NULL)
5613 return;
5614
5615 /* got a descriptor here */
5616 len = UDF_EXT_LEN(udf_rw32(loc->len));
5617 lb_num = udf_rw32(loc->loc.lb_num);
5618 vpart_num = udf_rw16(loc->loc.part_num);
5619
5620 lb_size = udf_rw32(ump->logical_vol->lb_size);
5621 num_lb = (len + lb_size -1) / lb_size;
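	/* round up to whole logical blocks: e.g. len = 5000 with
	 * lb_size = 2048 frees 3 logical blocks */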
5622
5623 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
5624 }
5625
5626 void
5627 udf_delete_node(struct udf_node *udf_node)
5628 {
5629 void *dscr;
5630 struct udf_mount *ump;
5631 struct long_ad *loc;
5632 int extnr, lvint, dummy;
5633
5634 ump = udf_node->ump;
5635
5636 	/* paranoia check on integrity; should be open! (we could panic) */
5637 lvint = udf_rw32(udf_node->ump->logvol_integrity->integrity_type);
5638 if (lvint == UDF_INTEGRITY_CLOSED)
5639 printf("\tIntegrity was CLOSED!\n");
5640
5641 /* whatever the node type, change its size to zero */
5642 (void) udf_resize_node(udf_node, 0, &dummy);
5643
5644 /* force it to be `clean'; no use writing it out */
5645 udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED | IN_ACCESS |
5646 IN_CHANGE | IN_UPDATE | IN_MODIFY);
5647
5648 /* adjust file count */
5649 udf_adjust_filecount(udf_node, -1);
5650
5651 /*
5652 * Free its allocated descriptors; memory will be released when
5653 * vop_reclaim() is called.
5654 */
5655 loc = &udf_node->loc;
5656
5657 dscr = udf_node->fe;
5658 udf_free_descriptor_space(udf_node, loc, dscr);
5659 dscr = udf_node->efe;
5660 udf_free_descriptor_space(udf_node, loc, dscr);
5661
5662 for (extnr = 0; extnr < UDF_MAX_ALLOC_EXTENTS; extnr++) {
5663 dscr = udf_node->ext[extnr];
5664 loc = &udf_node->ext_loc[extnr];
5665 udf_free_descriptor_space(udf_node, loc, dscr);
5666 }
5667 }
5668
5669 /* --------------------------------------------------------------------- */
5670
5671 /* set new filesize; node must be LOCKED on entry and is locked on exit */
5672 int
5673 udf_resize_node(struct udf_node *udf_node, uint64_t new_size, int *extended)
5674 {
5675 struct file_entry *fe = udf_node->fe;
5676 struct extfile_entry *efe = udf_node->efe;
5677 uint64_t file_size;
5678 int error;
5679
5680 if (fe) {
5681 file_size = udf_rw64(fe->inf_len);
5682 } else {
5683 assert(udf_node->efe);
5684 file_size = udf_rw64(efe->inf_len);
5685 }
5686
5687 DPRINTF(ATTR, ("\tchanging file length from %"PRIu64" to %"PRIu64"\n",
5688 file_size, new_size));
5689
5690 /* if not changing, we're done */
5691 if (file_size == new_size)
5692 return 0;
5693
5694 *extended = (new_size > file_size);
5695 if (*extended) {
5696 error = udf_grow_node(udf_node, new_size);
5697 } else {
5698 error = udf_shrink_node(udf_node, new_size);
5699 }
5700
5701 return error;
5702 }
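/*
 * Usage sketch (illustrative only, names assumed): a truncate-style caller
 * holding the node lock could use it as
 *
 *	int extended, error;
 *
 *	error = udf_resize_node(udf_node, new_size, &extended);
 *	if (error == 0 && extended)
 *		... the file grew ...
 */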
5703
5704
5705 /* --------------------------------------------------------------------- */
5706
5707 void
5708 udf_itimes(struct udf_node *udf_node, struct timespec *acc,
5709 struct timespec *mod, struct timespec *birth)
5710 {
5711 struct timespec now;
5712 struct file_entry *fe;
5713 struct extfile_entry *efe;
5714 struct filetimes_extattr_entry *ft_extattr;
5715 struct timestamp *atime, *mtime, *attrtime, *ctime;
5716 struct timestamp fe_ctime;
5717 struct timespec cur_birth;
5718 uint32_t offset, a_l;
5719 uint8_t *filedata;
5720 int error;
5721
5722 /* protect against rogue values */
5723 if (!udf_node)
5724 return;
5725
5726 fe = udf_node->fe;
5727 efe = udf_node->efe;
5728
5729 if (!(udf_node->i_flags & (IN_ACCESS|IN_CHANGE|IN_UPDATE|IN_MODIFY)))
5730 return;
5731
5732 /* get descriptor information */
5733 if (fe) {
5734 atime = &fe->atime;
5735 mtime = &fe->mtime;
5736 attrtime = &fe->attrtime;
5737 filedata = fe->data;
5738
5739 /* default to a local dummy; may be replaced by the extattr below */
5740 ctime = &fe_ctime;
5741
5742 /* check our extended attribute if present */
5743 error = udf_extattr_search_intern(udf_node,
5744 UDF_FILETIMES_ATTR_NO, "", &offset, &a_l);
5745 if (!error) {
5746 ft_extattr = (struct filetimes_extattr_entry *)
5747 (filedata + offset);
5748 if (ft_extattr->existence & UDF_FILETIMES_FILE_CREATION)
5749 ctime = &ft_extattr->times[0];
5750 }
5751 /* TODO create the extended attribute if not found? */
5752 } else {
5753 assert(udf_node->efe);
5754 atime = &efe->atime;
5755 mtime = &efe->mtime;
5756 attrtime = &efe->attrtime;
5757 ctime = &efe->ctime;
5758 }
5759
5760 vfs_timestamp(&now);
5761
5762 /* set access time */
5763 if (udf_node->i_flags & IN_ACCESS) {
5764 if (acc == NULL)
5765 acc = &now;
5766 udf_timespec_to_timestamp(acc, atime);
5767 }
5768
5769 /* set modification time */
5770 if (udf_node->i_flags & (IN_UPDATE | IN_MODIFY)) {
5771 if (mod == NULL)
5772 mod = &now;
5773 udf_timespec_to_timestamp(mod, mtime);
5774
5775 /* ensure birthtime is older than set modification! */
5776 udf_timestamp_to_timespec(udf_node->ump, ctime, &cur_birth);
5777 if ((cur_birth.tv_sec > mod->tv_sec) ||
5778 ((cur_birth.tv_sec == mod->tv_sec) &&
5779 (cur_birth.tv_nsec > mod->tv_nsec))) {
5780 udf_timespec_to_timestamp(mod, ctime);
5781 }
5782 }
5783
5784 /* update birthtime if specified */
5785 /* XXX we assume here that the given birthtime is older than mod */
5786 if (birth && (birth->tv_sec != VNOVAL)) {
5787 udf_timespec_to_timestamp(birth, ctime);
5788 }
5789
5790 /* set change time */
5791 if (udf_node->i_flags & (IN_CHANGE | IN_MODIFY))
5792 udf_timespec_to_timestamp(&now, attrtime);
5793
5794 /* notify updates to the node itself */
5795 if (udf_node->i_flags & (IN_ACCESS | IN_MODIFY))
5796 udf_node->i_flags |= IN_ACCESSED;
5797 if (udf_node->i_flags & (IN_UPDATE | IN_CHANGE))
5798 udf_node->i_flags |= IN_MODIFIED;
5799
5800 /* clear modification flags */
5801 udf_node->i_flags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY);
5802 }
5803
5804 /* --------------------------------------------------------------------- */
5805
5806 int
5807 udf_update(struct vnode *vp, struct timespec *acc,
5808 struct timespec *mod, struct timespec *birth, int updflags)
5809 {
5810 struct udf_node *udf_node = VTOI(vp);
5811 struct udf_mount *ump = udf_node->ump;
5812 struct regid *impl_id;
5813 int mnt_async = (vp->v_mount->mnt_flag & MNT_ASYNC);
5814 int waitfor, flags;
5815
5816 #ifdef DEBUG
5817 char bits[128];
5818 DPRINTF(CALL, ("udf_update(node, %p, %p, %p, %d)\n", acc, mod, birth,
5819 updflags));
5820 bitmask_snprintf(udf_node->i_flags, IN_FLAGBITS, bits, sizeof(bits));
5821 DPRINTF(CALL, ("\tnode flags %s\n", bits));
5822 DPRINTF(CALL, ("\t\tmnt_async = %d\n", mnt_async));
5823 #endif
5824
5825 /* set our times */
5826 udf_itimes(udf_node, acc, mod, birth);
5827
5828 /* set our implementation id */
5829 if (udf_node->fe) {
5830 impl_id = &udf_node->fe->imp_id;
5831 } else {
5832 impl_id = &udf_node->efe->imp_id;
5833 }
5834 udf_set_regid(impl_id, IMPL_NAME);
5835 udf_add_impl_regid(ump, impl_id);
5836
5837 /* if called when mounted readonly, never write back */
5838 if (vp->v_mount->mnt_flag & MNT_RDONLY)
5839 return 0;
5840
5841 /* check if the node is dirty `enough' */
5842 if (updflags & UPDATE_CLOSE) {
5843 flags = udf_node->i_flags & (IN_MODIFIED | IN_ACCESSED);
5844 } else {
5845 flags = udf_node->i_flags & IN_MODIFIED;
5846 }
5847 if (flags == 0)
5848 return 0;
5849
5850 /* determine if we need to write sync or async */
5851 waitfor = 0;
5852 if ((flags & IN_MODIFIED) && (mnt_async == 0)) {
5853 /* sync mounted */
5854 waitfor = updflags & UPDATE_WAIT;
5855 if (updflags & UPDATE_DIROP)
5856 waitfor |= UPDATE_WAIT;
5857 }
5858 if (waitfor)
5859 return VOP_FSYNC(vp, FSCRED, FSYNC_WAIT, 0,0);
5860
5861 return 0;
5862 }
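/*
 * Illustrative only: code that touched a node normally just sets the
 * appropriate IN_* flags and calls udf_update() to do the timestamp
 * bookkeeping and possible write-back, e.g.
 *
 *	udf_node->i_flags |= IN_CHANGE | IN_UPDATE;
 *	(void) udf_update(vp, NULL, NULL, NULL, 0);
 */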
5863
5864
5865 /* --------------------------------------------------------------------- */
5866
5867
5868 /*
5869 * Read one fid, process it into a dirent and advance to the next. (*fid)
5870 * has to be allocated a logical block in size, (*dirent) a struct dirent in length.
5871 */
5872
5873 int
5874 udf_read_fid_stream(struct vnode *vp, uint64_t *offset,
5875 struct fileid_desc *fid, struct dirent *dirent)
5876 {
5877 struct udf_node *dir_node = VTOI(vp);
5878 struct udf_mount *ump = dir_node->ump;
5879 struct file_entry *fe = dir_node->fe;
5880 struct extfile_entry *efe = dir_node->efe;
5881 uint32_t fid_size, lb_size;
5882 uint64_t file_size;
5883 char *fid_name;
5884 int enough, error;
5885
5886 assert(fid);
5887 assert(dirent);
5888 assert(dir_node);
5889 assert(offset);
5890 assert(*offset != 1);
5891
5892 DPRINTF(FIDS, ("read_fid_stream called at offset %"PRIu64"\n", *offset));
5893 /* check if we're past the end of the directory */
5894 if (fe) {
5895 file_size = udf_rw64(fe->inf_len);
5896 } else {
5897 assert(dir_node->efe);
5898 file_size = udf_rw64(efe->inf_len);
5899 }
5900 if (*offset >= file_size)
5901 return EINVAL;
5902
5903 /* get maximum length of FID descriptor */
5904 lb_size = udf_rw32(ump->logical_vol->lb_size);
5905
5906 /* initialise return values */
5907 fid_size = 0;
5908 memset(dirent, 0, sizeof(struct dirent));
5909 memset(fid, 0, lb_size);
5910
5911 enough = (file_size - (*offset) >= UDF_FID_SIZE);
5912 if (!enough) {
5913 /* short dir ... */
5914 return EIO;
5915 }
5916
5917 error = vn_rdwr(UIO_READ, vp,
5918 fid, MIN(file_size - (*offset), lb_size), *offset,
5919 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED, FSCRED,
5920 NULL, NULL);
5921 if (error)
5922 return error;
5923
5924 DPRINTF(FIDS, ("\tfid piece read in fine\n"));
5925 /*
5926 * Check if we got a whole descriptor.
5927 * TODO Try to `resync' directory stream when something is very wrong.
5928 */
5929
5930 /* check if our FID header is OK */
5931 error = udf_check_tag(fid);
5932 if (error) {
5933 goto brokendir;
5934 }
5935 DPRINTF(FIDS, ("\ttag check ok\n"));
5936
5937 if (udf_rw16(fid->tag.id) != TAGID_FID) {
5938 error = EIO;
5939 goto brokendir;
5940 }
5941 DPRINTF(FIDS, ("\ttag checked ok: got TAGID_FID\n"));
5942
5943 /* check for length */
5944 fid_size = udf_fidsize(fid);
5945 enough = (file_size - (*offset) >= fid_size);
5946 if (!enough) {
5947 error = EIO;
5948 goto brokendir;
5949 }
5950 DPRINTF(FIDS, ("\tthe complete fid is read in\n"));
5951
5952 /* check FID contents */
5953 error = udf_check_tag_payload((union dscrptr *) fid, lb_size);
5954 brokendir:
5955 if (error) {
5956 /* note that this is sometimes a bit quick to report */
5957 printf("BROKEN DIRECTORY ENTRY\n");
5958 /* RESYNC? */
5959 /* TODO: use udf_resync_fid_stream */
5960 return EIO;
5961 }
5962 DPRINTF(FIDS, ("\tpayload checked ok\n"));
5963
5964 /* we got a whole and valid descriptor! */
5965 DPRINTF(FIDS, ("\tinterpret FID\n"));
5966
5967 /* create resulting dirent structure */
5968 fid_name = (char *) fid->data + udf_rw16(fid->l_iu);
5969 udf_to_unix_name(dirent->d_name, MAXNAMLEN,
5970 fid_name, fid->l_fi, &ump->logical_vol->desc_charset);
5971
5972 /* '..' has no name, so provide one */
5973 if (fid->file_char & UDF_FILE_CHAR_PAR)
5974 strcpy(dirent->d_name, "..");
5975
5976 dirent->d_fileno = udf_calchash(&fid->icb); /* inode hash XXX */
5977 dirent->d_namlen = strlen(dirent->d_name);
5978 dirent->d_reclen = _DIRENT_SIZE(dirent);
5979
5980 /*
5981 * Note that it's not worth trying to determine the file type here;
5982 * it is simply too expensive.
5983 */
5984 dirent->d_type = DT_UNKNOWN;
5985
5986 /* initial guess for filetype we can make */
5987 if (fid->file_char & UDF_FILE_CHAR_DIR)
5988 dirent->d_type = DT_DIR;
5989
5990 /* advance */
5991 *offset += fid_size;
5992
5993 return error;
5994 }
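/*
 * Usage sketch (illustrative only): a readdir-style consumer walks the fid
 * stream by repeatedly calling udf_read_fid_stream() until the directory's
 * inf_len is reached, with `fid' allocated lb_size bytes:
 *
 *	while (diroffset < file_size) {
 *		error = udf_read_fid_stream(vp, &diroffset, fid, dirent);
 *		if (error)
 *			break;
 *		... filter and copy out *dirent ...
 *	}
 */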
5995
5996
5997 /* --------------------------------------------------------------------- */
5998
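/*
 * Node synchronisation is done in passes over the mount's sorted node list:
 * pass 1 issues a data-only VOP_FSYNC() on every node, pass 2 issues a full
 * VOP_FSYNC() on nodes that have no output pending anymore, and pass 3 only
 * counts the still outstanding buffers and node descriptors so udf_do_sync()
 * can decide whether to wait and recount.
 */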
5999 static void
6000 udf_sync_pass(struct udf_mount *ump, kauth_cred_t cred, int waitfor,
6001 int pass, int *ndirty)
6002 {
6003 struct udf_node *udf_node, *n_udf_node;
6004 struct vnode *vp;
6005 int vdirty, error;
6006 int on_type, on_flags, on_vnode;
6007
6008 derailed:
6009 KASSERT(mutex_owned(&mntvnode_lock));
6010
6011 DPRINTF(SYNC, ("sync_pass %d\n", pass));
6012 udf_node = LIST_FIRST(&ump->sorted_udf_nodes);
6013 for (;udf_node; udf_node = n_udf_node) {
6014 DPRINTF(SYNC, ("."));
6015
6016 udf_node->i_flags &= ~IN_SYNCED;
6017 vp = udf_node->vnode;
6018
6019 mutex_enter(&vp->v_interlock);
6020 n_udf_node = LIST_NEXT(udf_node, sortchain);
6021 if (n_udf_node)
6022 n_udf_node->i_flags |= IN_SYNCED;
6023
6024 /* system nodes are not synced this way */
6025 if (vp->v_vflag & VV_SYSTEM) {
6026 mutex_exit(&vp->v_interlock);
6027 continue;
6028 }
6029
6030 /* check if it's dirty enough to even try */
6031 on_type = (waitfor == MNT_LAZY || vp->v_type == VNON);
6032 on_flags = ((udf_node->i_flags &
6033 (IN_ACCESSED | IN_UPDATE | IN_MODIFIED)) == 0);
6034 on_vnode = LIST_EMPTY(&vp->v_dirtyblkhd)
6035 && UVM_OBJ_IS_CLEAN(&vp->v_uobj);
6036 if (on_type || (on_flags || on_vnode)) { /* XXX */
6037 /* not dirty (enough?) */
6038 mutex_exit(&vp->v_interlock);
6039 continue;
6040 }
6041
6042 mutex_exit(&mntvnode_lock);
6043 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
6044 if (error) {
6045 mutex_enter(&mntvnode_lock);
6046 if (error == ENOENT)
6047 goto derailed;
6048 *ndirty += 1;
6049 continue;
6050 }
6051
6052 switch (pass) {
6053 case 1:
6054 VOP_FSYNC(vp, cred, 0 | FSYNC_DATAONLY,0,0);
6055 break;
6056 case 2:
6057 vdirty = vp->v_numoutput;
6058 if (vp->v_tag == VT_UDF)
6059 vdirty += udf_node->outstanding_bufs +
6060 udf_node->outstanding_nodedscr;
6061 if (vdirty == 0)
6062 VOP_FSYNC(vp, cred, 0,0,0);
6063 *ndirty += vdirty;
6064 break;
6065 case 3:
6066 vdirty = vp->v_numoutput;
6067 if (vp->v_tag == VT_UDF)
6068 vdirty += udf_node->outstanding_bufs +
6069 udf_node->outstanding_nodedscr;
6070 *ndirty += vdirty;
6071 break;
6072 }
6073
6074 vput(vp);
6075 mutex_enter(&mntvnode_lock);
6076 }
6077 DPRINTF(SYNC, ("END sync_pass %d\n", pass));
6078 }
6079
6080
6081 void
6082 udf_do_sync(struct udf_mount *ump, kauth_cred_t cred, int waitfor)
6083 {
6084 int dummy, ndirty;
6085
6086 mutex_enter(&mntvnode_lock);
6087 recount:
6088 dummy = 0;
6089 DPRINTF(CALL, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
6090 DPRINTF(SYNC, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
6091 udf_sync_pass(ump, cred, waitfor, 1, &dummy);
6092
6093 DPRINTF(CALL, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
6094 DPRINTF(SYNC, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
6095 udf_sync_pass(ump, cred, waitfor, 2, &dummy);
6096
6097 if (waitfor == MNT_WAIT) {
6098 ndirty = ump->devvp->v_numoutput;
6099 DPRINTF(NODE, ("counting pending blocks: on devvp %d\n",
6100 ndirty));
6101 udf_sync_pass(ump, cred, waitfor, 3, &ndirty);
6102 DPRINTF(NODE, ("counted num dirty pending blocks %d\n",
6103 ndirty));
6104
6105 if (ndirty) {
6106 /* 1/4 second wait */
6107 cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
6108 hz/4);
6109 goto recount;
6110 }
6111 }
6112
6113 mutex_exit(&mntvnode_lock);
6114 }
6115
6116 /* --------------------------------------------------------------------- */
6117
6118 /*
6119 * Read and write a file extent into/from the buffer.
6120 *
6121 * The split-up of the extent into separate request-buffers is to minimise
6122 * copying around as much as possible.
6123 *
6124 * Block based file reading and writing.
6125 */
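/*
 * Note: files whose allocation type is UDF_ICB_INTERN_ALLOC keep their data
 * embedded in the tail of the (extended) file entry; those are handled by
 * udf_read_internal()/udf_write_internal() below and never reach the disc
 * scheduler.
 */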
6126
6127 static int
6128 udf_read_internal(struct udf_node *node, uint8_t *blob)
6129 {
6130 struct udf_mount *ump;
6131 struct file_entry *fe = node->fe;
6132 struct extfile_entry *efe = node->efe;
6133 uint64_t inflen;
6134 uint32_t sector_size;
6135 uint8_t *pos;
6136 int icbflags, addr_type;
6137
6138 /* get extent and do some paranoia checks */
6139 ump = node->ump;
6140 sector_size = ump->discinfo.sector_size;
6141
6142 if (fe) {
6143 inflen = udf_rw64(fe->inf_len);
6144 pos = &fe->data[0] + udf_rw32(fe->l_ea);
6145 icbflags = udf_rw16(fe->icbtag.flags);
6146 } else {
6147 assert(node->efe);
6148 inflen = udf_rw64(efe->inf_len);
6149 pos = &efe->data[0] + udf_rw32(efe->l_ea);
6150 icbflags = udf_rw16(efe->icbtag.flags);
6151 }
6152 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
6153
6154 assert(addr_type == UDF_ICB_INTERN_ALLOC);
6155 assert(inflen < sector_size);
6156
6157 /* copy out info */
6158 memset(blob, 0, sector_size);
6159 memcpy(blob, pos, inflen);
6160
6161 return 0;
6162 }
6163
6164
6165 static int
6166 udf_write_internal(struct udf_node *node, uint8_t *blob)
6167 {
6168 struct udf_mount *ump;
6169 struct file_entry *fe = node->fe;
6170 struct extfile_entry *efe = node->efe;
6171 uint64_t inflen;
6172 uint32_t sector_size;
6173 uint8_t *pos;
6174 int icbflags, addr_type;
6175
6176 /* get extent and do some paranoia checks */
6177 ump = node->ump;
6178 sector_size = ump->discinfo.sector_size;
6179
6180 if (fe) {
6181 inflen = udf_rw64(fe->inf_len);
6182 pos = &fe->data[0] + udf_rw32(fe->l_ea);
6183 icbflags = udf_rw16(fe->icbtag.flags);
6184 } else {
6185 assert(node->efe);
6186 inflen = udf_rw64(efe->inf_len);
6187 pos = &efe->data[0] + udf_rw32(efe->l_ea);
6188 icbflags = udf_rw16(efe->icbtag.flags);
6189 }
6190 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
6191
6192 assert(addr_type == UDF_ICB_INTERN_ALLOC);
6193 assert(inflen < sector_size);
6194
6195 /* copy in blob */
6196 /* memset(pos, 0, inflen); */
6197 memcpy(pos, blob, inflen);
6198
6199 return 0;
6200 }
6201
6202
6203 void
6204 udf_read_filebuf(struct udf_node *udf_node, struct buf *buf)
6205 {
6206 struct buf *nestbuf;
6207 struct udf_mount *ump = udf_node->ump;
6208 uint64_t *mapping;
6209 uint64_t run_start;
6210 uint32_t sector_size;
6211 uint32_t buf_offset, sector, rbuflen, rblk;
6212 uint32_t from, lblkno;
6213 uint32_t sectors;
6214 uint8_t *buf_pos;
6215 int error, run_length, isdir, what;
6216
6217 sector_size = udf_node->ump->discinfo.sector_size;
6218
6219 from = buf->b_blkno;
6220 sectors = buf->b_bcount / sector_size;
6221
6222 isdir = (udf_node->vnode->v_type == VDIR);
6223 what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
6224
6225 /* ensure we have enough translation slots */
6226 KASSERT(buf->b_bcount / sector_size <= UDF_MAX_MAPPINGS);
6227 KASSERT(MAXPHYS / sector_size <= UDF_MAX_MAPPINGS);
6228
6229 if (sectors > UDF_MAX_MAPPINGS) {
6230 printf("udf_read_filebuf: implementation limit on bufsize\n");
6231 buf->b_error = EIO;
6232 biodone(buf);
6233 return;
6234 }
6235
6236 mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
6237
6238 error = 0;
6239 DPRINTF(READ, ("\ttranslate %d-%d\n", from, sectors));
6240 error = udf_translate_file_extent(udf_node, from, sectors, mapping);
6241 if (error) {
6242 buf->b_error = error;
6243 biodone(buf);
6244 goto out;
6245 }
6246 DPRINTF(READ, ("\ttranslate extent went OK\n"));
6247
6248 /* pre-check if it's an internally stored (embedded) file */
6249 if (*mapping == UDF_TRANS_INTERN) {
6250 error = udf_read_internal(udf_node, (uint8_t *) buf->b_data);
6251 if (error)
6252 buf->b_error = error;
6253 biodone(buf);
6254 goto out;
6255 }
6256 DPRINTF(READ, ("\tnot intern\n"));
6257
6258 #ifdef DEBUG
6259 if (udf_verbose & UDF_DEBUG_TRANSLATE) {
6260 printf("Returned translation table:\n");
6261 for (sector = 0; sector < sectors; sector++) {
6262 printf("%d : %"PRIu64"\n", sector, mapping[sector]);
6263 }
6264 }
6265 #endif
6266
6267 /* request read-in of data from the disc scheduler */
6268 buf->b_resid = buf->b_bcount;
6269 for (sector = 0; sector < sectors; sector++) {
6270 buf_offset = sector * sector_size;
6271 buf_pos = (uint8_t *) buf->b_data + buf_offset;
6272 DPRINTF(READ, ("\tprocessing rel sector %d\n", sector));
6273
6274 /* check if it's zero or unmapped to stop reading */
6275 switch (mapping[sector]) {
6276 case UDF_TRANS_UNMAPPED:
6277 case UDF_TRANS_ZERO:
6278 /* copy zero sector TODO runlength like below */
6279 memset(buf_pos, 0, sector_size);
6280 DPRINTF(READ, ("\treturning zero sector\n"));
6281 nestiobuf_done(buf, sector_size, 0);
6282 break;
6283 default :
6284 DPRINTF(READ, ("\tread sector "
6285 "%"PRIu64"\n", mapping[sector]));
6286
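/*
 * Coalesce sectors that translate to consecutive disc addresses
 * into one run, issued below as a single nested buffer.
 */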
6287 lblkno = from + sector;
6288 run_start = mapping[sector];
6289 run_length = 1;
6290 while (sector < sectors-1) {
6291 if (mapping[sector+1] != mapping[sector]+1)
6292 break;
6293 run_length++;
6294 sector++;
6295 }
6296
6297 /*
6298 * nest an iobuf and mark it for async reading. Since
6299 * we're using nested buffers, they can't be cached by
6300 * design.
6301 */
6302 rbuflen = run_length * sector_size;
6303 rblk = run_start * (sector_size/DEV_BSIZE);
6304
6305 nestbuf = getiobuf(NULL, true);
6306 nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
6307 /* nestbuf is B_ASYNC */
6308
6309 /* identify this nestbuf */
6310 nestbuf->b_lblkno = lblkno;
6311 assert(nestbuf->b_vp == udf_node->vnode);
6312
6313 /* CD schedules on raw blkno */
6314 nestbuf->b_blkno = rblk;
6315 nestbuf->b_proc = NULL;
6316 nestbuf->b_rawblkno = rblk;
6317 nestbuf->b_udf_c_type = what;
6318
6319 udf_discstrat_queuebuf(ump, nestbuf);
6320 }
6321 }
6322 out:
6323 /* if we're synchronously reading, wait for the completion */
6324 if ((buf->b_flags & B_ASYNC) == 0)
6325 biowait(buf);
6326
6327 DPRINTF(READ, ("\tend of read_filebuf\n"));
6328 free(mapping, M_TEMP);
6329 return;
6330 }
6331
6332
6333 void
6334 udf_write_filebuf(struct udf_node *udf_node, struct buf *buf)
6335 {
6336 struct buf *nestbuf;
6337 struct udf_mount *ump = udf_node->ump;
6338 uint64_t *mapping;
6339 uint64_t run_start;
6340 uint32_t lb_size;
6341 uint32_t buf_offset, lb_num, rbuflen, rblk;
6342 uint32_t from, lblkno;
6343 uint32_t num_lb;
6344 uint8_t *buf_pos;
6345 int error, run_length, isdir, what, s;
6346
6347 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
6348
6349 from = buf->b_blkno;
6350 num_lb = buf->b_bcount / lb_size;
6351
6352 isdir = (udf_node->vnode->v_type == VDIR);
6353 what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
6354
6355 /* ensure we have enough translation slots */
6356 KASSERT(buf->b_bcount / lb_size <= UDF_MAX_MAPPINGS);
6357 KASSERT(MAXPHYS / lb_size <= UDF_MAX_MAPPINGS);
6358
6359 if (num_lb > UDF_MAX_MAPPINGS) {
6360 printf("udf_write_filebuf: implementation limit on bufsize\n");
6361 buf->b_error = EIO;
6362 biodone(buf);
6363 return;
6364 }
6365
6366 mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_TEMP, M_WAITOK);
6367
6368 error = 0;
6369 DPRINTF(WRITE, ("\ttranslate %d-%d\n", from, num_lb));
6370 error = udf_translate_file_extent(udf_node, from, num_lb, mapping);
6371 if (error) {
6372 buf->b_error = error;
6373 biodone(buf);
6374 goto out;
6375 }
6376 DPRINTF(WRITE, ("\ttranslate extent went OK\n"));
6377
6378 /* if it's internally mapped, we can write it into the descriptor itself */
6379 if (*mapping == UDF_TRANS_INTERN) {
6380 /* TODO paranoia check if we ARE going to have enough space */
6381 error = udf_write_internal(udf_node, (uint8_t *) buf->b_data);
6382 if (error)
6383 buf->b_error = error;
6384 biodone(buf);
6385 goto out;
6386 }
6387 DPRINTF(WRITE, ("\tnot intern\n"));
6388
6389 /* request write-out of data to the disc scheduler */
6390 buf->b_resid = buf->b_bcount;
6391 for (lb_num = 0; lb_num < num_lb; lb_num++) {
6392 buf_offset = lb_num * lb_size;
6393 buf_pos = (uint8_t *) buf->b_data + buf_offset;
6394 DPRINTF(WRITE, ("\tprocessing rel lb_num %d\n", lb_num));
6395
6396 /*
6397 * Mappings are not that important here. Just before we write
6398 * the lb_num we late-allocate them when needed and update the
6399 * mapping in the udf_node.
6400 */
6401
6402 /* XXX why not ignore the mapping altogether? */
6403 /* TODO estimate here how much will be late-allocated */
6404 DPRINTF(WRITE, ("\twrite lb_num "
6405 "%"PRIu64, mapping[lb_num]));
6406
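/*
 * Coalesce logical blocks whose translations are consecutive (or equal,
 * e.g. both still unmapped) into one run; the run is issued below as a
 * single nested buffer.
 */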
6407 lblkno = from + lb_num;
6408 run_start = mapping[lb_num];
6409 run_length = 1;
6410 while (lb_num < num_lb-1) {
6411 if (mapping[lb_num+1] != mapping[lb_num]+1)
6412 if (mapping[lb_num+1] != mapping[lb_num])
6413 break;
6414 run_length++;
6415 lb_num++;
6416 }
6417 DPRINTF(WRITE, ("+ %d\n", run_length));
6418
6419 /* nest an iobuf on the master buffer for the extent */
6420 rbuflen = run_length * lb_size;
6421 rblk = run_start * (lb_size/DEV_BSIZE);
6422
6423 #if 0
6424 /* if it's zero or unmapped, our blknr gets -1 for unmapped */
6425 switch (mapping[lb_num]) {
6426 case UDF_TRANS_UNMAPPED:
6427 case UDF_TRANS_ZERO:
6428 rblk = -1;
6429 break;
6430 default:
6431 rblk = run_start * (lb_size/DEV_BSIZE);
6432 break;
6433 }
6434 #endif
6435
6436 nestbuf = getiobuf(NULL, true);
6437 nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
6438 /* nestbuf is B_ASYNC */
6439
6440 /* identify this nestbuf */
6441 nestbuf->b_lblkno = lblkno;
6442 KASSERT(nestbuf->b_vp == udf_node->vnode);
6443
6444 /* CD schedules on raw blkno */
6445 nestbuf->b_blkno = rblk;
6446 nestbuf->b_proc = NULL;
6447 nestbuf->b_rawblkno = rblk;
6448 nestbuf->b_udf_c_type = what;
6449
6450 /* increment our outstanding bufs counter */
6451 s = splbio();
6452 udf_node->outstanding_bufs++;
6453 splx(s);
6454
6455 udf_discstrat_queuebuf(ump, nestbuf);
6456 }
6457 out:
6458 /* if we're synchronously writing, wait for the completion */
6459 if ((buf->b_flags & B_ASYNC) == 0)
6460 biowait(buf);
6461
6462 DPRINTF(WRITE, ("\tend of write_filebuf\n"));
6463 free(mapping, M_TEMP);
6464 return;
6465 }
6466
6467 /* --------------------------------------------------------------------- */
6468
6469
6470