/* $NetBSD: udf_allocation.c,v 1.26 2009/06/24 17:09:13 reinoud Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.26 2009/06/24 17:09:13 reinoud Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

/* TODO strip */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

static void udf_record_allocation_in_node(struct udf_mount *ump,
	struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
	struct long_ad *node_ad_cpy);

/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all
 * operations; this will hopefully/likely reduce O(nlog(n)) to O(1) for most
 * functionality since actions are most likely sequential and thus seeking
 * doesn't need searching for the same or adjacent position again.
 */
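
/*
 * Throughout this file an allocation descriptor's 32-bit `len' word packs a
 * 30-bit byte length together with a 2-bit extent type in the top two bits;
 * UDF_EXT_LEN() and UDF_EXT_FLAGS() split them apart again.  A minimal
 * illustration (the value is hypothetical, not read from a disc):
 *
 *	len   = udf_rw32(s_ad.len);	// e.g. (UDF_EXT_ALLOCATED | 2048)
 *	flags = UDF_EXT_FLAGS(len);	// top two bits : extent type
 *	len   = UDF_EXT_LEN(len);	// lower 30 bits: 2048 bytes
 */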

/* --------------------------------------------------------------------- */

#if 0
#if 1
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen = %"PRIu64"\n", inflen);
	printf("\t\t");

	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags >> 30);
		printf("] ");

		if (flags == UDF_EXT_REDIRECT) {
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
#else
#define udf_node_dump(a)
#endif


static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}


static void
udf_node_sanity_check(struct udf_node *udf_node,
	uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad = udf_rw32(fe->l_ad);
		l_ea = udf_rw32(fe->l_ea);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad = udf_rw32(efe->l_ad);
		l_ea = udf_rw32(efe->l_ea);
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
#else
static void
udf_node_sanity_check(struct udf_node *udf_node,
	uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	uint64_t inflen, logblksrec;
	int dscr_size, lb_size;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
	}
	*cnt_logblksrec = logblksrec;
	*cnt_inflen = inflen;
}
#endif

/* --------------------------------------------------------------------- */

void
udf_calc_freespace(struct udf_mount *ump, uint64_t *sizeblks, uint64_t *freeblks)
{
	struct logvol_int_desc *lvid;
	uint32_t *pos1, *pos2;
	int vpart, num_vpart;

	lvid = ump->logvol_integrity;
	*freeblks = *sizeblks = 0;

	/*
	 * Sequential media report free space directly (CD/DVD/BD-R); for
	 * other media we need the logical volume integrity.
	 *
	 * We sum up all free space here regardless of type.
	 */

	KASSERT(lvid);
	num_vpart = udf_rw32(lvid->num_part);

	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* use track info directly, summing if two tracks are open */
		/* XXX assumption: at most two tracks open */
		*freeblks = ump->data_track.free_blocks;
		if (ump->data_track.tracknr != ump->metadata_track.tracknr)
			*freeblks += ump->metadata_track.free_blocks;
		*sizeblks = ump->discinfo.last_possible_lba;
	} else {
		/* free and used space for mountpoint based on logvol integrity */
		for (vpart = 0; vpart < num_vpart; vpart++) {
			pos1 = &lvid->tables[0] + vpart;
			pos2 = &lvid->tables[0] + num_vpart + vpart;
			if (udf_rw32(*pos1) != (uint32_t) -1) {
				*freeblks += udf_rw32(*pos1);
				*sizeblks += udf_rw32(*pos2);
			}
		}
	}
	/* adjust for accounted uncommitted blocks */
	for (vpart = 0; vpart < num_vpart; vpart++)
		*freeblks -= ump->uncommitted_lbs[vpart];

	if (*freeblks > UDF_DISC_SLACK) {
		*freeblks -= UDF_DISC_SLACK;
	} else {
		*freeblks = 0;
	}
}

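/*
 * Layout of the LVID `tables' array assumed above (a sketch): the first
 * num_vpart 32-bit words hold the free-space counts per virtual partition,
 * immediately followed by num_vpart words holding the partition sizes:
 *
 *	uint32_t *free_tab = &lvid->tables[0];
 *	uint32_t *size_tab = &lvid->tables[0] + num_vpart;
 *	// free blocks of vpart n : udf_rw32(free_tab[n])
 *	// size in blocks of vpart n : udf_rw32(size_tab[n])
 *
 * A value of (uint32_t) -1 means "not recorded" and is skipped.
 */
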

static void
udf_calc_vpart_freespace(struct udf_mount *ump, uint16_t vpart_num, uint64_t *freeblks)
{
	struct logvol_int_desc *lvid;
	uint32_t *pos1;

	lvid = ump->logvol_integrity;
	*freeblks = 0;

	/*
	 * Sequential media report free space directly (CD/DVD/BD-R); for
	 * other media we need the logical volume integrity.
	 *
	 * We sum up all free space here regardless of type.
	 */

	KASSERT(lvid);
	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* XXX assumption: at most two tracks open */
		if (vpart_num == ump->data_part) {
			*freeblks = ump->data_track.free_blocks;
		} else {
			*freeblks = ump->metadata_track.free_blocks;
		}
	} else {
		/* free and used space for mountpoint based on logvol integrity */
		pos1 = &lvid->tables[0] + vpart_num;
		if (udf_rw32(*pos1) != (uint32_t) -1)
			*freeblks += udf_rw32(*pos1);
	}

	/* adjust for accounted uncommitted blocks */
	*freeblks -= ump->uncommitted_lbs[vpart_num];
}

/* --------------------------------------------------------------------- */

int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
	uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

translate_again:
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);

			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num = udf_rw32(s_icb_loc.loc.lb_num);
		vpart = udf_rw16(s_icb_loc.loc.part_num);
		lb_num += (ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}

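/*
 * Typical use of udf_translate_vtop() (a sketch, error handling elided):
 * translating a node's own ICB address to an absolute disc logical block:
 *
 *	uint32_t sector, ext_len;
 *
 *	error = udf_translate_vtop(ump, &udf_node->loc, &sector, &ext_len);
 *	if (error)
 *		return error;
 *	// `sector' is an absolute disc address; `ext_len' tells how many
 *	// logical blocks from here share this translation.
 */
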

/* XXX provisional primitive braindead version */
/* TODO use ext_res */
void
udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
	uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
{
	struct long_ad loc;
	uint32_t lb_numres, ext_res;
	int sector;

	for (sector = 0; sector < sectors; sector++) {
		memset(&loc, 0, sizeof(struct long_ad));
		loc.loc.part_num = udf_rw16(vpart_num);
		loc.loc.lb_num = udf_rw32(*lmapping);
		udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
		*pmapping = lb_numres;
		lmapping++; pmapping++;
	}
}


/* --------------------------------------------------------------------- */

/*
 * Translate an extent (in logical_blocks) into logical block numbers; used
 * for read and write operations.  DOESN'T check extents.
 */

int
udf_translate_file_extent(struct udf_node *udf_node,
	uint32_t from, uint32_t num_lb,
	uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot */
	ext_offset = from * lb_size - foffset;

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		lb_num = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num += (ext_offset + lb_size -1) / lb_size;
		overlap = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while() is necessary since the extent
		 * udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				t_ad.loc.lb_num = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}

/* --------------------------------------------------------------------- */

static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
	uint8_t *blob;
	int entry, chunk, found, error;

	KASSERT(ump);
	KASSERT(ump->logical_vol);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

	/* TODO static allocation of search chunk */

	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
	found = 0;
	error = 0;
	entry = 0;
	do {
		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
		if (chunk <= 0)
			break;
		/* load in chunk */
		error = udf_vat_read(ump->vat_node, blob, chunk,
				ump->vat_offset + lb_num * 4);

		if (error)
			break;

		/* search this chunk */
		for (entry = 0; entry < chunk / 4; entry++, lb_num++) {
			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
			lb_map = udf_rw32(udf_rw32_lbmap);
			if (lb_map == 0xffffffff) {
				found = 1;
				break;
			}
		}
	} while (!found);
	if (error) {
		printf("udf_search_free_vatloc: error reading in vat chunk "
			"(lb %d, size %d)\n", lb_num, chunk);
	}

	if (!found) {
		/* extend VAT */
		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
		lb_num = ump->vat_entries;
		ump->vat_entries++;
	}

	/* mark entry with initialiser just in case */
	lb_map = udf_rw32(0xfffffffe);
	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
		ump->vat_offset + lb_num * 4);
	ump->vat_last_free_lb = lb_num;

	free(blob, M_UDFTEMP);
	*lbnumres = lb_num;
	return 0;
}

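/*
 * VAT entry conventions relied upon above: 0xffffffff marks a free (unused)
 * VAT slot, and 0xfffffffe is written as a provisional initialiser for a
 * just-claimed slot; both are stored in on-disc endianness via udf_rw32().
 */
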

static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first free bit (a set bit marks a free block) */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				offset += 8;
				continue;
			}

			/* check for ffs overshoot */
			if (offset + bit-1 >= bitmap->max_offset) {
				offset = bitmap->max_offset;
				break;
			}

			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}


static void
udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
{
	uint32_t offset;
	uint32_t bit, bitval;
	uint8_t *bpos;

	offset = lb_num;

	/* starter bits */
	bpos = bitmap->bits + offset/8;
	bit = offset % 8;
	while ((bit != 0) && (num_lb > 0)) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
	if (num_lb == 0)
		return;

	/* whole bytes */
	KASSERT(bit == 0);
	bpos = bitmap->bits + offset / 8;
	while (num_lb >= 8) {
		KASSERT((*bpos == 0));
		DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
		*bpos = 255;
		offset += 8; num_lb -= 8;
		bpos++;
	}

	/* stop bits */
	KASSERT(num_lb < 8);
	bit = 0;
	while (num_lb > 0) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
}
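
/*
 * Note on the bitmap convention used above: a set bit means the logical
 * block is free/unallocated, a cleared bit means it is in use, matching the
 * UDF unallocated-space bitmap semantics.  Hence udf_bitmap_allocate()
 * searches with ffs() and clears bits, while udf_bitmap_free() sets them.
 */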

/* --------------------------------------------------------------------- */

/*
 * We check for overall disc space with a margin to prevent critical
 * conditions.  If disc space is low we try to force a sync() to improve our
 * estimates.  When confronted with a meta-data partition size shortage we
 * have to check whether it can be extended, and extend it when needed.
 *
 * A second strategy we could use when disc space is getting low on a disc
 * formatted with a meta-data partition is to see if there are sparse areas
 * in the meta-data partition and free blocks there for extra data.
 */

void
udf_do_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	uint16_t vpart_num, uint32_t num_lb)
{
	ump->uncommitted_lbs[vpart_num] += num_lb;
	if (udf_node)
		udf_node->uncommitted_lbs += num_lb;
}


void
udf_do_unreserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	uint16_t vpart_num, uint32_t num_lb)
{
	ump->uncommitted_lbs[vpart_num] -= num_lb;
	if (ump->uncommitted_lbs[vpart_num] < 0) {
		DPRINTF(RESERVE, ("UDF: underflow on partition reservation, "
			"part %d: %d\n", vpart_num,
			ump->uncommitted_lbs[vpart_num]));
		ump->uncommitted_lbs[vpart_num] = 0;
	}
	if (udf_node) {
		udf_node->uncommitted_lbs -= num_lb;
		if (udf_node->uncommitted_lbs < 0) {
			DPRINTF(RESERVE, ("UDF: underflow of node "
				"reservation : %d\n",
				udf_node->uncommitted_lbs));
			udf_node->uncommitted_lbs = 0;
		}
	}
}


int
udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail)
{
	uint64_t freeblks;
	uint64_t slack;
	int i, error;

	slack = 0;
	if (can_fail)
		slack = UDF_DISC_SLACK;

	error = 0;
	mutex_enter(&ump->allocate_mutex);

	/* check if there is enough space available */
	for (i = 0; i < 16; i++) {	/* XXX arbitrary number */
		udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
		if (num_lb + slack < freeblks)
			break;
		/* issue SYNC */
		DPRINTF(RESERVE, ("udf_reserve_space: issuing sync\n"));
		mutex_exit(&ump->allocate_mutex);
		udf_do_sync(ump, FSCRED, 0);
		mutex_enter(&mntvnode_lock);
		/* 1/4 second wait */
		cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
			hz/4);
		mutex_exit(&mntvnode_lock);
		mutex_enter(&ump->allocate_mutex);
	}

	/* check if there is enough space available now */
	udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
	if (num_lb + slack >= freeblks) {
		DPRINTF(RESERVE, ("udf_reserve_space: try to juggle partitions\n"));
		/* TODO juggle with data and metadata partitions if possible */
	}

	/* check if there is enough space available now */
	udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
	if (num_lb + slack <= freeblks) {
		udf_do_reserve_space(ump, udf_node, vpart_num, num_lb);
	} else {
		DPRINTF(RESERVE, ("udf_reserve_space: out of disc space\n"));
		error = ENOSPC;
	}

	mutex_exit(&ump->allocate_mutex);
	return error;
}

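/*
 * Reservation lifecycle as used by callers in this file (a sketch; compare
 * the AED handling in udf_append_adslot() below): space is reserved first
 * and later committed by udf_allocate_space(), which credits the
 * reservation back:
 *
 *	error = udf_reserve_space(ump, udf_node, UDF_C_NODE,
 *			vpart_num, 1, true);
 *	if (error)
 *		return error;
 *	error = udf_allocate_space(ump, udf_node, UDF_C_NODE,
 *			vpart_num, 1, &lmapping);
 */
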

void
udf_cleanup_reservation(struct udf_node *udf_node)
{
	struct udf_mount *ump = udf_node->ump;
	int vpart_num;

	mutex_enter(&ump->allocate_mutex);

	/* compensate for overlapping blocks */
	DPRINTF(RESERVE, ("UDF: overlapped %d blocks in count\n", udf_node->uncommitted_lbs));

	vpart_num = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
	udf_do_unreserve_space(ump, udf_node, vpart_num, udf_node->uncommitted_lbs);

	DPRINTF(RESERVE, ("\ttotal now %d\n", ump->uncommitted_lbs[vpart_num]));

	/* sanity */
	if (ump->uncommitted_lbs[vpart_num] < 0)
		ump->uncommitted_lbs[vpart_num] = 0;

	mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Allocate an extent of given length on a given virtual partition.  It
 * doesn't have to be one contiguous stretch.
 */

int
udf_allocate_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	alloc_type = ump->vtop_alloc[vpart_num];
	is_node = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;

			/* reserve on the backing sequential partition since
			 * that partition is credited back later */
			udf_do_reserve_space(ump, udf_node,
				ump->vtop[vpart_num], num_lb);
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num = 0; lb_num < num_lb - alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num = 0; lb_num < num_lb - alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R  */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R  */
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

	if (!error) {
		/* credit our partition since we have committed the space */
		udf_do_unreserve_space(ump, udf_node, vpart_num, num_lb);
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64, (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}

/* --------------------------------------------------------------------- */

void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if it's defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Allocate a buf on disc for direct write out.  The space doesn't have to
 * be contiguous as the caller takes care of this.
 */

void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	int lb_size, blks, udf_c_type;
	int vpart_num, num_lb;
	int error, s;

	/*
	 * For each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If it's userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb = (buf->b_bcount + lb_size -1) / lb_size;
	blks = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	/* select partition to record the buffer on */
	vpart_num = *vpart_nump = udf_get_record_vpart(ump, udf_c_type);

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, it's already allocated */
		if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
			return;

		/* allocate on its backing sequential partition */
		vpart_num = ump->data_part;
	}

	/* XXX can this still happen? */
	/* do allocation on the selected partition */
	error = udf_allocate_space(ump, udf_node, udf_c_type,
		vpart_num, num_lb, lmapping);
	if (error) {
		/*
		 * ARGH! we haven't done our accounting right! it should
		 * always succeed.
		 */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* If it's userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) ||
	    (udf_c_type == UDF_C_FIDS) ||
	    (udf_c_type == UDF_C_METADATA_SBM))
	{
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
		udf_node->outstanding_bufs--;
		splx(s);
	}
}

/* --------------------------------------------------------------------- */

/*
 * Try to merge a1 with the new piece a2.  udf_ads_merge returns nonzero
 * when this is not (or no longer) possible; the remaining piece is
 * returned in a2.
 */

static int
udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
{
	uint32_t max_len, merge_len;
	uint32_t a1_len, a2_len;
	uint32_t a1_flags, a2_flags;
	uint32_t a1_lbnum, a2_lbnum;
	uint16_t a1_part, a2_part;

	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
	a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
	a1_lbnum = udf_rw32(a1->loc.lb_num);
	a1_part = udf_rw16(a1->loc.part_num);

	a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
	a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
	a2_lbnum = udf_rw32(a2->loc.lb_num);
	a2_part = udf_rw16(a2->loc.part_num);

	/* do both define the same kind of space? */
	if (a1_flags != a2_flags)
		return 1;

	if (a1_flags != UDF_EXT_FREE) {
		/* the same partition */
		if (a1_part != a2_part)
			return 1;

		/* a2 is successor of a1 */
		if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
			return 1;
	}

	/* merge as much from a2 as possible */
	merge_len = MIN(a2_len, max_len - a1_len);
	a1_len += merge_len;
	a2_len -= merge_len;
	a2_lbnum += merge_len/lb_size;

	a1->len = udf_rw32(a1_len | a1_flags);
	a2->len = udf_rw32(a2_len | a2_flags);
	a2->loc.lb_num = udf_rw32(a2_lbnum);

	if (a2_len > 0)
		return 1;

	/* fully merged */
	return 0;
}

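/*
 * Example of the merge contract above (hypothetical numbers, lb_size 2048):
 * a1 = {allocated, lb_num 100, len 4096} and a2 = {allocated, lb_num 102,
 * len 2048} describe adjacent space, so udf_ads_merge() grows a1 to len
 * 6144, shrinks a2 to len 0 and returns 0 (fully merged).  Non-adjacent or
 * differently flagged extents are left untouched and 1 is returned.
 */
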
/* --------------------------------------------------------------------- */

static void
udf_wipe_adslots(struct udf_node *udf_node)
{
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	uint64_t inflen, objsize;
	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
	uint8_t *data_pos;
	int extnr;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	max_l_ad = lb_size - dscr_size - l_ea;

	/* wipe fe/efe */
	memset(data_pos, 0, max_l_ad);
	crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
	if (fe) {
		fe->l_ad = udf_rw32(0);
		fe->logblks_rec = udf_rw64(0);
		fe->tag.desc_crc_len = udf_rw16(crclen);
	} else {
		efe->l_ad = udf_rw32(0);
		efe->logblks_rec = udf_rw64(0);
		efe->tag.desc_crc_len = udf_rw16(crclen);
	}

	/* wipe all allocation extent entries */
	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		data_pos = (uint8_t *) ext->data;
		max_l_ad = lb_size - dscr_size;
		memset(data_pos, 0, max_l_ad);
		ext->l_ad = udf_rw32(0);

		crclen = dscr_size - UDF_DESC_TAG_LENGTH;
		ext->tag.desc_crc_len = udf_rw16(crclen);
	}
	udf_node->i_flags |= IN_NODE_REBUILD;
}

/* --------------------------------------------------------------------- */
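
/*
 * Allocation descriptors that no longer fit inside the (E)FE spill over
 * into a chain of allocation extent descriptors (AEDs): the last slot of a
 * full descriptor is a UDF_EXT_REDIRECT extent whose location names the
 * next AED.  udf_get_adslot() and udf_append_adslot() below walk and grow
 * this chain.
 */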

void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, it's EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		offset = offset - l_ad;
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}

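/*
 * The canonical walk over a node's allocation descriptors, as used all over
 * this file (a sketch):
 *
 *	slot = 0;
 *	for (;;) {
 *		udf_get_adslot(udf_node, slot, &s_ad, &eof);
 *		if (eof)
 *			break;
 *		len   = udf_rw32(s_ad.len);
 *		flags = UDF_EXT_FLAGS(len);
 *		...
 *		slot++;
 *	}
 */
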
1655 /* --------------------------------------------------------------------- */
1656
1657 int
1658 udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
1659 struct udf_mount *ump = udf_node->ump;
1660 union dscrptr *dscr, *extdscr;
1661 struct file_entry *fe;
1662 struct extfile_entry *efe;
1663 struct alloc_ext_entry *ext;
1664 struct icb_tag *icbtag;
1665 struct short_ad *short_ad;
1666 struct long_ad *long_ad, o_icb, l_icb;
1667 uint64_t logblks_rec, *logblks_rec_p;
1668 uint64_t lmapping;
1669 uint32_t offset, rest, len, lb_num;
1670 uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
1671 uint32_t flags;
1672 uint16_t vpart_num;
1673 uint8_t *data_pos;
1674 int icbflags, addr_type, adlen, extnr;
1675 int error;
1676
1677 lb_size = udf_rw32(ump->logical_vol->lb_size);
1678 vpart_num = udf_rw16(udf_node->loc.loc.part_num);
1679
1680 /* determine what descriptor we are in */
1681 fe = udf_node->fe;
1682 efe = udf_node->efe;
1683 if (fe) {
1684 icbtag = &fe->icbtag;
1685 dscr = (union dscrptr *) fe;
1686 dscr_size = sizeof(struct file_entry) -1;
1687
1688 l_ea = udf_rw32(fe->l_ea);
1689 l_ad_p = &fe->l_ad;
1690 logblks_rec_p = &fe->logblks_rec;
1691 } else {
1692 icbtag = &efe->icbtag;
1693 dscr = (union dscrptr *) efe;
1694 dscr_size = sizeof(struct extfile_entry) -1;
1695
1696 l_ea = udf_rw32(efe->l_ea);
1697 l_ad_p = &efe->l_ad;
1698 logblks_rec_p = &efe->logblks_rec;
1699 }
1700 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
1701 max_l_ad = lb_size - dscr_size - l_ea;
1702
1703 icbflags = udf_rw16(icbtag->flags);
1704 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1705
1706 /* just in case we're called on an intern, its EOF */
1707 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1708 panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
1709 }
1710
1711 adlen = 0;
1712 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1713 adlen = sizeof(struct short_ad);
1714 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1715 adlen = sizeof(struct long_ad);
1716 }
1717
1718 /* clean up given long_ad since it can be a synthesized one */
1719 flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
1720 if (flags == UDF_EXT_FREE) {
1721 icb->loc.part_num = udf_rw16(0);
1722 icb->loc.lb_num = udf_rw32(0);
1723 }
1724
1725 /* if offset too big, we go to the allocation extensions */
1726 l_ad = udf_rw32(*l_ad_p);
1727 offset = (*slot) * adlen;
1728 extnr = -1;
1729 while (offset >= l_ad) {
1730 /* check if our last entry is a redirect */
1731 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1732 short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
1733 l_icb.len = short_ad->len;
1734 l_icb.loc.part_num = udf_node->loc.loc.part_num;
1735 l_icb.loc.lb_num = short_ad->lb_num;
1736 } else {
1737 KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
1738 long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
1739 l_icb = *long_ad;
1740 }
1741 flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
1742 if (flags != UDF_EXT_REDIRECT) {
1743 /* only one past the last one is adressable */
1744 break;
1745 }
1746
1747 /* advance to next extent */
1748 extnr++;
1749 KASSERT(extnr < udf_node->num_extensions);
1750 offset = offset - l_ad;
1751
1752 ext = udf_node->ext[extnr];
1753 dscr = (union dscrptr *) ext;
1754 dscr_size = sizeof(struct alloc_ext_entry) -1;
1755 max_l_ad = lb_size - dscr_size;
1756 l_ad_p = &ext->l_ad;
1757 l_ad = udf_rw32(*l_ad_p);
1758 data_pos = (uint8_t *) ext + dscr_size;
1759 }
1760 DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
1761 extnr, offset, udf_rw32(*l_ad_p)));
1762 KASSERT(l_ad == udf_rw32(*l_ad_p));
1763
1764 /* offset is offset within the current (E)FE/AED */
1765 l_ad = udf_rw32(*l_ad_p);
1766 crclen = udf_rw16(dscr->tag.desc_crc_len);
1767 logblks_rec = udf_rw64(*logblks_rec_p);
1768
1769 /* overwriting old piece? */
1770 if (offset < l_ad) {
1771 /* overwrite entry; compensate for the old element */
1772 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1773 short_ad = (struct short_ad *) (data_pos + offset);
1774 o_icb.len = short_ad->len;
1775 o_icb.loc.part_num = udf_rw16(0); /* ignore */
1776 o_icb.loc.lb_num = short_ad->lb_num;
1777 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1778 long_ad = (struct long_ad *) (data_pos + offset);
1779 o_icb = *long_ad;
1780 } else {
1781 panic("Invalid address type in udf_append_adslot\n");
1782 }
1783
1784 len = udf_rw32(o_icb.len);
1785 if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
1786 /* adjust counts */
1787 len = UDF_EXT_LEN(len);
1788 logblks_rec -= (len + lb_size -1) / lb_size;
1789 }
1790 }
1791
1792 /* check if we're not appending a redirection */
1793 flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
1794 KASSERT(flags != UDF_EXT_REDIRECT);
1795
1796 /* round down available space */
1797 rest = adlen * ((max_l_ad - offset) / adlen);
1798 if (rest <= adlen) {
1799 /* have to append aed, see if we already have a spare one */
1800 extnr++;
1801 ext = udf_node->ext[extnr];
1802 l_icb = udf_node->ext_loc[extnr];
1803 if (ext == NULL) {
1804 DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));
1805
1806 error = udf_reserve_space(ump, NULL, UDF_C_NODE,
1807 vpart_num, 1, /* can fail */ false);
1808 if (error) {
1809 printf("UDF: couldn't reserve space for AED!\n");
1810 return error;
1811 }
1812 error = udf_allocate_space(ump, NULL, UDF_C_NODE,
1813 vpart_num, 1, &lmapping);
1814 lb_num = lmapping;
1815 if (error)
1816 panic("UDF: couldn't allocate AED!\n");
1817
1818 /* initialise pointer to location */
1819 memset(&l_icb, 0, sizeof(struct long_ad));
1820 l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
1821 l_icb.loc.lb_num = udf_rw32(lb_num);
1822 l_icb.loc.part_num = udf_rw16(vpart_num);
1823
1824 /* create new aed descriptor */
1825 udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
1826 ext = &extdscr->aee;
1827
1828 udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
1829 dscr_size = sizeof(struct alloc_ext_entry) -1;
1830 max_l_ad = lb_size - dscr_size;
1831 memset(ext->data, 0, max_l_ad);
1832 ext->l_ad = udf_rw32(0);
1833 ext->tag.desc_crc_len =
1834 udf_rw16(dscr_size - UDF_DESC_TAG_LENGTH);
1835
1836 /* declare aed */
1837 udf_node->num_extensions++;
1838 udf_node->ext_loc[extnr] = l_icb;
1839 udf_node->ext[extnr] = ext;
1840 }
1841 /* add redirect and adjust l_ad and crclen for old descr */
1842 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1843 short_ad = (struct short_ad *) (data_pos + offset);
1844 short_ad->len = l_icb.len;
1845 short_ad->lb_num = l_icb.loc.lb_num;
1846 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1847 long_ad = (struct long_ad *) (data_pos + offset);
1848 *long_ad = l_icb;
1849 }
1850 l_ad += adlen;
1851 crclen += adlen;
1852 dscr->tag.desc_crc_len = udf_rw16(crclen);
1853 *l_ad_p = udf_rw32(l_ad);
1854
1855 /* advance to the new extension */
1856 KASSERT(ext != NULL);
1857 dscr = (union dscrptr *) ext;
1858 dscr_size = sizeof(struct alloc_ext_entry) -1;
1859 max_l_ad = lb_size - dscr_size;
1860 data_pos = (uint8_t *) dscr + dscr_size;
1861
1862 l_ad_p = &ext->l_ad;
1863 l_ad = udf_rw32(*l_ad_p);
1864 crclen = udf_rw16(dscr->tag.desc_crc_len);
1865 offset = 0;
1866
1867 /* adjust callees slot count for link insert */
1868 *slot += 1;
1869 }
1870
1871 /* write out the element */
1872 DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
1873 "len %d, flags %d\n", data_pos + offset,
1874 icb->loc.part_num, icb->loc.lb_num,
1875 UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
1876 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1877 short_ad = (struct short_ad *) (data_pos + offset);
1878 short_ad->len = icb->len;
1879 short_ad->lb_num = icb->loc.lb_num;
1880 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1881 long_ad = (struct long_ad *) (data_pos + offset);
1882 *long_ad = *icb;
1883 }
1884
1885 /* adjust logblks recorded count */
1886 len = udf_rw32(icb->len);
1887 flags = UDF_EXT_FLAGS(len);
1888 if (flags == UDF_EXT_ALLOCATED)
1889 logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
1890 *logblks_rec_p = udf_rw64(logblks_rec);
1891
1892 /* adjust l_ad and crclen when needed */
1893 if (offset >= l_ad) {
1894 l_ad += adlen;
1895 crclen += adlen;
1896 dscr->tag.desc_crc_len = udf_rw16(crclen);
1897 *l_ad_p = udf_rw32(l_ad);
1898 }
1899
1900 return 0;
1901 }
1902
1903 /* --------------------------------------------------------------------- */
1904
1905 static void
1906 udf_count_alloc_exts(struct udf_node *udf_node)
1907 {
1908 struct long_ad s_ad;
1909 uint32_t lb_num, len, flags;
1910 uint16_t vpart_num;
1911 int slot, eof;
1912 int num_extents, extnr;
1913 int lb_size;
1914
1915 if (udf_node->num_extensions == 0)
1916 return;
1917
1918 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1919 /* count number of allocation extents in use */
1920 num_extents = 0;
1921 slot = 0;
1922 for (;;) {
1923 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1924 if (eof)
1925 break;
1926 len = udf_rw32(s_ad.len);
1927 flags = UDF_EXT_FLAGS(len);
1928
1929 if (flags == UDF_EXT_REDIRECT)
1930 num_extents++;
1931
1932 slot++;
1933 }
1934
1935 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1936 num_extents));
1937
1938 /* XXX choice: we could delay freeing them on node writeout */
1939 /* free excess entries */
1940 extnr = num_extents;
1941 for (;extnr < udf_node->num_extensions; extnr++) {
1942 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1943 		/* free descriptor */
1944 s_ad = udf_node->ext_loc[extnr];
1945 udf_free_logvol_dscr(udf_node->ump, &s_ad,
1946 udf_node->ext[extnr]);
1947 udf_node->ext[extnr] = NULL;
1948
1949 /* free disc space */
1950 lb_num = udf_rw32(s_ad.loc.lb_num);
1951 vpart_num = udf_rw16(s_ad.loc.part_num);
1952 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1953
1954 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1955 }
1956
1957 /* set our new number of allocation extents */
1958 udf_node->num_extensions = num_extents;
1959 }
1960
1961
1962 /* --------------------------------------------------------------------- */
1963
1964 /*
1965  * Adjust the node's allocation descriptors to reflect the new mapping; take
1966  * note that we might glue new extents onto existing allocation descriptors.
1967  *
1968  * XXX Note there can only be one allocation being recorded per mount; maybe
1969  * do explicit allocation in the schedule thread?
1970  */
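/*
 * Outline, matching the numbered steps below: 1) copy the descriptors in
 * front of the write to a scratch array, 2) truncate the slot the write
 * starts in, 3) insert the new mappings, 4) free the blocks they replace,
 * 5) copy the tail, then 6-8) wipe and rebuild the node's descriptor
 * space, merging adjacent extents where possible.
 */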
1971
1972 static void
1973 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
1974 uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
1975 {
1976 struct vnode *vp = buf->b_vp;
1977 struct udf_node *udf_node = VTOI(vp);
1978 struct file_entry *fe;
1979 struct extfile_entry *efe;
1980 struct icb_tag *icbtag;
1981 struct long_ad s_ad, c_ad;
1982 uint64_t inflen, from, till;
1983 uint64_t foffset, end_foffset, restart_foffset;
1984 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
1985 uint32_t num_lb, len, flags, lb_num;
1986 uint32_t run_start;
1987 uint32_t slot_offset, replace_len, replace;
1988 int addr_type, icbflags;
1989 // int udf_c_type = buf->b_udf_c_type;
1990 int lb_size, run_length, eof;
1991 int slot, cpy_slot, cpy_slots, restart_slot;
1992 int error;
1993
1994 DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
1995
1996 #if 0
1997 /* XXX disable sanity check for now */
1998 /* sanity check ... should be panic ? */
1999 if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
2000 return;
2001 #endif
2002
2003 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2004
2005 /* do the job */
2006 UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
2007 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2008
2009 fe = udf_node->fe;
2010 efe = udf_node->efe;
2011 if (fe) {
2012 icbtag = &fe->icbtag;
2013 inflen = udf_rw64(fe->inf_len);
2014 } else {
2015 icbtag = &efe->icbtag;
2016 inflen = udf_rw64(efe->inf_len);
2017 }
2018
2019 	/* make sure `till' does not extend past the file's information length */
2020 from = buf->b_lblkno * lb_size;
2021 till = MIN(inflen, from + buf->b_resid);
2022
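	/* round up: a partial trailing block still occupies a whole logical block */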
2023 num_lb = (till - from + lb_size -1) / lb_size;
2024
2025 DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
2026
2027 icbflags = udf_rw16(icbtag->flags);
2028 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2029
2030 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2031 /* nothing to do */
2032 /* XXX clean up rest of node? just in case? */
2033 UDF_UNLOCK_NODE(udf_node, 0);
2034 return;
2035 }
2036
2037 slot = 0;
2038 cpy_slot = 0;
2039 foffset = 0;
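	/* walk the allocation descriptors with udf_get_adslot(); `eof' flags the end */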
2040
2041 /* 1) copy till first overlap piece to the rewrite buffer */
2042 for (;;) {
2043 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2044 if (eof) {
2045 DPRINTF(WRITE,
2046 ("Record allocation in node "
2047 "failed: encountered EOF\n"));
2048 UDF_UNLOCK_NODE(udf_node, 0);
2049 buf->b_error = EINVAL;
2050 return;
2051 }
2052 len = udf_rw32(s_ad.len);
2053 flags = UDF_EXT_FLAGS(len);
2054 len = UDF_EXT_LEN(len);
2055
2056 if (flags == UDF_EXT_REDIRECT) {
2057 slot++;
2058 continue;
2059 }
2060
2061 end_foffset = foffset + len;
2062 if (end_foffset > from)
2063 break; /* found */
2064
2065 node_ad_cpy[cpy_slot++] = s_ad;
2066
2067 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2068 "-> stack\n",
2069 udf_rw16(s_ad.loc.part_num),
2070 udf_rw32(s_ad.loc.lb_num),
2071 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2072 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2073
2074 foffset = end_foffset;
2075 slot++;
2076 }
2077 restart_slot = slot;
2078 restart_foffset = foffset;
2079
2080 /* 2) trunc overlapping slot at overlap and copy it */
2081 slot_offset = from - foffset;
2082 if (slot_offset > 0) {
2083 DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
2084 slot_offset, flags >> 30, flags));
2085
2086 s_ad.len = udf_rw32(slot_offset | flags);
2087 node_ad_cpy[cpy_slot++] = s_ad;
2088
2089 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2090 "-> stack\n",
2091 udf_rw16(s_ad.loc.part_num),
2092 udf_rw32(s_ad.loc.lb_num),
2093 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2094 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2095 }
2096 foffset += slot_offset;
2097
2098 /* 3) insert new mappings */
2099 memset(&s_ad, 0, sizeof(struct long_ad));
2101 for (lb_num = 0; lb_num < num_lb; lb_num++) {
2102 run_start = mapping[lb_num];
2103 run_length = 1;
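		/* collapse adjacent identical or consecutive mappings into one run */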
2104 		while (lb_num < num_lb-1) {
2105 			if ((mapping[lb_num+1] != mapping[lb_num]+1) &&
2106 			    (mapping[lb_num+1] != mapping[lb_num]))
2107 				break;
2108 			run_length++;
2109 			lb_num++;
2110 		}
2111 /* insert slot for this mapping */
2112 len = run_length * lb_size;
2113
2114 /* bounds checking */
2115 if (foffset + len > till)
2116 len = till - foffset;
2117 KASSERT(foffset + len <= inflen);
2118
2119 s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
2120 s_ad.loc.part_num = udf_rw16(vpart_num);
2121 s_ad.loc.lb_num = udf_rw32(run_start);
2122
2123 foffset += len;
2124
2125 /* paranoia */
2126 if (len == 0) {
2127 DPRINTF(WRITE,
2128 ("Record allocation in node "
2129 "failed: insert failed\n"));
2130 UDF_UNLOCK_NODE(udf_node, 0);
2131 buf->b_error = EINVAL;
2132 return;
2133 }
2134 node_ad_cpy[cpy_slot++] = s_ad;
2135
2136 DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
2137 "flags %d -> stack\n",
2138 udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
2139 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2140 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2141 }
2142
2143 /* 4) pop replaced length */
2144 slot = restart_slot;
2145 foffset = restart_foffset;
2146
2147 	replace_len = till - foffset;	/* total number of bytes to pop */
2148 	slot_offset = from - foffset;	/* offset in first encountered slot */
2149 KASSERT((slot_offset % lb_size) == 0);
2150
2151 for (;;) {
2152 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2153 if (eof)
2154 break;
2155
2156 len = udf_rw32(s_ad.len);
2157 flags = UDF_EXT_FLAGS(len);
2158 len = UDF_EXT_LEN(len);
2159 lb_num = udf_rw32(s_ad.loc.lb_num);
2160
2161 if (flags == UDF_EXT_REDIRECT) {
2162 slot++;
2163 continue;
2164 }
2165
2166 DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
2167 "replace_len %d, "
2168 "vp %d, lb %d, len %d, flags %d\n",
2169 slot, slot_offset, replace_len,
2170 udf_rw16(s_ad.loc.part_num),
2171 udf_rw32(s_ad.loc.lb_num),
2172 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2173 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2174
2175 /* adjust for slot offset */
2176 if (slot_offset) {
2177 DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
2178 lb_num += slot_offset / lb_size;
2179 len -= slot_offset;
2180 foffset += slot_offset;
2181 replace_len -= slot_offset;
2182
2183 /* mark adjusted */
2184 slot_offset = 0;
2185 }
2186
2187 /* advance for (the rest of) this slot */
2188 replace = MIN(len, replace_len);
2189 DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
2190
2191 /* advance for this slot */
2192 if (replace) {
2193 			/* note: don't round DOWN on num_lb since we then
2194 * forget the last partial one */
2195 num_lb = (replace + lb_size - 1) / lb_size;
2196 if (flags != UDF_EXT_FREE) {
2197 udf_free_allocated_space(ump, lb_num,
2198 udf_rw16(s_ad.loc.part_num), num_lb);
2199 }
2200 lb_num += num_lb;
2201 len -= replace;
2202 foffset += replace;
2203 replace_len -= replace;
2204 }
2205
2206 /* do we have a slot tail ? */
2207 if (len) {
2208 KASSERT(foffset % lb_size == 0);
2209
2210 /* we arrived at our point, push remainder */
2211 s_ad.len = udf_rw32(len | flags);
2212 s_ad.loc.lb_num = udf_rw32(lb_num);
2213 if (flags == UDF_EXT_FREE)
2214 s_ad.loc.lb_num = udf_rw32(0);
2215 node_ad_cpy[cpy_slot++] = s_ad;
2216 foffset += len;
2217 slot++;
2218
2219 DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
2220 "-> stack\n",
2221 udf_rw16(s_ad.loc.part_num),
2222 udf_rw32(s_ad.loc.lb_num),
2223 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2224 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2225 break;
2226 }
2227
2228 slot++;
2229 }
2230
2231 /* 5) copy remainder */
2232 for (;;) {
2233 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2234 if (eof)
2235 break;
2236
2237 len = udf_rw32(s_ad.len);
2238 flags = UDF_EXT_FLAGS(len);
2239 len = UDF_EXT_LEN(len);
2240
2241 if (flags == UDF_EXT_REDIRECT) {
2242 slot++;
2243 continue;
2244 }
2245
2246 node_ad_cpy[cpy_slot++] = s_ad;
2247
2248 DPRINTF(ALLOC, ("\t5: insert new mapping "
2249 "vp %d lb %d, len %d, flags %d "
2250 "-> stack\n",
2251 udf_rw16(s_ad.loc.part_num),
2252 udf_rw32(s_ad.loc.lb_num),
2253 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2254 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2255
2256 slot++;
2257 }
2258
2259 /* 6) reset node descriptors */
2260 udf_wipe_adslots(udf_node);
2261
2262 /* 7) copy back extents; merge when possible. Recounting on the fly */
2263 cpy_slots = cpy_slot;
2264
2265 c_ad = node_ad_cpy[0];
2266 slot = 0;
2267 DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
2268 "lb %d, len %d, flags %d\n",
2269 udf_rw16(c_ad.loc.part_num),
2270 udf_rw32(c_ad.loc.lb_num),
2271 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2272 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2273
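	/* udf_ads_merge() grows c_ad with s_ad when it can; nonzero means it could not */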
2274 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2275 s_ad = node_ad_cpy[cpy_slot];
2276
2277 DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
2278 "lb %d, len %d, flags %d\n",
2279 udf_rw16(s_ad.loc.part_num),
2280 udf_rw32(s_ad.loc.lb_num),
2281 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2282 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2283
2284 /* see if we can merge */
2285 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2286 			/* not mergeable (anymore) */
2287 DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
2288 "len %d, flags %d\n",
2289 udf_rw16(c_ad.loc.part_num),
2290 udf_rw32(c_ad.loc.lb_num),
2291 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2292 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2293
2294 error = udf_append_adslot(udf_node, &slot, &c_ad);
2295 if (error) {
2296 buf->b_error = error;
2297 goto out;
2298 }
2299 c_ad = s_ad;
2300 slot++;
2301 }
2302 }
2303
2304 /* 8) push rest slot (if any) */
2305 	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2306 DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
2307 "len %d, flags %d\n",
2308 udf_rw16(c_ad.loc.part_num),
2309 udf_rw32(c_ad.loc.lb_num),
2310 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2311 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2312
2313 error = udf_append_adslot(udf_node, &slot, &c_ad);
2314 if (error) {
2315 buf->b_error = error;
2316 goto out;
2317 }
2318 }
2319
2320 out:
2321 udf_count_alloc_exts(udf_node);
2322
2323 /* the node's descriptors should now be sane */
2324 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2325 UDF_UNLOCK_NODE(udf_node, 0);
2326
2327 KASSERT(orig_inflen == new_inflen);
2328 KASSERT(new_lbrec >= orig_lbrec);
2329
2330 return;
2331 }
2332
2333 /* --------------------------------------------------------------------- */
2334
2335 int
2336 udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
2337 {
2338 union dscrptr *dscr;
2339 struct vnode *vp = udf_node->vnode;
2340 struct udf_mount *ump = udf_node->ump;
2341 struct file_entry *fe;
2342 struct extfile_entry *efe;
2343 struct icb_tag *icbtag;
2344 struct long_ad c_ad, s_ad;
2345 uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
2346 uint64_t foffset, end_foffset;
2347 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2348 uint32_t lb_size, dscr_size, crclen, lastblock_grow;
2349 uint32_t icbflags, len, flags, max_len;
2350 uint32_t max_l_ad, l_ad, l_ea;
2351 uint16_t my_part, dst_part;
2352 uint8_t *data_pos, *evacuated_data;
2353 int addr_type;
2354 int slot, cpy_slot;
2355 int eof, error;
2356
2357 DPRINTF(ALLOC, ("udf_grow_node\n"));
2358
2359 UDF_LOCK_NODE(udf_node, 0);
2360 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2361
2362 lb_size = udf_rw32(ump->logical_vol->lb_size);
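	/* largest extent length that is a whole number of logical blocks */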
2363 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2364
2365 fe = udf_node->fe;
2366 efe = udf_node->efe;
2367 if (fe) {
2368 dscr = (union dscrptr *) fe;
2369 icbtag = &fe->icbtag;
2370 inflen = udf_rw64(fe->inf_len);
2371 objsize = inflen;
2372 dscr_size = sizeof(struct file_entry) -1;
2373 l_ea = udf_rw32(fe->l_ea);
2374 l_ad = udf_rw32(fe->l_ad);
2375 } else {
2376 dscr = (union dscrptr *) efe;
2377 icbtag = &efe->icbtag;
2378 inflen = udf_rw64(efe->inf_len);
2379 objsize = udf_rw64(efe->obj_size);
2380 dscr_size = sizeof(struct extfile_entry) -1;
2381 l_ea = udf_rw32(efe->l_ea);
2382 l_ad = udf_rw32(efe->l_ad);
2383 }
2384 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
2385 max_l_ad = lb_size - dscr_size - l_ea;
2386
2387 icbflags = udf_rw16(icbtag->flags);
2388 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2389
2390 old_size = inflen;
2391 size_diff = new_size - old_size;
2392
2393 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2394
2395 evacuated_data = NULL;
2396 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2397 if (l_ad + size_diff <= max_l_ad) {
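			/* file data is embedded in the descriptor, so growing in place is enough */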
2398 /* only reflect size change directly in the node */
2399 inflen += size_diff;
2400 objsize += size_diff;
2401 l_ad += size_diff;
2402 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2403 if (fe) {
2404 fe->inf_len = udf_rw64(inflen);
2405 fe->l_ad = udf_rw32(l_ad);
2406 fe->tag.desc_crc_len = udf_rw16(crclen);
2407 } else {
2408 efe->inf_len = udf_rw64(inflen);
2409 efe->obj_size = udf_rw64(objsize);
2410 efe->l_ad = udf_rw32(l_ad);
2411 efe->tag.desc_crc_len = udf_rw16(crclen);
2412 }
2413 error = 0;
2414
2415 /* set new size for uvm */
2416 uvm_vnp_setsize(vp, old_size);
2417 uvm_vnp_setwritesize(vp, new_size);
2418
2419 #if 0
2420 /* zero append space in buffer */
2421 uvm_vnp_zerorange(vp, old_size, new_size - old_size);
2422 #endif
2423
2424 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2425
2426 /* unlock */
2427 UDF_UNLOCK_NODE(udf_node, 0);
2428
2429 KASSERT(new_inflen == orig_inflen + size_diff);
2430 KASSERT(new_lbrec == orig_lbrec);
2431 KASSERT(new_lbrec == 0);
2432 return 0;
2433 }
2434
2435 DPRINTF(ALLOC, ("\tCONVERT from internal\n"));
2436
2437 if (old_size > 0) {
2438 /* allocate some space and copy in the stuff to keep */
2439 evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
2440 memset(evacuated_data, 0, lb_size);
2441
2442 /* node is locked, so safe to exit mutex */
2443 UDF_UNLOCK_NODE(udf_node, 0);
2444
2445 /* read in using the `normal' vn_rdwr() */
2446 error = vn_rdwr(UIO_READ, udf_node->vnode,
2447 evacuated_data, old_size, 0,
2448 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2449 FSCRED, NULL, NULL);
2450
2451 /* enter again */
2452 UDF_LOCK_NODE(udf_node, 0);
2453 }
2454
2455 /* convert to a normal alloc and select type */
2456 my_part = udf_rw16(udf_node->loc.loc.part_num);
2457 dst_part = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
2458 addr_type = UDF_ICB_SHORT_ALLOC;
2459 if (dst_part != my_part)
2460 addr_type = UDF_ICB_LONG_ALLOC;
2461
2462 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2463 icbflags |= addr_type;
2464 icbtag->flags = udf_rw16(icbflags);
2465
2466 /* wipe old descriptor space */
2467 udf_wipe_adslots(udf_node);
2468
2469 memset(&c_ad, 0, sizeof(struct long_ad));
2470 c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
2471 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2472 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2473
2474 slot = 0;
2475 } else {
2476 		/* go to the last entry (if any) */
2477 slot = 0;
2478 cpy_slot = 0;
2479 foffset = 0;
2480 memset(&c_ad, 0, sizeof(struct long_ad));
2481 for (;;) {
2482 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2483 if (eof)
2484 break;
2485
2486 len = udf_rw32(c_ad.len);
2487 flags = UDF_EXT_FLAGS(len);
2488 len = UDF_EXT_LEN(len);
2489
2490 end_foffset = foffset + len;
2491 if (flags != UDF_EXT_REDIRECT)
2492 foffset = end_foffset;
2493
2494 slot++;
2495 }
2496 /* at end of adslots */
2497
2498 		/* special case: if the old size was zero there is no last slot */
2499 if (old_size == 0) {
2500 c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
2501 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2502 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2503 } else {
2504 /* refetch last slot */
2505 slot--;
2506 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2507 }
2508 }
2509
2510 /*
2511 * If the length of the last slot is not a multiple of lb_size, adjust
2512 	 * length so that it is; don't forget to adjust `append_len'! This is
2513 	 * relevant when extending existing files.
2514 */
2515 len = udf_rw32(c_ad.len);
2516 flags = UDF_EXT_FLAGS(len);
2517 len = UDF_EXT_LEN(len);
2518
2519 lastblock_grow = 0;
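	/*
	 * e.g. with lb_size 2048 and len 3000 the last block can take
	 * another 2048 - (3000 % 2048) = 1096 bytes, capped by size_diff.
	 */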
2520 if (len % lb_size > 0) {
2521 lastblock_grow = lb_size - (len % lb_size);
2522 lastblock_grow = MIN(size_diff, lastblock_grow);
2523 len += lastblock_grow;
2524 c_ad.len = udf_rw32(len | flags);
2525
2526 		/* TODO zero appended space in buffer! */
2527 /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
2528 }
2529 memset(&s_ad, 0, sizeof(struct long_ad));
2530
2531 /* size_diff can be bigger than allowed, so grow in chunks */
2532 append_len = size_diff - lastblock_grow;
2533 while (append_len > 0) {
2534 chunk = MIN(append_len, max_len);
2535 s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
2536 s_ad.loc.part_num = udf_rw16(0);
2537 s_ad.loc.lb_num = udf_rw32(0);
2538
2539 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2540 			/* not mergeable (anymore) */
2541 error = udf_append_adslot(udf_node, &slot, &c_ad);
2542 if (error)
2543 goto errorout;
2544 slot++;
2545 c_ad = s_ad;
2546 memset(&s_ad, 0, sizeof(struct long_ad));
2547 }
2548 append_len -= chunk;
2549 }
2550
2551 /* if there is a rest piece in the accumulator, append it */
2552 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2553 error = udf_append_adslot(udf_node, &slot, &c_ad);
2554 if (error)
2555 goto errorout;
2556 slot++;
2557 }
2558
2559 /* if there is a rest piece that didn't fit, append it */
2560 if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
2561 error = udf_append_adslot(udf_node, &slot, &s_ad);
2562 if (error)
2563 goto errorout;
2564 slot++;
2565 }
2566
2567 inflen += size_diff;
2568 objsize += size_diff;
2569 if (fe) {
2570 fe->inf_len = udf_rw64(inflen);
2571 } else {
2572 efe->inf_len = udf_rw64(inflen);
2573 efe->obj_size = udf_rw64(objsize);
2574 }
2575 error = 0;
2576
2577 if (evacuated_data) {
2578 /* set new write size for uvm */
2579 uvm_vnp_setwritesize(vp, old_size);
2580
2581 /* write out evacuated data */
2582 error = vn_rdwr(UIO_WRITE, udf_node->vnode,
2583 evacuated_data, old_size, 0,
2584 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2585 FSCRED, NULL, NULL);
2586 uvm_vnp_setsize(vp, old_size);
2587 }
2588
2589 errorout:
2590 if (evacuated_data)
2591 free(evacuated_data, M_UDFTEMP);
2592
2593 udf_count_alloc_exts(udf_node);
2594
2595 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2596 UDF_UNLOCK_NODE(udf_node, 0);
2597
2598 KASSERT(new_inflen == orig_inflen + size_diff);
2599 KASSERT(new_lbrec == orig_lbrec);
2600
2601 return error;
2602 }
2603
2604 /* --------------------------------------------------------------------- */
2605
2606 int
2607 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2608 {
2609 struct vnode *vp = udf_node->vnode;
2610 struct udf_mount *ump = udf_node->ump;
2611 struct file_entry *fe;
2612 struct extfile_entry *efe;
2613 struct icb_tag *icbtag;
2614 struct long_ad c_ad, s_ad, *node_ad_cpy;
2615 uint64_t size_diff, old_size, inflen, objsize;
2616 uint64_t foffset, end_foffset;
2617 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2618 uint32_t lb_size, dscr_size, crclen;
2619 uint32_t slot_offset;
2620 uint32_t len, flags, max_len;
2621 uint32_t num_lb, lb_num;
2622 uint32_t max_l_ad, l_ad, l_ea;
2623 uint16_t vpart_num;
2624 uint8_t *data_pos;
2625 int icbflags, addr_type;
2626 int slot, cpy_slot, cpy_slots;
2627 int eof, error;
2628
2629 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2630
2631 UDF_LOCK_NODE(udf_node, 0);
2632 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2633
2634 lb_size = udf_rw32(ump->logical_vol->lb_size);
2635 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2636
2637 /* do the work */
2638 fe = udf_node->fe;
2639 efe = udf_node->efe;
2640 if (fe) {
2641 icbtag = &fe->icbtag;
2642 inflen = udf_rw64(fe->inf_len);
2643 objsize = inflen;
2644 dscr_size = sizeof(struct file_entry) -1;
2645 l_ea = udf_rw32(fe->l_ea);
2646 l_ad = udf_rw32(fe->l_ad);
2647 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2648 } else {
2649 icbtag = &efe->icbtag;
2650 inflen = udf_rw64(efe->inf_len);
2651 objsize = udf_rw64(efe->obj_size);
2652 dscr_size = sizeof(struct extfile_entry) -1;
2653 l_ea = udf_rw32(efe->l_ea);
2654 l_ad = udf_rw32(efe->l_ad);
2655 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2656 }
2657 max_l_ad = lb_size - dscr_size - l_ea;
2658
2659 icbflags = udf_rw16(icbtag->flags);
2660 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2661
2662 old_size = inflen;
2663 size_diff = old_size - new_size;
2664
2665 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2666
2667 /* shrink the node to its new size */
2668 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2669 /* only reflect size change directly in the node */
2670 KASSERT(new_size <= max_l_ad);
2671 inflen -= size_diff;
2672 objsize -= size_diff;
2673 l_ad -= size_diff;
2674 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2675 if (fe) {
2676 fe->inf_len = udf_rw64(inflen);
2677 fe->l_ad = udf_rw32(l_ad);
2678 fe->tag.desc_crc_len = udf_rw16(crclen);
2679 } else {
2680 efe->inf_len = udf_rw64(inflen);
2681 efe->obj_size = udf_rw64(objsize);
2682 efe->l_ad = udf_rw32(l_ad);
2683 efe->tag.desc_crc_len = udf_rw16(crclen);
2684 }
2685 error = 0;
2686
2687 /* clear the space in the descriptor */
2688 KASSERT(old_size > new_size);
2689 memset(data_pos + new_size, 0, old_size - new_size);
2690
2691 		/* TODO zero appended space in buffer! */
2692 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2693
2694 /* set new size for uvm */
2695 uvm_vnp_setsize(vp, new_size);
2696
2697 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2698 UDF_UNLOCK_NODE(udf_node, 0);
2699
2700 KASSERT(new_inflen == orig_inflen - size_diff);
2701 KASSERT(new_lbrec == orig_lbrec);
2702 KASSERT(new_lbrec == 0);
2703
2704 return 0;
2705 }
2706
2707 /* setup node cleanup extents copy space */
2708 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2709 M_UDFMNT, M_WAITOK);
2710 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2711
2712 /*
2713 * Shrink the node by releasing the allocations and truncate the last
2714 * allocation to the new size. If the new size fits into the
2715 * allocation descriptor itself, transform it into an
2716 * UDF_ICB_INTERN_ALLOC.
2717 */
2718 slot = 0;
2719 cpy_slot = 0;
2720 foffset = 0;
2721
2722 /* 1) copy till first overlap piece to the rewrite buffer */
2723 for (;;) {
2724 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2725 if (eof) {
2726 DPRINTF(WRITE,
2727 ("Shrink node failed: "
2728 "encountered EOF\n"));
2729 error = EINVAL;
2730 goto errorout; /* panic? */
2731 }
2732 len = udf_rw32(s_ad.len);
2733 flags = UDF_EXT_FLAGS(len);
2734 len = UDF_EXT_LEN(len);
2735
2736 if (flags == UDF_EXT_REDIRECT) {
2737 slot++;
2738 continue;
2739 }
2740
2741 end_foffset = foffset + len;
2742 if (end_foffset > new_size)
2743 break; /* found */
2744
2745 node_ad_cpy[cpy_slot++] = s_ad;
2746
2747 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2748 "-> stack\n",
2749 udf_rw16(s_ad.loc.part_num),
2750 udf_rw32(s_ad.loc.lb_num),
2751 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2752 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2753
2754 foffset = end_foffset;
2755 slot++;
2756 }
2757 slot_offset = new_size - foffset;
2758
2759 /* 2) trunc overlapping slot at overlap and copy it */
2760 if (slot_offset > 0) {
2761 lb_num = udf_rw32(s_ad.loc.lb_num);
2762 vpart_num = udf_rw16(s_ad.loc.part_num);
2763
2764 if (flags == UDF_EXT_ALLOCATED) {
2765 /* note: round DOWN on num_lb */
2766 lb_num += (slot_offset + lb_size -1) / lb_size;
2767 num_lb = (len - slot_offset) / lb_size;
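			/* the partially kept block is skipped (lb_num rounds up); only whole blocks are freed */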
2768
2769 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2770 }
2771
2772 s_ad.len = udf_rw32(slot_offset | flags);
2773 node_ad_cpy[cpy_slot++] = s_ad;
2774 slot++;
2775
2776 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2777 "-> stack\n",
2778 udf_rw16(s_ad.loc.part_num),
2779 udf_rw32(s_ad.loc.lb_num),
2780 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2781 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2782 }
2783
2784 /* 3) delete remainder */
2785 for (;;) {
2786 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2787 if (eof)
2788 break;
2789
2790 len = udf_rw32(s_ad.len);
2791 flags = UDF_EXT_FLAGS(len);
2792 len = UDF_EXT_LEN(len);
2793
2794 if (flags == UDF_EXT_REDIRECT) {
2795 slot++;
2796 continue;
2797 }
2798
2799 DPRINTF(ALLOC, ("\t3: delete remainder "
2800 "vp %d lb %d, len %d, flags %d\n",
2801 udf_rw16(s_ad.loc.part_num),
2802 udf_rw32(s_ad.loc.lb_num),
2803 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2804 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2805
2806 if (flags == UDF_EXT_ALLOCATED) {
2807 lb_num = udf_rw32(s_ad.loc.lb_num);
2808 vpart_num = udf_rw16(s_ad.loc.part_num);
2809 num_lb = (len + lb_size - 1) / lb_size;
2810
2811 udf_free_allocated_space(ump, lb_num, vpart_num,
2812 num_lb);
2813 }
2814
2815 slot++;
2816 }
2817
2818 /* 4) if it will fit into the descriptor then convert */
2819 if (new_size < max_l_ad) {
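		/* the remaining file data would fit inside the node descriptor itself */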
2820 /*
2821 		 * rescue/evacuate the old piece by reading it in, and convert it
2822 * to internal alloc.
2823 */
2824 if (new_size == 0) {
2825 /* XXX/TODO only for zero sizing now */
2826 udf_wipe_adslots(udf_node);
2827
2828 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2829 icbflags |= UDF_ICB_INTERN_ALLOC;
2830 icbtag->flags = udf_rw16(icbflags);
2831
2832 inflen -= size_diff; KASSERT(inflen == 0);
2833 objsize -= size_diff;
2834 l_ad = new_size;
2835 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2836 if (fe) {
2837 fe->inf_len = udf_rw64(inflen);
2838 fe->l_ad = udf_rw32(l_ad);
2839 fe->tag.desc_crc_len = udf_rw16(crclen);
2840 } else {
2841 efe->inf_len = udf_rw64(inflen);
2842 efe->obj_size = udf_rw64(objsize);
2843 efe->l_ad = udf_rw32(l_ad);
2844 efe->tag.desc_crc_len = udf_rw16(crclen);
2845 }
2846 /* eventually copy in evacuated piece */
2847 /* set new size for uvm */
2848 uvm_vnp_setsize(vp, new_size);
2849
2850 free(node_ad_cpy, M_UDFMNT);
2851 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2852
2853 UDF_UNLOCK_NODE(udf_node, 0);
2854
2855 KASSERT(new_inflen == orig_inflen - size_diff);
2856 KASSERT(new_inflen == 0);
2857 KASSERT(new_lbrec == 0);
2858
2859 return 0;
2860 }
2861
2862 		printf("UDF_SHRINK_NODE: could convert to internal alloc but didn't!\n");
2863 }
2864
2865 /* 5) reset node descriptors */
2866 udf_wipe_adslots(udf_node);
2867
2868 /* 6) copy back extents; merge when possible. Recounting on the fly */
2869 cpy_slots = cpy_slot;
2870
2871 c_ad = node_ad_cpy[0];
2872 slot = 0;
2873 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2874 s_ad = node_ad_cpy[cpy_slot];
2875
2876 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2877 "lb %d, len %d, flags %d\n",
2878 udf_rw16(s_ad.loc.part_num),
2879 udf_rw32(s_ad.loc.lb_num),
2880 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2881 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2882
2883 /* see if we can merge */
2884 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2885 			/* not mergeable (anymore) */
2886 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2887 "len %d, flags %d\n",
2888 udf_rw16(c_ad.loc.part_num),
2889 udf_rw32(c_ad.loc.lb_num),
2890 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2891 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2892
2893 error = udf_append_adslot(udf_node, &slot, &c_ad);
2894 if (error)
2895 goto errorout; /* panic? */
2896 c_ad = s_ad;
2897 slot++;
2898 }
2899 }
2900
2901 /* 7) push rest slot (if any) */
2902 	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2903 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2904 "len %d, flags %d\n",
2905 udf_rw16(c_ad.loc.part_num),
2906 udf_rw32(c_ad.loc.lb_num),
2907 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2908 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2909
2910 error = udf_append_adslot(udf_node, &slot, &c_ad);
2911 if (error)
2912 goto errorout; /* panic? */
2914 }
2915
2916 inflen -= size_diff;
2917 objsize -= size_diff;
2918 if (fe) {
2919 fe->inf_len = udf_rw64(inflen);
2920 } else {
2921 efe->inf_len = udf_rw64(inflen);
2922 efe->obj_size = udf_rw64(objsize);
2923 }
2924 error = 0;
2925
2926 /* set new size for uvm */
2927 uvm_vnp_setsize(vp, new_size);
2928
2929 errorout:
2930 free(node_ad_cpy, M_UDFMNT);
2931
2932 udf_count_alloc_exts(udf_node);
2933
2934 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2935 UDF_UNLOCK_NODE(udf_node, 0);
2936
2937 KASSERT(new_inflen == orig_inflen - size_diff);
2938
2939 return error;
2940 }
2941
2942