/* $NetBSD: udf_allocation.c,v 1.30 2010/12/22 12:15:02 reinoud Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.30 2010/12/22 12:15:02 reinoud Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

/* TODO strip */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

static void udf_record_allocation_in_node(struct udf_mount *ump,
		struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
		struct long_ad *node_ad_cpy);

static void udf_collect_free_space_for_vpart(struct udf_mount *ump,
		uint16_t vpart_num, uint32_t num_lb);

static void udf_wipe_adslots(struct udf_node *udf_node);
static void udf_count_alloc_exts(struct udf_node *udf_node);

/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all
 * operations; this will hopefully/likely reduce O(n log n) to O(1) for most
 * functionality, since actions are most likely sequential and thus seeking
 * doesn't need to search for the same or an adjacent position again.
 */

/* --------------------------------------------------------------------- */

#if 0
#if 1
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen = %"PRIu64"\n", inflen);
	printf("\t\t");

	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags>>30);
		printf("] ");

		if (flags == UDF_EXT_REDIRECT) {
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
#else
#define udf_node_dump(a)
#endif


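/*
 * Debug helper: check that the blocks lb_num .. lb_num + num_lb - 1 on
 * virtual partition vpart_num are marked allocated, i.e. have their bits
 * cleared, in the backing partition's unallocated-space bitmap, when such
 * a bitmap is present.
 */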
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}


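/*
 * Paranoia check: walk all allocation descriptors of the node, recompute
 * the information length and the number of logical blocks recorded, and
 * verify them against the values stored in the node; allocated extents are
 * also cross-checked against the space bitmaps with udf_assert_allocated().
 */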
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad = udf_rw32(fe->l_ad);
		l_ea = udf_rw32(fe->l_ea);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad = udf_rw32(efe->l_ad);
		l_ea = udf_rw32(efe->l_ea);
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
#else
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	uint64_t inflen, logblksrec;
	int dscr_size, lb_size;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
	}
	*cnt_logblksrec = logblksrec;
	*cnt_inflen = inflen;
}
#endif

/* --------------------------------------------------------------------- */

void
udf_calc_freespace(struct udf_mount *ump, uint64_t *sizeblks, uint64_t *freeblks)
{
	struct logvol_int_desc *lvid;
	uint32_t *pos1, *pos2;
	int vpart, num_vpart;

	lvid = ump->logvol_integrity;
	*freeblks = *sizeblks = 0;

	/*
	 * Sequential media report free space directly (CD/DVD/BD-R); for
	 * other media we need the logical volume integrity.
	 *
	 * We sum up all free space here regardless of type.
	 */

	KASSERT(lvid);
	num_vpart = udf_rw32(lvid->num_part);

	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* use track info directly, summing if two tracks are open */
		/* XXX assumption at most two tracks open */
		*freeblks = ump->data_track.free_blocks;
		if (ump->data_track.tracknr != ump->metadata_track.tracknr)
			*freeblks += ump->metadata_track.free_blocks;
		*sizeblks = ump->discinfo.last_possible_lba;
	} else {
		/* free and used space for mountpoint based on logvol integrity */
		for (vpart = 0; vpart < num_vpart; vpart++) {
			pos1 = &lvid->tables[0] + vpart;
			pos2 = &lvid->tables[0] + num_vpart + vpart;
			if (udf_rw32(*pos1) != (uint32_t) -1) {
				*freeblks += udf_rw32(*pos1);
				*sizeblks += udf_rw32(*pos2);
			}
		}
	}
	/* adjust for accounted uncommitted blocks */
	for (vpart = 0; vpart < num_vpart; vpart++)
		*freeblks -= ump->uncommitted_lbs[vpart];

	if (*freeblks > UDF_DISC_SLACK) {
		*freeblks -= UDF_DISC_SLACK;
	} else {
		*freeblks = 0;
	}
}


static void
udf_calc_vpart_freespace(struct udf_mount *ump, uint16_t vpart_num, uint64_t *freeblks)
{
	struct logvol_int_desc *lvid;
	uint32_t *pos1;

	lvid = ump->logvol_integrity;
	*freeblks = 0;

	/*
	 * Sequential media report free space directly (CD/DVD/BD-R); for
	 * other media we need the logical volume integrity.
	 *
	 * We sum up all free space here regardless of type.
	 */

	KASSERT(lvid);
	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* XXX assumption at most two tracks open */
		if (vpart_num == ump->data_part) {
			*freeblks = ump->data_track.free_blocks;
		} else {
			*freeblks = ump->metadata_track.free_blocks;
		}
	} else {
		/* free and used space for mountpoint based on logvol integrity */
		pos1 = &lvid->tables[0] + vpart_num;
		if (udf_rw32(*pos1) != (uint32_t) -1)
			*freeblks += udf_rw32(*pos1);
	}

	/* adjust for accounted uncommitted blocks */
	if (*freeblks > ump->uncommitted_lbs[vpart_num]) {
		*freeblks -= ump->uncommitted_lbs[vpart_num];
	} else {
		*freeblks = 0;
	}
}

/* --------------------------------------------------------------------- */

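/*
 * Translate the virtual address (vpart_num, lb_num) in icb_loc to a
 * physical disc address. On success *lb_numres holds the physical block
 * number and *extres the number of consecutive blocks for which the
 * translation stays valid, so callers can avoid translating every block.
 */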
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
		uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

translate_again:
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);

			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num = udf_rw32(s_icb_loc.loc.lb_num);
		vpart = udf_rw16(s_icb_loc.loc.part_num);
		lb_num += (ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					"failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}


/* XXX provisional primitive braindead version */
/* TODO use ext_res */
void
udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
	uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
{
	struct long_ad loc;
	uint32_t lb_numres, ext_res;
	int sector;

	for (sector = 0; sector < sectors; sector++) {
		memset(&loc, 0, sizeof(struct long_ad));
		loc.loc.part_num = udf_rw16(vpart_num);
		loc.loc.lb_num = udf_rw32(*lmapping);
		udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
		*pmapping = lb_numres;
		lmapping++; pmapping++;
	}
}


/* --------------------------------------------------------------------- */

/*
 * Translate an extent (in logical blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */

int
udf_translate_file_extent(struct udf_node *udf_node,
		uint32_t from, uint32_t num_lb,
		uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot */
	ext_offset = from * lb_size - foffset;

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		lb_num = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num += (ext_offset + lb_size -1) / lb_size;
		overlap = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * note that the while() is necessary since the extent
		 * udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				t_ad.loc.lb_num = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}

/* --------------------------------------------------------------------- */

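/*
 * Search the VAT for a free (0xffffffff) entry, extending the VAT with one
 * entry when no free slot is found. The entry found is pre-marked with the
 * initialiser value 0xfffffffe and its index is returned in *lbnumres.
 */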
static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
	uint8_t *blob;
	int entry, chunk, found, error;

	KASSERT(ump);
	KASSERT(ump->logical_vol);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

	/* TODO static allocation of search chunk */

	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
	found = 0;
	error = 0;
	entry = 0;
	do {
		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
		if (chunk <= 0)
			break;
		/* load in chunk */
		error = udf_vat_read(ump->vat_node, blob, chunk,
				ump->vat_offset + lb_num * 4);

		if (error)
			break;

		/* search this chunk */
		for (entry=0; entry < chunk /4; entry++, lb_num++) {
			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
			lb_map = udf_rw32(udf_rw32_lbmap);
			if (lb_map == 0xffffffff) {
				found = 1;
				break;
			}
		}
	} while (!found);
	if (error) {
		printf("udf_search_free_vatloc: error reading in vat chunk "
			"(lb %d, size %d)\n", lb_num, chunk);
	}

	if (!found) {
		/* extend VAT */
		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
		lb_num = ump->vat_entries;
		ump->vat_entries++;
	}

	/* mark entry with initialiser just in case */
	lb_map = udf_rw32(0xfffffffe);
	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
		ump->vat_offset + lb_num *4);
	ump->vat_last_free_lb = lb_num;

	free(blob, M_UDFTEMP);
	*lbnumres = lb_num;
	return 0;
}


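/*
 * Allocate up to *num_lb blocks by scanning the free-space bitmap for set
 * (free) bits, clearing each bit found and recording its block number in
 * lmappos. Two passes are made so the scan can wrap around once; on return
 * *num_lb holds the number of blocks that could not be allocated.
 */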
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use the first bit still set, i.e. still free */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				offset += 8;
				continue;
			}

			/* check for ffs overshoot */
			if (offset + bit-1 >= bitmap->max_offset) {
				offset = bitmap->max_offset;
				break;
			}

			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}


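/*
 * Mark num_lb blocks starting at lb_num as free again by setting their
 * bits; the leading partial byte, whole bytes and trailing partial byte
 * are handled separately and the KASSERTs guard against double frees.
 */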
static void
udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
{
	uint32_t offset;
	uint32_t bit, bitval;
	uint8_t *bpos;

	offset = lb_num;

	/* starter bits */
	bpos = bitmap->bits + offset/8;
	bit = offset % 8;
	while ((bit != 0) && (num_lb > 0)) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
	if (num_lb == 0)
		return;

	/* whole bytes */
	KASSERT(bit == 0);
	bpos = bitmap->bits + offset / 8;
	while (num_lb >= 8) {
		KASSERT((*bpos == 0));
		DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
		*bpos = 255;
		offset += 8; num_lb -= 8;
		bpos++;
	}

	/* stop bits */
	KASSERT(num_lb < 8);
	bit = 0;
	while (num_lb > 0) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
}


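/*
 * Determine how many of the last to_trunc bits in the bitmap form one
 * sequential run of free blocks ending at max_offset; only that many
 * blocks can safely be truncated off the end of the partition.
 */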
static uint32_t
udf_bitmap_check_trunc_free(struct udf_bitmap *bitmap, uint32_t to_trunc)
{
	uint32_t seq_free, offset;
	uint8_t *bpos;
	uint8_t bit, bitval;

	DPRINTF(RESERVE, ("\ttrying to trunc %d bits from bitmap\n", to_trunc));
	offset = bitmap->max_offset - to_trunc;

	/* starter bits (if any) */
	bpos = bitmap->bits + offset/8;
	bit = offset % 8;
	seq_free = 0;
	while (to_trunc > 0) {
		seq_free++;
		bitval = (1 << bit);
		if (!(*bpos & bitval))
			seq_free = 0;
		offset++; to_trunc--;
		bit++;
		if (bit == 8) {
			bpos++;
			bit = 0;
		}
	}

	DPRINTF(RESERVE, ("\tfound %d sequential free bits in bitmap\n", seq_free));
	return seq_free;
}

/* --------------------------------------------------------------------- */

/*
 * We check overall disc space with a margin to prevent critical
 * conditions. If disc space is low we try to force a sync() to improve our
 * estimates. When confronted with a shortage of meta-data partition space
 * we check whether the partition can be extended, and extend it when
 * needed.
 *
 * A second strategy we could use when disc space is getting low on a disc
 * formatted with a meta-data partition is to see if there are sparse areas
 * in the meta-data partition and free blocks there for extra data.
 */

void
udf_do_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	uint16_t vpart_num, uint32_t num_lb)
{
	ump->uncommitted_lbs[vpart_num] += num_lb;
	if (udf_node)
		udf_node->uncommitted_lbs += num_lb;
}


void
udf_do_unreserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	uint16_t vpart_num, uint32_t num_lb)
{
	ump->uncommitted_lbs[vpart_num] -= num_lb;
	if (ump->uncommitted_lbs[vpart_num] < 0) {
		DPRINTF(RESERVE, ("UDF: underflow on partition reservation, "
			"part %d: %d\n", vpart_num,
			ump->uncommitted_lbs[vpart_num]));
		ump->uncommitted_lbs[vpart_num] = 0;
	}
	if (udf_node) {
		udf_node->uncommitted_lbs -= num_lb;
		if (udf_node->uncommitted_lbs < 0) {
			DPRINTF(RESERVE, ("UDF: underflow of node "
				"reservation : %d\n",
				udf_node->uncommitted_lbs));
			udf_node->uncommitted_lbs = 0;
		}
	}
}


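/*
 * Reserve num_lb blocks on the given virtual partition. When space looks
 * tight a sync is forced and, if that is still not enough, a
 * redistribution of partition space is attempted. When can_fail is set an
 * extra UDF_DISC_SLACK margin is kept free; returns ENOSPC when the
 * reservation can not be honoured.
 */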
int
udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail)
{
	uint64_t freeblks;
	uint64_t slack;
	int i, error;

	slack = 0;
	if (can_fail)
		slack = UDF_DISC_SLACK;

	error = 0;
	mutex_enter(&ump->allocate_mutex);

	/* check if there is enough space available */
	for (i = 0; i < 3; i++) {	/* XXX arbitrary number */
		udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
		if (num_lb + slack < freeblks)
			break;
		/* issue SYNC */
		DPRINTF(RESERVE, ("udf_reserve_space: issuing sync\n"));
		mutex_exit(&ump->allocate_mutex);
		udf_do_sync(ump, FSCRED, 0);
		mutex_enter(&mntvnode_lock);
		/* 1/8 second wait */
		cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
			hz/8);
		mutex_exit(&mntvnode_lock);
		mutex_enter(&ump->allocate_mutex);
	}

	/* check if there is enough space available now */
	udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
	if (num_lb + slack >= freeblks) {
		DPRINTF(RESERVE, ("udf_reserve_space: try to redistribute "
			"partition space\n"));
		DPRINTF(RESERVE, ("\tvpart %d, type %d is full\n",
			vpart_num, ump->vtop_alloc[vpart_num]));
		/* Try to redistribute space if possible */
		udf_collect_free_space_for_vpart(ump, vpart_num, num_lb + slack);
	}

	/* check if there is enough space available now */
	udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
	if (num_lb + slack <= freeblks) {
		udf_do_reserve_space(ump, udf_node, vpart_num, num_lb);
	} else {
		DPRINTF(RESERVE, ("udf_reserve_space: out of disc space\n"));
		error = ENOSPC;
	}

	mutex_exit(&ump->allocate_mutex);
	return error;
}


void
udf_cleanup_reservation(struct udf_node *udf_node)
{
	struct udf_mount *ump = udf_node->ump;
	int vpart_num;

	mutex_enter(&ump->allocate_mutex);

	/* compensate for overlapping blocks */
	DPRINTF(RESERVE, ("UDF: overlapped %d blocks in count\n", udf_node->uncommitted_lbs));

	vpart_num = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
	udf_do_unreserve_space(ump, udf_node, vpart_num, udf_node->uncommitted_lbs);

	DPRINTF(RESERVE, ("\ttotal now %d\n", ump->uncommitted_lbs[vpart_num]));

	/* sanity */
	if (ump->uncommitted_lbs[vpart_num] < 0)
		ump->uncommitted_lbs[vpart_num] = 0;

	mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Allocate an extent of given length on given virt. partition. It doesn't
 * have to be one stretch.
 */

int
udf_allocate_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	alloc_type = ump->vtop_alloc[vpart_num];
	is_node = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;

			/* reserve on the backing sequential partition since
			 * that partition is credited back later */
			udf_do_reserve_space(ump, udf_node,
				ump->vtop[vpart_num], num_lb);
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R  */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R  */
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

	if (!error) {
		/* credit our partition since we have committed the space */
		udf_do_unreserve_space(ump, udf_node, vpart_num, num_lb);
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64, (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}

/* --------------------------------------------------------------------- */

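/*
 * Return previously allocated blocks to the free space administration,
 * dispatching on the partition type: the freed/unallocated space bitmaps,
 * the VAT, or the metadata bitmap.
 */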
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if they are defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Special function to synchronise the metadatamirror file when the metadata
 * file changes on resizing. When the metadata file is actually duplicated,
 * this action is a no-op since the two files describe different extents on
 * the disc.
 */

void udf_synchronise_metadatamirror_node(struct udf_mount *ump)
{
	struct udf_node *meta_node, *metamirror_node;
	struct long_ad s_ad;
	int slot, cpy_slot;
	int error, eof;

	if (ump->metadata_flags & METADATA_DUPLICATED)
		return;

	meta_node = ump->metadata_node;
	metamirror_node = ump->metadatamirror_node;

	/* 1) wipe mirror node */
	udf_wipe_adslots(metamirror_node);

	/* 2) copy all node descriptors from the meta_node */
	slot = 0;
	cpy_slot = 0;
	for (;;) {
		udf_get_adslot(meta_node, slot, &s_ad, &eof);
		if (eof)
			break;
		error = udf_append_adslot(metamirror_node, &cpy_slot, &s_ad);
		if (error) {
			/* WTF, this shouldn't happen, what to do now? */
			panic("udf_synchronise_metadatamirror_node failed!");
		}
		slot++;
	}

	/* 3) adjust metamirror_node size */
	if (meta_node->fe) {
		KASSERT(metamirror_node->fe);
		metamirror_node->fe->inf_len = meta_node->fe->inf_len;
	} else {
		KASSERT(meta_node->efe);
		KASSERT(metamirror_node->efe);
		metamirror_node->efe->inf_len = meta_node->efe->inf_len;
		metamirror_node->efe->obj_size = meta_node->efe->obj_size;
	}

	/* for sanity */
	udf_count_alloc_exts(metamirror_node);
}

/* --------------------------------------------------------------------- */

/*
 * When faced with an out of space condition while there is still space
 * available on other partitions, try to redistribute the space. This is
 * only defined for media using metadata partitions.
 *
 * There are two formats to deal with. Either it's a `normal' metadata
 * partition and we can move blocks between a metadata bitmap and its
 * companion data spacemap OR it's a UDF 2.60 formatted BluRay-R disc with
 * POW and a metadata partition.
 */

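/*
 * Try to shrink the metadata partition by giving a run of free blocks at
 * its end back to the underlying data partition. Returns the number of
 * logical blocks still lacking, or zero when enough space was reclaimed.
 */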
static uint32_t
udf_trunc_metadatapart(struct udf_mount *ump, uint32_t num_lb)
{
	struct udf_node *bitmap_node;
	struct udf_bitmap *bitmap;
	struct space_bitmap_desc *sbd, *new_sbd;
	struct logvol_int_desc *lvid;
	uint64_t inf_len;
	uint64_t meta_free_lbs, data_free_lbs;
	uint32_t *freepos, *sizepos;
	uint32_t unit, lb_size, to_trunc;
	uint16_t meta_vpart_num, data_vpart_num, num_vpart;
	int err;

	unit = ump->metadata_alloc_unit_size;
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	lvid = ump->logvol_integrity;

	/* lookup vpart for metadata partition */
	meta_vpart_num = ump->node_part;
	KASSERT(ump->vtop_alloc[meta_vpart_num] == UDF_ALLOC_METABITMAP);

	/* lookup vpart for data partition */
	data_vpart_num = ump->data_part;
	KASSERT(ump->vtop_alloc[data_vpart_num] == UDF_ALLOC_SPACEMAP);

	udf_calc_vpart_freespace(ump, data_vpart_num, &data_free_lbs);
	udf_calc_vpart_freespace(ump, meta_vpart_num, &meta_free_lbs);

	DPRINTF(RESERVE, ("\tfree space on data partition %"PRIu64" blks\n", data_free_lbs));
	DPRINTF(RESERVE, ("\tfree space on metadata partition %"PRIu64" blks\n", meta_free_lbs));

	/* give away some of the free meta space, in unit block sizes */
	to_trunc = meta_free_lbs/4;			/* give out a quarter */
	to_trunc = MAX(to_trunc, num_lb);
	to_trunc = unit * ((to_trunc + unit-1) / unit);	/* round up */

	/* scale down if needed and bail out when out of space */
	if (to_trunc >= meta_free_lbs)
		return num_lb;

	/* check extent of bits marked free at the end of the map */
	bitmap = &ump->metadata_unalloc_bits;
	to_trunc = udf_bitmap_check_trunc_free(bitmap, to_trunc);
	to_trunc = unit * (to_trunc / unit);		/* round down again */
	if (to_trunc == 0)
		return num_lb;

	DPRINTF(RESERVE, ("\ttruncating %d lbs from the metadata bitmap\n",
		to_trunc));

	/* get length of the metadata bitmap node file */
	bitmap_node = ump->metadatabitmap_node;
	if (bitmap_node->fe) {
		inf_len = udf_rw64(bitmap_node->fe->inf_len);
	} else {
		KASSERT(bitmap_node->efe);
		inf_len = udf_rw64(bitmap_node->efe->inf_len);
	}
	inf_len -= to_trunc/8;

	/* as per [UDF 2.60/2.2.13.6] : */
	/* 1) update the SBD in the metadata bitmap file */
	sbd = (struct space_bitmap_desc *) bitmap->blob;
	sbd->num_bits = udf_rw32(sbd->num_bits) - to_trunc;
	sbd->num_bytes = udf_rw32(sbd->num_bytes) - to_trunc/8;
	bitmap->max_offset = udf_rw32(sbd->num_bits);

	num_vpart = udf_rw32(lvid->num_part);
	freepos = &lvid->tables[0] + meta_vpart_num;
	sizepos = &lvid->tables[0] + num_vpart + meta_vpart_num;
	*freepos = udf_rw32(*freepos) - to_trunc;
	*sizepos = udf_rw32(*sizepos) - to_trunc;

	/* realloc bitmap for better memory usage */
	new_sbd = realloc(sbd, inf_len, M_UDFVOLD,
		M_CANFAIL | M_WAITOK);
	if (new_sbd) {
		/* update pointers */
		ump->metadata_unalloc_dscr = new_sbd;
		bitmap->blob = (uint8_t *) new_sbd;
	}
	ump->lvclose |= UDF_WRITE_PART_BITMAPS;

	/*
	 * The truncated space is secured now and can't be allocated anymore.
	 * Release the allocate mutex so we can shrink the nodes the normal
	 * way.
	 */
	mutex_exit(&ump->allocate_mutex);

	/* 2) trunc the metadata bitmap information file, freeing blocks */
	err = udf_shrink_node(bitmap_node, inf_len);
	KASSERT(err == 0);

	/* 3) trunc the metadata file and mirror file, freeing blocks */
	inf_len = udf_rw32(sbd->num_bits) * lb_size;	/* [4/14.12.4] */
	err = udf_shrink_node(ump->metadata_node, inf_len);
	KASSERT(err == 0);
	if (ump->metadatamirror_node && (ump->metadata_flags & METADATA_DUPLICATED)) {
		err = udf_shrink_node(ump->metadatamirror_node, inf_len);
		KASSERT(err == 0);
	}
	ump->lvclose |= UDF_WRITE_METAPART_NODES;

	/* relock before exit */
	mutex_enter(&ump->allocate_mutex);

	if (to_trunc > num_lb)
		return 0;
	return num_lb - to_trunc;
}


static void
udf_sparsify_metadatapart(struct udf_mount *ump, uint32_t num_lb)
{
	/* NOT IMPLEMENTED, fail */
}


static void
udf_collect_free_space_for_vpart(struct udf_mount *ump,
	uint16_t vpart_num, uint32_t num_lb)
{
	/* allocate mutex is held */

	/* only defined for metadata partitions */
	if (ump->vtop_tp[ump->node_part] != UDF_VTOP_TYPE_META) {
		DPRINTF(RESERVE, ("\tcan't grow/shrink; no metadata partitioning\n"));
		return;
	}

	/* UDF 2.60 BD-R+POW? */
	if (ump->vtop_alloc[ump->node_part] == UDF_ALLOC_METASEQUENTIAL) {
		DPRINTF(RESERVE, ("\tUDF 2.60 BD-R+POW track grow not implemented yet\n"));
		return;
	}

	if (ump->vtop_tp[vpart_num] == UDF_VTOP_TYPE_META) {
		/* try to grow the meta partition */
		DPRINTF(RESERVE, ("\ttrying to grow the meta partition\n"));
		/* as per [UDF 2.60/2.2.13.5] : extend bitmap and metadata file(s) */
		DPRINTF(NOTIMPL, ("\tgrowing meta partition not implemented yet\n"));
	} else {
		/* try to shrink the metadata partition */
		DPRINTF(RESERVE, ("\ttrying to shrink the meta partition\n"));
		/* as per [UDF 2.60/2.2.13.6] : either trunc or make sparse */
		num_lb = udf_trunc_metadatapart(ump, num_lb);
		if (num_lb)
			udf_sparsify_metadatapart(ump, num_lb);
	}

	/* allocate mutex should still be held */
}


/* --------------------------------------------------------------------- */

/*
 * Allocate a buf on disc for direct write out. The space doesn't have to be
 * contiguous as the caller takes care of this.
 */

void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	int lb_size, blks, udf_c_type;
	int vpart_num, num_lb;
	int error, s;

	/*
	 * For each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If it's userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb = (buf->b_bcount + lb_size -1) / lb_size;
	blks = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	/* select partition to record the buffer on */
	vpart_num = *vpart_nump = udf_get_record_vpart(ump, udf_c_type);

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, it's already allocated */
		if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
			return;

		/* allocate on its backing sequential partition */
		vpart_num = ump->data_part;
	}

	/* XXX can this still happen? */
	/* do allocation on the selected partition */
	error = udf_allocate_space(ump, udf_node, udf_c_type,
		vpart_num, num_lb, lmapping);
	if (error) {
		/*
		 * ARGH! we haven't done our accounting right! it should
		 * always succeed.
		 */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* If it's userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) ||
	    (udf_c_type == UDF_C_FIDS) ||
	    (udf_c_type == UDF_C_METADATA_SBM))
	{
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
		udf_node->outstanding_bufs--;
		splx(s);
	}
}

/* --------------------------------------------------------------------- */

/*
 * Try to merge the new piece a2 onto a1. Returns nonzero when (further)
 * merging is not possible; the unmerged remainder is returned in a2.
 */

static int
udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
{
	uint32_t max_len, merge_len;
	uint32_t a1_len, a2_len;
	uint32_t a1_flags, a2_flags;
	uint32_t a1_lbnum, a2_lbnum;
	uint16_t a1_part, a2_part;

	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
	a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
	a1_lbnum = udf_rw32(a1->loc.lb_num);
	a1_part = udf_rw16(a1->loc.part_num);

	a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
	a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
	a2_lbnum = udf_rw32(a2->loc.lb_num);
	a2_part = udf_rw16(a2->loc.part_num);

	/* defines same space */
	if (a1_flags != a2_flags)
		return 1;

	if (a1_flags != UDF_EXT_FREE) {
		/* the same partition */
		if (a1_part != a2_part)
			return 1;

		/* a2 is successor of a1 */
		if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
			return 1;
	}

	/* merge as much from a2 as possible */
	merge_len = MIN(a2_len, max_len - a1_len);
	a1_len += merge_len;
	a2_len -= merge_len;
	a2_lbnum += merge_len/lb_size;

	a1->len = udf_rw32(a1_len | a1_flags);
	a2->len = udf_rw32(a2_len | a2_flags);
	a2->loc.lb_num = udf_rw32(a2_lbnum);

	if (a2_len > 0)
		return 1;

	/* a2 was merged completely */
	return 0;
}

/* --------------------------------------------------------------------- */

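/*
 * Clear all allocation descriptor space in the node's (extended) file
 * entry and in its allocation extent entries, resetting l_ad, logblks_rec
 * and the descriptor CRC lengths; used when rebuilding the node.
 */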
static void
udf_wipe_adslots(struct udf_node *udf_node)
{
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	uint64_t inflen, objsize;
	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
	uint8_t *data_pos;
	int extnr;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	max_l_ad = lb_size - dscr_size - l_ea;

	/* wipe fe/efe */
	memset(data_pos, 0, max_l_ad);
	crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
	if (fe) {
		fe->l_ad = udf_rw32(0);
		fe->logblks_rec = udf_rw64(0);
		fe->tag.desc_crc_len = udf_rw16(crclen);
	} else {
		efe->l_ad = udf_rw32(0);
		efe->logblks_rec = udf_rw64(0);
		efe->tag.desc_crc_len = udf_rw16(crclen);
	}

	/* wipe all allocation extent entries */
	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		data_pos = (uint8_t *) ext->data;
		max_l_ad = lb_size - dscr_size;
		memset(data_pos, 0, max_l_ad);
		ext->l_ad = udf_rw32(0);

		crclen = dscr_size - UDF_DESC_TAG_LENGTH;
		ext->tag.desc_crc_len = udf_rw16(crclen);
	}
	udf_node->i_flags |= IN_NODE_REBUILD;
}

/* --------------------------------------------------------------------- */

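/*
 * Fetch the allocation descriptor at index `slot', transparently following
 * redirects into the chain of allocation extent entries. The descriptor is
 * returned as a long_ad in *icb; *eof is set when the slot lies past the
 * last recorded descriptor.
 */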
1815 void
1816 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1817 int *eof) {
1818 struct file_entry *fe;
1819 struct extfile_entry *efe;
1820 struct alloc_ext_entry *ext;
1821 struct icb_tag *icbtag;
1822 struct short_ad *short_ad;
1823 struct long_ad *long_ad, l_icb;
1824 uint32_t offset;
1825 uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
1826 uint8_t *data_pos;
1827 int icbflags, addr_type, adlen, extnr;
1828
1829 /* determine what descriptor we are in */
1830 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1831
1832 fe = udf_node->fe;
1833 efe = udf_node->efe;
1834 if (fe) {
1835 icbtag = &fe->icbtag;
1836 dscr_size = sizeof(struct file_entry) -1;
1837 l_ea = udf_rw32(fe->l_ea);
1838 l_ad = udf_rw32(fe->l_ad);
1839 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1840 } else {
1841 icbtag = &efe->icbtag;
1842 dscr_size = sizeof(struct extfile_entry) -1;
1843 l_ea = udf_rw32(efe->l_ea);
1844 l_ad = udf_rw32(efe->l_ad);
1845 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1846 }
1847
1848 icbflags = udf_rw16(icbtag->flags);
1849 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1850
1851 /* just in case we're called on an intern, its EOF */
1852 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1853 memset(icb, 0, sizeof(struct long_ad));
1854 *eof = 1;
1855 return;
1856 }
1857
1858 adlen = 0;
1859 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1860 adlen = sizeof(struct short_ad);
1861 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1862 adlen = sizeof(struct long_ad);
1863 }
1864
1865 /* if offset too big, we go to the allocation extensions */
1866 offset = slot * adlen;
1867 extnr = -1;
1868 while (offset >= l_ad) {
1869 /* check if our last entry is a redirect */
1870 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1871 short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
1872 l_icb.len = short_ad->len;
1873 l_icb.loc.part_num = udf_node->loc.loc.part_num;
1874 l_icb.loc.lb_num = short_ad->lb_num;
1875 } else {
1876 KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
1877 long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
1878 l_icb = *long_ad;
1879 }
1880 flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
1881 if (flags != UDF_EXT_REDIRECT) {
1882 l_ad = 0; /* force EOF */
1883 break;
1884 }
1885
1886 /* advance to next extent */
1887 extnr++;
1888 if (extnr >= udf_node->num_extensions) {
1889 l_ad = 0; /* force EOF */
1890 break;
1891 }
1892 offset = offset - l_ad;
1893 ext = udf_node->ext[extnr];
1894 dscr_size = sizeof(struct alloc_ext_entry) -1;
1895 l_ad = udf_rw32(ext->l_ad);
1896 data_pos = (uint8_t *) ext + dscr_size;
1897 }
1898
1899 /* XXX l_ad == 0 should be enough to check */
1900 *eof = (offset >= l_ad) || (l_ad == 0);
1901 if (*eof) {
1902 DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
1903 "l_ad %d\n", extnr, offset, l_ad));
1904 memset(icb, 0, sizeof(struct long_ad));
1905 return;
1906 }
1907
1908 /* get the element */
1909 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1910 short_ad = (struct short_ad *) (data_pos + offset);
1911 icb->len = short_ad->len;
1912 icb->loc.part_num = udf_node->loc.loc.part_num;
1913 icb->loc.lb_num = short_ad->lb_num;
1914 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1915 long_ad = (struct long_ad *) (data_pos + offset);
1916 *icb = *long_ad;
1917 }
1918 DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
1919 "flags %d\n", icb->loc.part_num, icb->loc.lb_num,
1920 UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
1921 }
1922
1923 /* --------------------------------------------------------------------- */
1924
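/*
 * Write the allocation descriptor `icb' into slot `*slot' of the node's
 * (E)FE or one of its allocation extents, compensating the recorded
 * logical block count when an existing entry is overwritten. When the
 * current descriptor is full, a new allocation extent descriptor (AED)
 * is allocated and chained on through a redirect, and `*slot' is bumped
 * to account for the inserted redirect entry.
 */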
1925 int
1926 udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
1927 struct udf_mount *ump = udf_node->ump;
1928 union dscrptr *dscr, *extdscr;
1929 struct file_entry *fe;
1930 struct extfile_entry *efe;
1931 struct alloc_ext_entry *ext;
1932 struct icb_tag *icbtag;
1933 struct short_ad *short_ad;
1934 struct long_ad *long_ad, o_icb, l_icb;
1935 uint64_t logblks_rec, *logblks_rec_p;
1936 uint64_t lmapping;
1937 uint32_t offset, rest, len, lb_num;
1938 uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
1939 uint32_t flags;
1940 uint16_t vpart_num;
1941 uint8_t *data_pos;
1942 int icbflags, addr_type, adlen, extnr;
1943 int error;
1944
1945 lb_size = udf_rw32(ump->logical_vol->lb_size);
1946 vpart_num = udf_rw16(udf_node->loc.loc.part_num);
1947
1948 /* determine what descriptor we are in */
1949 fe = udf_node->fe;
1950 efe = udf_node->efe;
1951 if (fe) {
1952 icbtag = &fe->icbtag;
1953 dscr = (union dscrptr *) fe;
1954 dscr_size = sizeof(struct file_entry) -1;
1955
1956 l_ea = udf_rw32(fe->l_ea);
1957 l_ad_p = &fe->l_ad;
1958 logblks_rec_p = &fe->logblks_rec;
1959 } else {
1960 icbtag = &efe->icbtag;
1961 dscr = (union dscrptr *) efe;
1962 dscr_size = sizeof(struct extfile_entry) -1;
1963
1964 l_ea = udf_rw32(efe->l_ea);
1965 l_ad_p = &efe->l_ad;
1966 logblks_rec_p = &efe->logblks_rec;
1967 }
1968 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
1969 max_l_ad = lb_size - dscr_size - l_ea;
1970
1971 icbflags = udf_rw16(icbtag->flags);
1972 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1973
1974 	/* just in case we're called on an intern, it's EOF */
1975 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1976 panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
1977 }
1978
1979 adlen = 0;
1980 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1981 adlen = sizeof(struct short_ad);
1982 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1983 adlen = sizeof(struct long_ad);
1984 }
1985
1986 /* clean up given long_ad since it can be a synthesized one */
1987 flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
1988 if (flags == UDF_EXT_FREE) {
1989 icb->loc.part_num = udf_rw16(0);
1990 icb->loc.lb_num = udf_rw32(0);
1991 }
1992
1993 /* if offset too big, we go to the allocation extensions */
1994 l_ad = udf_rw32(*l_ad_p);
1995 offset = (*slot) * adlen;
1996 extnr = -1;
1997 while (offset >= l_ad) {
1998 /* check if our last entry is a redirect */
1999 if (addr_type == UDF_ICB_SHORT_ALLOC) {
2000 short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
2001 l_icb.len = short_ad->len;
2002 l_icb.loc.part_num = udf_node->loc.loc.part_num;
2003 l_icb.loc.lb_num = short_ad->lb_num;
2004 } else {
2005 KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
2006 long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
2007 l_icb = *long_ad;
2008 }
2009 flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
2010 if (flags != UDF_EXT_REDIRECT) {
2011 			/* only one past the last one is addressable */
2012 break;
2013 }
2014
2015 /* advance to next extent */
2016 extnr++;
2017 KASSERT(extnr < udf_node->num_extensions);
2018 offset = offset - l_ad;
2019
2020 ext = udf_node->ext[extnr];
2021 dscr = (union dscrptr *) ext;
2022 dscr_size = sizeof(struct alloc_ext_entry) -1;
2023 max_l_ad = lb_size - dscr_size;
2024 l_ad_p = &ext->l_ad;
2025 l_ad = udf_rw32(*l_ad_p);
2026 data_pos = (uint8_t *) ext + dscr_size;
2027 }
2028 DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
2029 extnr, offset, udf_rw32(*l_ad_p)));
2030 KASSERT(l_ad == udf_rw32(*l_ad_p));
2031
2032 /* offset is offset within the current (E)FE/AED */
2033 l_ad = udf_rw32(*l_ad_p);
2034 crclen = udf_rw16(dscr->tag.desc_crc_len);
2035 logblks_rec = udf_rw64(*logblks_rec_p);
2036
2037 /* overwriting old piece? */
2038 if (offset < l_ad) {
2039 /* overwrite entry; compensate for the old element */
2040 if (addr_type == UDF_ICB_SHORT_ALLOC) {
2041 short_ad = (struct short_ad *) (data_pos + offset);
2042 o_icb.len = short_ad->len;
2043 o_icb.loc.part_num = udf_rw16(0); /* ignore */
2044 o_icb.loc.lb_num = short_ad->lb_num;
2045 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2046 long_ad = (struct long_ad *) (data_pos + offset);
2047 o_icb = *long_ad;
2048 } else {
2049 panic("Invalid address type in udf_append_adslot\n");
2050 }
2051
2052 len = udf_rw32(o_icb.len);
2053 if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
2054 /* adjust counts */
2055 len = UDF_EXT_LEN(len);
2056 logblks_rec -= (len + lb_size -1) / lb_size;
2057 }
2058 }
2059
2060 	/* make sure we're not appending a redirect */
2061 flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
2062 KASSERT(flags != UDF_EXT_REDIRECT);
2063
2064 /* round down available space */
2065 rest = adlen * ((max_l_ad - offset) / adlen);
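	/*
	 * Note: one slot must remain free, since the last addressable slot
	 * is needed for a redirect to a (possibly new) allocation extent.
	 */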
2066 if (rest <= adlen) {
2067 /* have to append aed, see if we already have a spare one */
2068 extnr++;
2069 ext = udf_node->ext[extnr];
2070 l_icb = udf_node->ext_loc[extnr];
2071 if (ext == NULL) {
2072 DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));
2073
2074 error = udf_reserve_space(ump, NULL, UDF_C_NODE,
2075 vpart_num, 1, /* can fail */ false);
2076 if (error) {
2077 printf("UDF: couldn't reserve space for AED!\n");
2078 return error;
2079 }
2080 error = udf_allocate_space(ump, NULL, UDF_C_NODE,
2081 vpart_num, 1, &lmapping);
2082 lb_num = lmapping;
2083 if (error)
2084 panic("UDF: couldn't allocate AED!\n");
2085
2086 /* initialise pointer to location */
2087 memset(&l_icb, 0, sizeof(struct long_ad));
2088 l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
2089 l_icb.loc.lb_num = udf_rw32(lb_num);
2090 l_icb.loc.part_num = udf_rw16(vpart_num);
2091
2092 /* create new aed descriptor */
2093 udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
2094 ext = &extdscr->aee;
2095
2096 udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
2097 dscr_size = sizeof(struct alloc_ext_entry) -1;
2098 max_l_ad = lb_size - dscr_size;
2099 memset(ext->data, 0, max_l_ad);
2100 ext->l_ad = udf_rw32(0);
2101 ext->tag.desc_crc_len =
2102 udf_rw16(dscr_size - UDF_DESC_TAG_LENGTH);
2103
2104 /* declare aed */
2105 udf_node->num_extensions++;
2106 udf_node->ext_loc[extnr] = l_icb;
2107 udf_node->ext[extnr] = ext;
2108 }
2109 /* add redirect and adjust l_ad and crclen for old descr */
2110 if (addr_type == UDF_ICB_SHORT_ALLOC) {
2111 short_ad = (struct short_ad *) (data_pos + offset);
2112 short_ad->len = l_icb.len;
2113 short_ad->lb_num = l_icb.loc.lb_num;
2114 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2115 long_ad = (struct long_ad *) (data_pos + offset);
2116 *long_ad = l_icb;
2117 }
2118 l_ad += adlen;
2119 crclen += adlen;
2120 dscr->tag.desc_crc_len = udf_rw16(crclen);
2121 *l_ad_p = udf_rw32(l_ad);
2122
2123 /* advance to the new extension */
2124 KASSERT(ext != NULL);
2125 dscr = (union dscrptr *) ext;
2126 dscr_size = sizeof(struct alloc_ext_entry) -1;
2127 max_l_ad = lb_size - dscr_size;
2128 data_pos = (uint8_t *) dscr + dscr_size;
2129
2130 l_ad_p = &ext->l_ad;
2131 l_ad = udf_rw32(*l_ad_p);
2132 crclen = udf_rw16(dscr->tag.desc_crc_len);
2133 offset = 0;
2134
2135 		/* adjust callee's slot count for link insert */
2136 *slot += 1;
2137 }
2138
2139 /* write out the element */
2140 DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
2141 "len %d, flags %d\n", data_pos + offset,
2142 icb->loc.part_num, icb->loc.lb_num,
2143 UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
2144 if (addr_type == UDF_ICB_SHORT_ALLOC) {
2145 short_ad = (struct short_ad *) (data_pos + offset);
2146 short_ad->len = icb->len;
2147 short_ad->lb_num = icb->loc.lb_num;
2148 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2149 long_ad = (struct long_ad *) (data_pos + offset);
2150 *long_ad = *icb;
2151 }
2152
2153 /* adjust logblks recorded count */
2154 len = udf_rw32(icb->len);
2155 flags = UDF_EXT_FLAGS(len);
2156 if (flags == UDF_EXT_ALLOCATED)
2157 logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
2158 *logblks_rec_p = udf_rw64(logblks_rec);
2159
2160 /* adjust l_ad and crclen when needed */
2161 if (offset >= l_ad) {
2162 l_ad += adlen;
2163 crclen += adlen;
2164 dscr->tag.desc_crc_len = udf_rw16(crclen);
2165 *l_ad_p = udf_rw32(l_ad);
2166 }
2167
2168 return 0;
2169 }
2170
2171 /* --------------------------------------------------------------------- */
2172
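/*
 * Walk the node's allocation descriptors, count the allocation extents
 * still in use (i.e. still reached through a redirect) and free both the
 * descriptors and the disc space of any excess extents.
 */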
2173 static void
2174 udf_count_alloc_exts(struct udf_node *udf_node)
2175 {
2176 struct long_ad s_ad;
2177 uint32_t lb_num, len, flags;
2178 uint16_t vpart_num;
2179 int slot, eof;
2180 int num_extents, extnr;
2181 int lb_size;
2182
2183 if (udf_node->num_extensions == 0)
2184 return;
2185
2186 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2187 /* count number of allocation extents in use */
2188 num_extents = 0;
2189 slot = 0;
2190 for (;;) {
2191 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2192 if (eof)
2193 break;
2194 len = udf_rw32(s_ad.len);
2195 flags = UDF_EXT_FLAGS(len);
2196
2197 if (flags == UDF_EXT_REDIRECT)
2198 num_extents++;
2199
2200 slot++;
2201 }
2202
2203 	DPRINTF(ALLOC, ("udf_count_alloc_exts counted %d live extents\n",
2204 num_extents));
2205
2206 /* XXX choice: we could delay freeing them on node writeout */
2207 /* free excess entries */
2208 extnr = num_extents;
2209 for (;extnr < udf_node->num_extensions; extnr++) {
2210 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
2211 		/* free descriptor */
2212 s_ad = udf_node->ext_loc[extnr];
2213 udf_free_logvol_dscr(udf_node->ump, &s_ad,
2214 udf_node->ext[extnr]);
2215 udf_node->ext[extnr] = NULL;
2216
2217 /* free disc space */
2218 lb_num = udf_rw32(s_ad.loc.lb_num);
2219 vpart_num = udf_rw16(s_ad.loc.part_num);
2220 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
2221
2222 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
2223 }
2224
2225 /* set our new number of allocation extents */
2226 udf_node->num_extensions = num_extents;
2227 }
2228
2229
2230 /* --------------------------------------------------------------------- */
2231
2232 /*
2233  * Adjust the node's allocation descriptors to reflect the new mapping; note
2234  * that we might glue onto existing allocation descriptors.
2235  *
2236  * XXX Note there can only be one allocation being recorded per mount; maybe
2237  * do explicit allocation in the schedule thread?
2238 */
2239
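/*
 * In outline: the current descriptors are copied to the node_ad_cpy
 * scratch buffer up to the overlap, the overlapping slot is truncated,
 * extents for the new mapping are inserted (coalescing contiguous
 * blocks), the replaced extents are freed, the remaining descriptors are
 * copied over, and finally all adslots are wiped and rewritten from the
 * scratch buffer, merging where possible.
 */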
2240 static void
2241 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
2242 uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
2243 {
2244 struct vnode *vp = buf->b_vp;
2245 struct udf_node *udf_node = VTOI(vp);
2246 struct file_entry *fe;
2247 struct extfile_entry *efe;
2248 struct icb_tag *icbtag;
2249 struct long_ad s_ad, c_ad;
2250 uint64_t inflen, from, till;
2251 uint64_t foffset, end_foffset, restart_foffset;
2252 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2253 uint32_t num_lb, len, flags, lb_num;
2254 uint32_t run_start;
2255 uint32_t slot_offset, replace_len, replace;
2256 int addr_type, icbflags;
2257 // int udf_c_type = buf->b_udf_c_type;
2258 int lb_size, run_length, eof;
2259 int slot, cpy_slot, cpy_slots, restart_slot;
2260 int error;
2261
2262 DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
2263
2264 #if 0
2265 /* XXX disable sanity check for now */
2266 /* sanity check ... should be panic ? */
2267 if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
2268 return;
2269 #endif
2270
2271 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2272
2273 /* do the job */
2274 UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
2275 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2276
2277 fe = udf_node->fe;
2278 efe = udf_node->efe;
2279 if (fe) {
2280 icbtag = &fe->icbtag;
2281 inflen = udf_rw64(fe->inf_len);
2282 } else {
2283 icbtag = &efe->icbtag;
2284 inflen = udf_rw64(efe->inf_len);
2285 }
2286
2287 	/* make sure `till' is not past the file information length */
2288 from = buf->b_lblkno * lb_size;
2289 till = MIN(inflen, from + buf->b_resid);
2290
2291 num_lb = (till - from + lb_size -1) / lb_size;
2292
2293 DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
2294
2295 icbflags = udf_rw16(icbtag->flags);
2296 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2297
2298 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2299 /* nothing to do */
2300 /* XXX clean up rest of node? just in case? */
2301 UDF_UNLOCK_NODE(udf_node, 0);
2302 return;
2303 }
2304
2305 slot = 0;
2306 cpy_slot = 0;
2307 foffset = 0;
2308
2309 	/* 1) copy slots up to the first overlapping piece into the rewrite buffer */
2310 for (;;) {
2311 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2312 if (eof) {
2313 DPRINTF(WRITE,
2314 ("Record allocation in node "
2315 "failed: encountered EOF\n"));
2316 UDF_UNLOCK_NODE(udf_node, 0);
2317 buf->b_error = EINVAL;
2318 return;
2319 }
2320 len = udf_rw32(s_ad.len);
2321 flags = UDF_EXT_FLAGS(len);
2322 len = UDF_EXT_LEN(len);
2323
2324 if (flags == UDF_EXT_REDIRECT) {
2325 slot++;
2326 continue;
2327 }
2328
2329 end_foffset = foffset + len;
2330 if (end_foffset > from)
2331 break; /* found */
2332
2333 node_ad_cpy[cpy_slot++] = s_ad;
2334
2335 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2336 "-> stack\n",
2337 udf_rw16(s_ad.loc.part_num),
2338 udf_rw32(s_ad.loc.lb_num),
2339 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2340 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2341
2342 foffset = end_foffset;
2343 slot++;
2344 }
2345 restart_slot = slot;
2346 restart_foffset = foffset;
2347
2348 	/* 2) truncate the overlapping slot at the overlap and copy it */
2349 slot_offset = from - foffset;
2350 if (slot_offset > 0) {
2351 DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
2352 slot_offset, flags >> 30, flags));
2353
2354 s_ad.len = udf_rw32(slot_offset | flags);
2355 node_ad_cpy[cpy_slot++] = s_ad;
2356
2357 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2358 "-> stack\n",
2359 udf_rw16(s_ad.loc.part_num),
2360 udf_rw32(s_ad.loc.lb_num),
2361 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2362 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2363 }
2364 foffset += slot_offset;
2365
2366 /* 3) insert new mappings */
2367 memset(&s_ad, 0, sizeof(struct long_ad));
2369 for (lb_num = 0; lb_num < num_lb; lb_num++) {
2370 run_start = mapping[lb_num];
2371 run_length = 1;
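		/* coalesce: extend the run while the next mapping is
		 * contiguous with (or identical to) the current one */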
2372 while (lb_num < num_lb-1) {
2373 			if ((mapping[lb_num+1] != mapping[lb_num]+1) &&
2374 			    (mapping[lb_num+1] != mapping[lb_num]))
2375 				break;
2376 run_length++;
2377 lb_num++;
2378 }
2379 /* insert slot for this mapping */
2380 len = run_length * lb_size;
2381
2382 /* bounds checking */
2383 if (foffset + len > till)
2384 len = till - foffset;
2385 KASSERT(foffset + len <= inflen);
2386
2387 s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
2388 s_ad.loc.part_num = udf_rw16(vpart_num);
2389 s_ad.loc.lb_num = udf_rw32(run_start);
2390
2391 foffset += len;
2392
2393 /* paranoia */
2394 if (len == 0) {
2395 DPRINTF(WRITE,
2396 ("Record allocation in node "
2397 "failed: insert failed\n"));
2398 UDF_UNLOCK_NODE(udf_node, 0);
2399 buf->b_error = EINVAL;
2400 return;
2401 }
2402 node_ad_cpy[cpy_slot++] = s_ad;
2403
2404 DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
2405 "flags %d -> stack\n",
2406 udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
2407 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2408 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2409 }
2410
2411 /* 4) pop replaced length */
2412 slot = restart_slot;
2413 foffset = restart_foffset;
2414
2415 replace_len = till - foffset; /* total amount of bytes to pop */
2416 	slot_offset = from - foffset;	/* offset in first encountered slot */
2417 KASSERT((slot_offset % lb_size) == 0);
2418
2419 for (;;) {
2420 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2421 if (eof)
2422 break;
2423
2424 len = udf_rw32(s_ad.len);
2425 flags = UDF_EXT_FLAGS(len);
2426 len = UDF_EXT_LEN(len);
2427 lb_num = udf_rw32(s_ad.loc.lb_num);
2428
2429 if (flags == UDF_EXT_REDIRECT) {
2430 slot++;
2431 continue;
2432 }
2433
2434 DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
2435 "replace_len %d, "
2436 "vp %d, lb %d, len %d, flags %d\n",
2437 slot, slot_offset, replace_len,
2438 udf_rw16(s_ad.loc.part_num),
2439 udf_rw32(s_ad.loc.lb_num),
2440 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2441 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2442
2443 /* adjust for slot offset */
2444 if (slot_offset) {
2445 DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
2446 lb_num += slot_offset / lb_size;
2447 len -= slot_offset;
2448 foffset += slot_offset;
2449 replace_len -= slot_offset;
2450
2451 /* mark adjusted */
2452 slot_offset = 0;
2453 }
2454
2455 /* advance for (the rest of) this slot */
2456 replace = MIN(len, replace_len);
2457 DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
2458
2459 /* advance for this slot */
2460 if (replace) {
2461 			/* note: don't round DOWN on num_lb since we would then
2462 * forget the last partial one */
2463 num_lb = (replace + lb_size - 1) / lb_size;
2464 if (flags != UDF_EXT_FREE) {
2465 udf_free_allocated_space(ump, lb_num,
2466 udf_rw16(s_ad.loc.part_num), num_lb);
2467 }
2468 lb_num += num_lb;
2469 len -= replace;
2470 foffset += replace;
2471 replace_len -= replace;
2472 }
2473
2474 /* do we have a slot tail ? */
2475 if (len) {
2476 KASSERT(foffset % lb_size == 0);
2477
2478 /* we arrived at our point, push remainder */
2479 s_ad.len = udf_rw32(len | flags);
2480 s_ad.loc.lb_num = udf_rw32(lb_num);
2481 if (flags == UDF_EXT_FREE)
2482 s_ad.loc.lb_num = udf_rw32(0);
2483 node_ad_cpy[cpy_slot++] = s_ad;
2484 foffset += len;
2485 slot++;
2486
2487 DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
2488 "-> stack\n",
2489 udf_rw16(s_ad.loc.part_num),
2490 udf_rw32(s_ad.loc.lb_num),
2491 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2492 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2493 break;
2494 }
2495
2496 slot++;
2497 }
2498
2499 /* 5) copy remainder */
2500 for (;;) {
2501 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2502 if (eof)
2503 break;
2504
2505 len = udf_rw32(s_ad.len);
2506 flags = UDF_EXT_FLAGS(len);
2507 len = UDF_EXT_LEN(len);
2508
2509 if (flags == UDF_EXT_REDIRECT) {
2510 slot++;
2511 continue;
2512 }
2513
2514 node_ad_cpy[cpy_slot++] = s_ad;
2515
2516 DPRINTF(ALLOC, ("\t5: insert new mapping "
2517 "vp %d lb %d, len %d, flags %d "
2518 "-> stack\n",
2519 udf_rw16(s_ad.loc.part_num),
2520 udf_rw32(s_ad.loc.lb_num),
2521 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2522 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2523
2524 slot++;
2525 }
2526
2527 /* 6) reset node descriptors */
2528 udf_wipe_adslots(udf_node);
2529
2530 	/* 7) copy back extents; merge when possible, recounting on the fly */
2531 cpy_slots = cpy_slot;
2532
2533 c_ad = node_ad_cpy[0];
2534 slot = 0;
2535 DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
2536 "lb %d, len %d, flags %d\n",
2537 udf_rw16(c_ad.loc.part_num),
2538 udf_rw32(c_ad.loc.lb_num),
2539 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2540 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2541
2542 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2543 s_ad = node_ad_cpy[cpy_slot];
2544
2545 DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
2546 "lb %d, len %d, flags %d\n",
2547 udf_rw16(s_ad.loc.part_num),
2548 udf_rw32(s_ad.loc.lb_num),
2549 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2550 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2551
2552 		/* see if we can merge; udf_ads_merge() returns nonzero when it can't */
2553 		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2554 			/* not mergeable (anymore) */
2555 DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
2556 "len %d, flags %d\n",
2557 udf_rw16(c_ad.loc.part_num),
2558 udf_rw32(c_ad.loc.lb_num),
2559 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2560 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2561
2562 error = udf_append_adslot(udf_node, &slot, &c_ad);
2563 if (error) {
2564 buf->b_error = error;
2565 goto out;
2566 }
2567 c_ad = s_ad;
2568 slot++;
2569 }
2570 }
2571
2572 	/* 8) push the remaining slot (if any) */
2573 	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2574 DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
2575 "len %d, flags %d\n",
2576 udf_rw16(c_ad.loc.part_num),
2577 udf_rw32(c_ad.loc.lb_num),
2578 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2579 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2580
2581 error = udf_append_adslot(udf_node, &slot, &c_ad);
2582 if (error) {
2583 buf->b_error = error;
2584 goto out;
2585 }
2586 }
2587
2588 out:
2589 udf_count_alloc_exts(udf_node);
2590
2591 /* the node's descriptors should now be sane */
2592 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2593 UDF_UNLOCK_NODE(udf_node, 0);
2594
2595 KASSERT(orig_inflen == new_inflen);
2596 KASSERT(new_lbrec >= orig_lbrec);
2597
2598 return;
2599 }
2600
2601 /* --------------------------------------------------------------------- */
2602
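/*
 * Grow the node to `new_size'. For an internally allocated node the size
 * change is made in the descriptor itself while it still fits; otherwise
 * the node is first converted to short/long allocation, evacuating the
 * embedded data through vn_rdwr(). The appended area is recorded as
 * UDF_EXT_FREE extents, chunked to the maximum extent length.
 */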
2603 int
2604 udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
2605 {
2606 union dscrptr *dscr;
2607 struct vnode *vp = udf_node->vnode;
2608 struct udf_mount *ump = udf_node->ump;
2609 struct file_entry *fe;
2610 struct extfile_entry *efe;
2611 struct icb_tag *icbtag;
2612 struct long_ad c_ad, s_ad;
2613 uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
2614 uint64_t foffset, end_foffset;
2615 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2616 uint32_t lb_size, dscr_size, crclen, lastblock_grow;
2617 uint32_t icbflags, len, flags, max_len;
2618 uint32_t max_l_ad, l_ad, l_ea;
2619 uint16_t my_part, dst_part;
2620 uint8_t *data_pos, *evacuated_data;
2621 int addr_type;
2622 int slot, cpy_slot;
2623 int eof, error;
2624
2625 DPRINTF(ALLOC, ("udf_grow_node\n"));
2626
2627 UDF_LOCK_NODE(udf_node, 0);
2628 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2629
2630 lb_size = udf_rw32(ump->logical_vol->lb_size);
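	/* maximum extent length, rounded down to a whole number of lbs */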
2631 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2632
2633 fe = udf_node->fe;
2634 efe = udf_node->efe;
2635 if (fe) {
2636 dscr = (union dscrptr *) fe;
2637 icbtag = &fe->icbtag;
2638 inflen = udf_rw64(fe->inf_len);
2639 objsize = inflen;
2640 dscr_size = sizeof(struct file_entry) -1;
2641 l_ea = udf_rw32(fe->l_ea);
2642 l_ad = udf_rw32(fe->l_ad);
2643 } else {
2644 dscr = (union dscrptr *) efe;
2645 icbtag = &efe->icbtag;
2646 inflen = udf_rw64(efe->inf_len);
2647 objsize = udf_rw64(efe->obj_size);
2648 dscr_size = sizeof(struct extfile_entry) -1;
2649 l_ea = udf_rw32(efe->l_ea);
2650 l_ad = udf_rw32(efe->l_ad);
2651 }
2652 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
2653 max_l_ad = lb_size - dscr_size - l_ea;
2654
2655 icbflags = udf_rw16(icbtag->flags);
2656 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2657
2658 old_size = inflen;
2659 size_diff = new_size - old_size;
2660
2661 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2662
2663 evacuated_data = NULL;
2664 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2665 if (l_ad + size_diff <= max_l_ad) {
2666 /* only reflect size change directly in the node */
2667 inflen += size_diff;
2668 objsize += size_diff;
2669 l_ad += size_diff;
2670 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2671 if (fe) {
2672 fe->inf_len = udf_rw64(inflen);
2673 fe->l_ad = udf_rw32(l_ad);
2674 fe->tag.desc_crc_len = udf_rw16(crclen);
2675 } else {
2676 efe->inf_len = udf_rw64(inflen);
2677 efe->obj_size = udf_rw64(objsize);
2678 efe->l_ad = udf_rw32(l_ad);
2679 efe->tag.desc_crc_len = udf_rw16(crclen);
2680 }
2681 error = 0;
2682
2683 /* set new size for uvm */
2684 uvm_vnp_setsize(vp, old_size);
2685 uvm_vnp_setwritesize(vp, new_size);
2686
2687 #if 0
2688 /* zero append space in buffer */
2689 uvm_vnp_zerorange(vp, old_size, new_size - old_size);
2690 #endif
2691
2692 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2693
2694 /* unlock */
2695 UDF_UNLOCK_NODE(udf_node, 0);
2696
2697 KASSERT(new_inflen == orig_inflen + size_diff);
2698 KASSERT(new_lbrec == orig_lbrec);
2699 KASSERT(new_lbrec == 0);
2700 return 0;
2701 }
2702
2703 DPRINTF(ALLOC, ("\tCONVERT from internal\n"));
2704
2705 if (old_size > 0) {
2706 /* allocate some space and copy in the stuff to keep */
2707 evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
2708 memset(evacuated_data, 0, lb_size);
2709
2710 /* node is locked, so safe to exit mutex */
2711 UDF_UNLOCK_NODE(udf_node, 0);
2712
2713 /* read in using the `normal' vn_rdwr() */
2714 error = vn_rdwr(UIO_READ, udf_node->vnode,
2715 evacuated_data, old_size, 0,
2716 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2717 FSCRED, NULL, NULL);
2718
2719 /* enter again */
2720 UDF_LOCK_NODE(udf_node, 0);
2721 }
2722
2723 /* convert to a normal alloc and select type */
2724 my_part = udf_rw16(udf_node->loc.loc.part_num);
2725 dst_part = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
2726 addr_type = UDF_ICB_SHORT_ALLOC;
2727 if (dst_part != my_part)
2728 addr_type = UDF_ICB_LONG_ALLOC;
2729
2730 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2731 icbflags |= addr_type;
2732 icbtag->flags = udf_rw16(icbflags);
2733
2734 /* wipe old descriptor space */
2735 udf_wipe_adslots(udf_node);
2736
2737 memset(&c_ad, 0, sizeof(struct long_ad));
2738 c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
2739 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2740 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2741
2742 slot = 0;
2743 } else {
2744 		/* go to the last entry (if any) */
2745 slot = 0;
2746 cpy_slot = 0;
2747 foffset = 0;
2748 memset(&c_ad, 0, sizeof(struct long_ad));
2749 for (;;) {
2750 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2751 if (eof)
2752 break;
2753
2754 len = udf_rw32(c_ad.len);
2755 flags = UDF_EXT_FLAGS(len);
2756 len = UDF_EXT_LEN(len);
2757
2758 end_foffset = foffset + len;
2759 if (flags != UDF_EXT_REDIRECT)
2760 foffset = end_foffset;
2761
2762 slot++;
2763 }
2764 /* at end of adslots */
2765
2766 		/* special case: if the old size was zero there is no last slot */
2767 if (old_size == 0) {
2768 c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
2769 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2770 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2771 } else {
2772 /* refetch last slot */
2773 slot--;
2774 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2775 }
2776 }
2777
2778 /*
2779 * If the length of the last slot is not a multiple of lb_size, adjust
2780 	 * the length so that it is; don't forget to adjust `append_len'. This is
2781 	 * relevant when extending existing files.
2782 */
2783 len = udf_rw32(c_ad.len);
2784 flags = UDF_EXT_FLAGS(len);
2785 len = UDF_EXT_LEN(len);
2786
2787 lastblock_grow = 0;
2788 if (len % lb_size > 0) {
2789 lastblock_grow = lb_size - (len % lb_size);
2790 lastblock_grow = MIN(size_diff, lastblock_grow);
2791 len += lastblock_grow;
2792 c_ad.len = udf_rw32(len | flags);
2793
2794 		/* TODO zero appended space in buffer! */
2795 /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
2796 }
2797 memset(&s_ad, 0, sizeof(struct long_ad));
2798
2799 /* size_diff can be bigger than allowed, so grow in chunks */
2800 append_len = size_diff - lastblock_grow;
2801 while (append_len > 0) {
2802 chunk = MIN(append_len, max_len);
2803 s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
2804 s_ad.loc.part_num = udf_rw16(0);
2805 s_ad.loc.lb_num = udf_rw32(0);
2806
2807 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2808 			/* not mergeable (anymore) */
2809 error = udf_append_adslot(udf_node, &slot, &c_ad);
2810 if (error)
2811 goto errorout;
2812 slot++;
2813 c_ad = s_ad;
2814 memset(&s_ad, 0, sizeof(struct long_ad));
2815 }
2816 append_len -= chunk;
2817 }
2818
2819 	/* if there is a remaining piece in the accumulator, append it */
2820 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2821 error = udf_append_adslot(udf_node, &slot, &c_ad);
2822 if (error)
2823 goto errorout;
2824 slot++;
2825 }
2826
2827 	/* if there is a remaining piece that didn't fit, append it */
2828 if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
2829 error = udf_append_adslot(udf_node, &slot, &s_ad);
2830 if (error)
2831 goto errorout;
2832 slot++;
2833 }
2834
2835 inflen += size_diff;
2836 objsize += size_diff;
2837 if (fe) {
2838 fe->inf_len = udf_rw64(inflen);
2839 } else {
2840 efe->inf_len = udf_rw64(inflen);
2841 efe->obj_size = udf_rw64(objsize);
2842 }
2843 error = 0;
2844
2845 if (evacuated_data) {
2846 /* set new write size for uvm */
2847 uvm_vnp_setwritesize(vp, old_size);
2848
2849 /* write out evacuated data */
2850 error = vn_rdwr(UIO_WRITE, udf_node->vnode,
2851 evacuated_data, old_size, 0,
2852 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2853 FSCRED, NULL, NULL);
2854 uvm_vnp_setsize(vp, old_size);
2855 }
2856
2857 errorout:
2858 if (evacuated_data)
2859 free(evacuated_data, M_UDFTEMP);
2860
2861 udf_count_alloc_exts(udf_node);
2862
2863 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2864 UDF_UNLOCK_NODE(udf_node, 0);
2865
2866 KASSERT(new_inflen == orig_inflen + size_diff);
2867 KASSERT(new_lbrec == orig_lbrec);
2868
2869 return error;
2870 }
2871
2872 /* --------------------------------------------------------------------- */
2873
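/*
 * Shrink the node to `new_size', freeing the allocated space past the
 * new end. For an internally allocated node only the descriptor is
 * adjusted; a node shrunk to zero is converted back to internal
 * allocation.
 */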
2874 int
2875 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2876 {
2877 struct vnode *vp = udf_node->vnode;
2878 struct udf_mount *ump = udf_node->ump;
2879 struct file_entry *fe;
2880 struct extfile_entry *efe;
2881 struct icb_tag *icbtag;
2882 struct long_ad c_ad, s_ad, *node_ad_cpy;
2883 uint64_t size_diff, old_size, inflen, objsize;
2884 uint64_t foffset, end_foffset;
2885 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2886 uint32_t lb_size, dscr_size, crclen;
2887 uint32_t slot_offset, slot_offset_lb;
2888 uint32_t len, flags, max_len;
2889 uint32_t num_lb, lb_num;
2890 uint32_t max_l_ad, l_ad, l_ea;
2891 uint16_t vpart_num;
2892 uint8_t *data_pos;
2893 int icbflags, addr_type;
2894 int slot, cpy_slot, cpy_slots;
2895 int eof, error;
2896
2897 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2898
2899 UDF_LOCK_NODE(udf_node, 0);
2900 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2901
2902 lb_size = udf_rw32(ump->logical_vol->lb_size);
2903 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2904
2905 /* do the work */
2906 fe = udf_node->fe;
2907 efe = udf_node->efe;
2908 if (fe) {
2909 icbtag = &fe->icbtag;
2910 inflen = udf_rw64(fe->inf_len);
2911 objsize = inflen;
2912 dscr_size = sizeof(struct file_entry) -1;
2913 l_ea = udf_rw32(fe->l_ea);
2914 l_ad = udf_rw32(fe->l_ad);
2915 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2916 } else {
2917 icbtag = &efe->icbtag;
2918 inflen = udf_rw64(efe->inf_len);
2919 objsize = udf_rw64(efe->obj_size);
2920 dscr_size = sizeof(struct extfile_entry) -1;
2921 l_ea = udf_rw32(efe->l_ea);
2922 l_ad = udf_rw32(efe->l_ad);
2923 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2924 }
2925 max_l_ad = lb_size - dscr_size - l_ea;
2926
2927 icbflags = udf_rw16(icbtag->flags);
2928 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2929
2930 old_size = inflen;
2931 size_diff = old_size - new_size;
2932
2933 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2934
2935 /* shrink the node to its new size */
2936 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2937 /* only reflect size change directly in the node */
2938 KASSERT(new_size <= max_l_ad);
2939 inflen -= size_diff;
2940 objsize -= size_diff;
2941 l_ad -= size_diff;
2942 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2943 if (fe) {
2944 fe->inf_len = udf_rw64(inflen);
2945 fe->l_ad = udf_rw32(l_ad);
2946 fe->tag.desc_crc_len = udf_rw16(crclen);
2947 } else {
2948 efe->inf_len = udf_rw64(inflen);
2949 efe->obj_size = udf_rw64(objsize);
2950 efe->l_ad = udf_rw32(l_ad);
2951 efe->tag.desc_crc_len = udf_rw16(crclen);
2952 }
2953 error = 0;
2954
2955 /* clear the space in the descriptor */
2956 KASSERT(old_size > new_size);
2957 memset(data_pos + new_size, 0, old_size - new_size);
2958
2959 		/* TODO zero appended space in buffer! */
2960 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2961
2962 /* set new size for uvm */
2963 uvm_vnp_setsize(vp, new_size);
2964
2965 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2966 UDF_UNLOCK_NODE(udf_node, 0);
2967
2968 KASSERT(new_inflen == orig_inflen - size_diff);
2969 KASSERT(new_lbrec == orig_lbrec);
2970 KASSERT(new_lbrec == 0);
2971
2972 return 0;
2973 }
2974
2975 	/* set up scratch space for copying the node's allocation extents */
2976 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2977 M_UDFMNT, M_WAITOK);
2978 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2979
2980 /*
2981 * Shrink the node by releasing the allocations and truncate the last
2982 * allocation to the new size. If the new size fits into the
2983 * allocation descriptor itself, transform it into an
2984 * UDF_ICB_INTERN_ALLOC.
2985 */
2986 slot = 0;
2987 cpy_slot = 0;
2988 foffset = 0;
2989
2990 	/* 1) copy slots up to the first overlapping piece into the rewrite buffer */
2991 for (;;) {
2992 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2993 if (eof) {
2994 DPRINTF(WRITE,
2995 ("Shrink node failed: "
2996 "encountered EOF\n"));
2997 error = EINVAL;
2998 goto errorout; /* panic? */
2999 }
3000 len = udf_rw32(s_ad.len);
3001 flags = UDF_EXT_FLAGS(len);
3002 len = UDF_EXT_LEN(len);
3003
3004 if (flags == UDF_EXT_REDIRECT) {
3005 slot++;
3006 continue;
3007 }
3008
3009 end_foffset = foffset + len;
3010 if (end_foffset > new_size)
3011 break; /* found */
3012
3013 node_ad_cpy[cpy_slot++] = s_ad;
3014
3015 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
3016 "-> stack\n",
3017 udf_rw16(s_ad.loc.part_num),
3018 udf_rw32(s_ad.loc.lb_num),
3019 UDF_EXT_LEN(udf_rw32(s_ad.len)),
3020 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3021
3022 foffset = end_foffset;
3023 slot++;
3024 }
3025 slot_offset = new_size - foffset;
3026
3027 	/* 2) truncate the overlapping slot at the overlap and copy it */
3028 if (slot_offset > 0) {
3029 lb_num = udf_rw32(s_ad.loc.lb_num);
3030 vpart_num = udf_rw16(s_ad.loc.part_num);
3031
3032 if (flags == UDF_EXT_ALLOCATED) {
3033 /* calculate extent in lb, and offset in lb */
3034 num_lb = (len + lb_size -1) / lb_size;
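			/* round up: the partially kept block stays allocated */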
3035 slot_offset_lb = (slot_offset + lb_size -1) / lb_size;
3036
3037 /* adjust our slot */
3038 lb_num += slot_offset_lb;
3039 num_lb -= slot_offset_lb;
3040
3041 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
3042 }
3043
3044 s_ad.len = udf_rw32(slot_offset | flags);
3045 node_ad_cpy[cpy_slot++] = s_ad;
3046 slot++;
3047
3048 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
3049 "-> stack\n",
3050 udf_rw16(s_ad.loc.part_num),
3051 udf_rw32(s_ad.loc.lb_num),
3052 UDF_EXT_LEN(udf_rw32(s_ad.len)),
3053 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3054 }
3055
3056 /* 3) delete remainder */
3057 for (;;) {
3058 udf_get_adslot(udf_node, slot, &s_ad, &eof);
3059 if (eof)
3060 break;
3061
3062 len = udf_rw32(s_ad.len);
3063 flags = UDF_EXT_FLAGS(len);
3064 len = UDF_EXT_LEN(len);
3065
3066 if (flags == UDF_EXT_REDIRECT) {
3067 slot++;
3068 continue;
3069 }
3070
3071 DPRINTF(ALLOC, ("\t3: delete remainder "
3072 "vp %d lb %d, len %d, flags %d\n",
3073 udf_rw16(s_ad.loc.part_num),
3074 udf_rw32(s_ad.loc.lb_num),
3075 UDF_EXT_LEN(udf_rw32(s_ad.len)),
3076 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3077
3078 if (flags == UDF_EXT_ALLOCATED) {
3079 lb_num = udf_rw32(s_ad.loc.lb_num);
3080 vpart_num = udf_rw16(s_ad.loc.part_num);
3081 num_lb = (len + lb_size - 1) / lb_size;
3082
3083 udf_free_allocated_space(ump, lb_num, vpart_num,
3084 num_lb);
3085 }
3086
3087 slot++;
3088 }
3089
3090 /* 4) if it will fit into the descriptor then convert */
3091 if (new_size < max_l_ad) {
3092 /*
3093 		 * rescue/evacuate the old piece by reading it in, and convert it
3094 * to internal alloc.
3095 */
3096 if (new_size == 0) {
3097 /* XXX/TODO only for zero sizing now */
3098 udf_wipe_adslots(udf_node);
3099
3100 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
3101 icbflags |= UDF_ICB_INTERN_ALLOC;
3102 icbtag->flags = udf_rw16(icbflags);
3103
3104 inflen -= size_diff; KASSERT(inflen == 0);
3105 objsize -= size_diff;
3106 l_ad = new_size;
3107 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
3108 if (fe) {
3109 fe->inf_len = udf_rw64(inflen);
3110 fe->l_ad = udf_rw32(l_ad);
3111 fe->tag.desc_crc_len = udf_rw16(crclen);
3112 } else {
3113 efe->inf_len = udf_rw64(inflen);
3114 efe->obj_size = udf_rw64(objsize);
3115 efe->l_ad = udf_rw32(l_ad);
3116 efe->tag.desc_crc_len = udf_rw16(crclen);
3117 }
3118 /* eventually copy in evacuated piece */
3119 /* set new size for uvm */
3120 uvm_vnp_setsize(vp, new_size);
3121
3122 free(node_ad_cpy, M_UDFMNT);
3123 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
3124
3125 UDF_UNLOCK_NODE(udf_node, 0);
3126
3127 KASSERT(new_inflen == orig_inflen - size_diff);
3128 KASSERT(new_inflen == 0);
3129 KASSERT(new_lbrec == 0);
3130
3131 return 0;
3132 }
3133
3134 		printf("UDF_SHRINK_NODE: could have converted to internal alloc!\n");
3135 }
3136
3137 /* 5) reset node descriptors */
3138 udf_wipe_adslots(udf_node);
3139
3140 	/* 6) copy back extents; merge when possible, recounting on the fly */
3141 cpy_slots = cpy_slot;
3142
3143 c_ad = node_ad_cpy[0];
3144 slot = 0;
3145 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
3146 s_ad = node_ad_cpy[cpy_slot];
3147
3148 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
3149 "lb %d, len %d, flags %d\n",
3150 udf_rw16(s_ad.loc.part_num),
3151 udf_rw32(s_ad.loc.lb_num),
3152 UDF_EXT_LEN(udf_rw32(s_ad.len)),
3153 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3154
3155 /* see if we can merge */
3156 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
3157 			/* not mergeable (anymore) */
3158 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
3159 "len %d, flags %d\n",
3160 udf_rw16(c_ad.loc.part_num),
3161 udf_rw32(c_ad.loc.lb_num),
3162 UDF_EXT_LEN(udf_rw32(c_ad.len)),
3163 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
3164
3165 error = udf_append_adslot(udf_node, &slot, &c_ad);
3166 if (error)
3167 goto errorout; /* panic? */
3168 c_ad = s_ad;
3169 slot++;
3170 }
3171 }
3172
3173 	/* 7) push the remaining slot (if any) */
3174 	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
3175 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
3176 "len %d, flags %d\n",
3177 udf_rw16(c_ad.loc.part_num),
3178 udf_rw32(c_ad.loc.lb_num),
3179 UDF_EXT_LEN(udf_rw32(c_ad.len)),
3180 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
3181
3182 error = udf_append_adslot(udf_node, &slot, &c_ad);
3183 if (error)
3184 goto errorout; /* panic? */
3186 }
3187
3188 inflen -= size_diff;
3189 objsize -= size_diff;
3190 if (fe) {
3191 fe->inf_len = udf_rw64(inflen);
3192 } else {
3193 efe->inf_len = udf_rw64(inflen);
3194 efe->obj_size = udf_rw64(objsize);
3195 }
3196 error = 0;
3197
3198 /* set new size for uvm */
3199 uvm_vnp_setsize(vp, new_size);
3200
3201 errorout:
3202 free(node_ad_cpy, M_UDFMNT);
3203
3204 udf_count_alloc_exts(udf_node);
3205
3206 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
3207 UDF_UNLOCK_NODE(udf_node, 0);
3208
3209 KASSERT(new_inflen == orig_inflen - size_diff);
3210
3211 return error;
3212 }
3213
3214