/* $NetBSD: udf_allocation.c,v 1.1.2.6 2010/03/11 15:04:14 yamt Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.1.2.6 2010/03/11 15:04:14 yamt Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

/* TODO strip */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

static void udf_record_allocation_in_node(struct udf_mount *ump,
	struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
	struct long_ad *node_ad_cpy);

static void udf_collect_free_space_for_vpart(struct udf_mount *ump,
	uint16_t vpart_num, uint32_t num_lb);

static void udf_wipe_adslots(struct udf_node *udf_node);
static void udf_count_alloc_exts(struct udf_node *udf_node);

/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
 * searching for the same or adjacent position again.
 */

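/*
 * Note on the encoding used throughout this file: the 32-bit `len' field of
 * an allocation descriptor carries the extent length in its lower 30 bits
 * and the extent type/flags in its upper two bits (hence the `flags>>30'
 * when printing); the UDF_EXT_LEN() and UDF_EXT_FLAGS() macros take these
 * apart.
 */
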
/* --------------------------------------------------------------------- */

#if 0
#if 1
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen = %"PRIu64"\n", inflen);
	printf("\t\t");

	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num   = udf_rw32(s_ad.loc.lb_num);
		len      = udf_rw32(s_ad.len);
		flags    = UDF_EXT_FLAGS(len);
		len      = UDF_EXT_LEN(len);

		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags>>30);
		printf("] ");

		if (flags == UDF_EXT_REDIRECT) {
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
#else
#define udf_node_dump(a)
#endif


static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* check against the unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit  = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}


static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr       = (union dscrptr *) fe;
		icbtag     = &fe->icbtag;
		inflen     = udf_rw64(fe->inf_len);
		dscr_size  = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad       = udf_rw32(fe->l_ad);
		l_ea       = udf_rw32(fe->l_ea);
	} else {
		dscr       = (union dscrptr *) efe;
		icbtag     = &efe->icbtag;
		inflen     = udf_rw64(efe->inf_len);
		dscr_size  = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad       = udf_rw32(efe->l_ad);
		l_ea       = udf_rw32(efe->l_ea);
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad  = lb_size - dscr_size - l_ea;
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen     = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num   = udf_rw32(s_ad.loc.lb_num);
		len      = udf_rw32(s_ad.len);
		flags    = UDF_EXT_FLAGS(len);
		len      = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
#else
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	uint64_t inflen, logblksrec;
	int dscr_size, lb_size;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size  = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size  = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
	}
	*cnt_logblksrec = logblksrec;
	*cnt_inflen     = inflen;
}
#endif

/* --------------------------------------------------------------------- */

void
udf_calc_freespace(struct udf_mount *ump, uint64_t *sizeblks, uint64_t *freeblks)
{
	struct logvol_int_desc *lvid;
	uint32_t *pos1, *pos2;
	int vpart, num_vpart;

	lvid = ump->logvol_integrity;
	*freeblks = *sizeblks = 0;

	/*
	 * Sequential media report free space directly (CD/DVD/BD-R); for
	 * other media we need the logical volume integrity.
	 *
	 * We sum up all free space here regardless of type.
	 */

	KASSERT(lvid);
	num_vpart = udf_rw32(lvid->num_part);

	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* use track info directly, summing both if two tracks are open */
		/* XXX assumption at most two tracks open */
		*freeblks = ump->data_track.free_blocks;
		if (ump->data_track.tracknr != ump->metadata_track.tracknr)
			*freeblks += ump->metadata_track.free_blocks;
		*sizeblks = ump->discinfo.last_possible_lba;
	} else {
		/* free and used space for mountpoint based on logvol integrity */
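		/*
		 * lvid->tables[] holds the free-space values for all
		 * partitions first, followed by the size values; hence pos2
		 * is offset by num_vpart entries.
		 */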
		for (vpart = 0; vpart < num_vpart; vpart++) {
			pos1 = &lvid->tables[0] + vpart;
			pos2 = &lvid->tables[0] + num_vpart + vpart;
			if (udf_rw32(*pos1) != (uint32_t) -1) {
				*freeblks += udf_rw32(*pos1);
				*sizeblks += udf_rw32(*pos2);
			}
		}
	}
	/* adjust for accounted uncommitted blocks */
	for (vpart = 0; vpart < num_vpart; vpart++)
		*freeblks -= ump->uncommitted_lbs[vpart];

	if (*freeblks > UDF_DISC_SLACK) {
		*freeblks -= UDF_DISC_SLACK;
	} else {
		*freeblks = 0;
	}
}


static void
udf_calc_vpart_freespace(struct udf_mount *ump, uint16_t vpart_num, uint64_t *freeblks)
{
	struct logvol_int_desc *lvid;
	uint32_t *pos1;

	lvid = ump->logvol_integrity;
	*freeblks = 0;

	/*
	 * Sequential media report free space directly (CD/DVD/BD-R); for
	 * other media we need the logical volume integrity.
	 *
	 * We sum up all free space here regardless of type.
	 */

	KASSERT(lvid);
	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* XXX assumption at most two tracks open */
		if (vpart_num == ump->data_part) {
			*freeblks = ump->data_track.free_blocks;
		} else {
			*freeblks = ump->metadata_track.free_blocks;
		}
	} else {
		/* free and used space for mountpoint based on logvol integrity */
		pos1 = &lvid->tables[0] + vpart_num;
		if (udf_rw32(*pos1) != (uint32_t) -1)
			*freeblks += udf_rw32(*pos1);
	}

	/* adjust for accounted uncommitted blocks */
	if (*freeblks > ump->uncommitted_lbs[vpart_num]) {
		*freeblks -= ump->uncommitted_lbs[vpart_num];
	} else {
		*freeblks = 0;
	}
}

/* --------------------------------------------------------------------- */

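/*
 * Translate a virtual address (vpart + logical block) to a device logical
 * block number. Depending on the mapping type this is a simple offset
 * (physical), a VAT lookup (virtual), a sparing-table lookup (sparable) or a
 * walk over the metadata file's allocation descriptors (metadata); the
 * latter re-enters the translation since the metadata file itself can live
 * on a sparable partition. *extres returns for how many consecutive logical
 * blocks this translation remains valid.
 */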
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
		   uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc       *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart  = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

translate_again:
	part  = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel    = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres    = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot    = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len   = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num   = udf_rw32(s_icb_loc.loc.lb_num);
		vpart    = udf_rw16(s_icb_loc.loc.part_num);
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}


/* XXX provisional primitive braindead version */
/* TODO use ext_res */
void
udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
	uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
{
	struct long_ad loc;
	uint32_t lb_numres, ext_res;
	int sector;

	for (sector = 0; sector < sectors; sector++) {
		memset(&loc, 0, sizeof(struct long_ad));
		loc.loc.part_num = udf_rw16(vpart_num);
		loc.loc.lb_num   = udf_rw32(*lmapping);
		udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
		*pmapping = lb_numres;
		lmapping++; pmapping++;
	}
}


/* --------------------------------------------------------------------- */

/*
 * Translate an extent (in logical_blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */

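/*
 * Each entry of map[] receives either a translated device sector number, the
 * marker UDF_TRANS_ZERO for unallocated (sparse) blocks, or UDF_TRANS_INTERN
 * when the file data is embedded in the node descriptor itself.
 */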
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot    = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot */
	ext_offset = from * lb_size - foffset;

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);

		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		overlap  = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while() below is necessary since the extent
		 * that udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				t_ad.loc.lb_num   = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}

/* --------------------------------------------------------------------- */

static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
	uint8_t *blob;
	int entry, chunk, found, error;

	KASSERT(ump);
	KASSERT(ump->logical_vol);

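	/*
	 * A VAT entry of 0xffffffff marks a free slot; the found or appended
	 * slot is claimed below by writing the initialiser 0xfffffffe into
	 * it.
	 */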
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

	/* TODO static allocation of search chunk */

	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
	found  = 0;
	error  = 0;
	entry  = 0;
	do {
		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
		if (chunk <= 0)
			break;
		/* load in chunk */
		error = udf_vat_read(ump->vat_node, blob, chunk,
				ump->vat_offset + lb_num * 4);

		if (error)
			break;

		/* search this chunk */
		for (entry=0; entry < chunk /4; entry++, lb_num++) {
			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
			lb_map = udf_rw32(udf_rw32_lbmap);
			if (lb_map == 0xffffffff) {
				found = 1;
				break;
			}
		}
	} while (!found);
	if (error) {
		printf("udf_search_free_vatloc: error reading in vat chunk "
			"(lb %d, size %d)\n", lb_num, chunk);
	}

	if (!found) {
		/* extend VAT */
		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
		lb_num = ump->vat_entries;
		ump->vat_entries++;
	}

	/* mark entry with initialiser just in case */
	lb_map = udf_rw32(0xfffffffe);
	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
		ump->vat_offset + lb_num *4);
	ump->vat_last_free_lb = lb_num;

	free(blob, M_UDFTEMP);
	*lbnumres = lb_num;
	return 0;
}


static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t  diff;
	uint8_t *bpos;
	int pass;

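	/*
	 * In these space bitmaps a set bit marks a free logical block:
	 * allocating clears bits (using ffs() to find a free one) and
	 * udf_bitmap_free() sets them again.
	 */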
	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos  = bitmap->bits + offset/8;
			bit   = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				offset += 8;
				continue;
			}

			/* check for ffs overshoot */
			if (offset + bit-1 >= bitmap->max_offset) {
				offset = bitmap->max_offset;
				break;
			}

			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}


static void
udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
{
	uint32_t offset;
	uint32_t bit, bitval;
	uint8_t *bpos;

	offset = lb_num;

	/* starter bits */
	bpos = bitmap->bits + offset/8;
	bit  = offset % 8;
	while ((bit != 0) && (num_lb > 0)) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
	if (num_lb == 0)
		return;

	/* whole bytes */
	KASSERT(bit == 0);
	bpos = bitmap->bits + offset / 8;
	while (num_lb >= 8) {
		KASSERT((*bpos == 0));
		DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
		*bpos = 255;
		offset += 8; num_lb -= 8;
		bpos++;
	}

	/* stop bits */
	KASSERT(num_lb < 8);
	bit = 0;
	while (num_lb > 0) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
}


static uint32_t
udf_bitmap_check_trunc_free(struct udf_bitmap *bitmap, uint32_t to_trunc)
{
	uint32_t seq_free, offset;
	uint8_t *bpos;
	uint8_t  bit, bitval;

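	/*
	 * Count how many of the last `to_trunc' blocks in the bitmap form a
	 * free run ending at max_offset; one allocated block in between
	 * restarts the count.
	 */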
	DPRINTF(RESERVE, ("\ttrying to trunc %d bits from bitmap\n", to_trunc));
	offset = bitmap->max_offset - to_trunc;

	/* starter bits (if any) */
	bpos = bitmap->bits + offset/8;
	bit  = offset % 8;
	seq_free = 0;
	while (to_trunc > 0) {
		seq_free++;
		bitval = (1 << bit);
		if (!(*bpos & bitval))
			seq_free = 0;
		offset++; to_trunc--;
		bit++;
		if (bit == 8) {
			bpos++;
			bit = 0;
		}
	}

	DPRINTF(RESERVE, ("\tfound %d sequential free bits in bitmap\n", seq_free));
	return seq_free;
}

/* --------------------------------------------------------------------- */

/*
 * We check for overall disc space with a margin to prevent critical
 * conditions. If disc space is low we try to force a sync() to improve our
 * estimates. When confronted with a meta-data partition size shortage we
 * check if it can be extended and, if so, extend it.
 *
 * A 2nd strategy we could use when disc space is getting low on a disc
 * formatted with a meta-data partition is to see if there are sparse areas in
 * the meta-data partition and free blocks there for extra data.
 */

void
udf_do_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	uint16_t vpart_num, uint32_t num_lb)
{
	ump->uncommitted_lbs[vpart_num] += num_lb;
	if (udf_node)
		udf_node->uncommitted_lbs += num_lb;
}


void
udf_do_unreserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	uint16_t vpart_num, uint32_t num_lb)
{
	ump->uncommitted_lbs[vpart_num] -= num_lb;
	if (ump->uncommitted_lbs[vpart_num] < 0) {
		DPRINTF(RESERVE, ("UDF: underflow on partition reservation, "
			"part %d: %d\n", vpart_num,
			ump->uncommitted_lbs[vpart_num]));
		ump->uncommitted_lbs[vpart_num] = 0;
	}
	if (udf_node) {
		udf_node->uncommitted_lbs -= num_lb;
		if (udf_node->uncommitted_lbs < 0) {
			DPRINTF(RESERVE, ("UDF: underflow of node "
				"reservation : %d\n",
				udf_node->uncommitted_lbs));
			udf_node->uncommitted_lbs = 0;
		}
	}
}


int
udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail)
{
	uint64_t freeblks;
	uint64_t slack;
	int i, error;

	slack = 0;
	if (can_fail)
		slack = UDF_DISC_SLACK;

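	/*
	 * With can_fail set we demand UDF_DISC_SLACK extra free blocks,
	 * presumably keeping a reserve for requests that may not fail.
	 */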
	error = 0;
	mutex_enter(&ump->allocate_mutex);

	/* check if there is enough space available */
	for (i = 0; i < 3; i++) {	/* XXX arbitrary number */
		udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
		if (num_lb + slack < freeblks)
			break;
		/* issue SYNC */
		DPRINTF(RESERVE, ("udf_reserve_space: issuing sync\n"));
		mutex_exit(&ump->allocate_mutex);
		udf_do_sync(ump, FSCRED, 0);
		mutex_enter(&mntvnode_lock);
		/* 1/8 second wait */
		cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
			hz/8);
		mutex_exit(&mntvnode_lock);
		mutex_enter(&ump->allocate_mutex);
	}

	/* check if there is enough space available now */
	udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
	if (num_lb + slack >= freeblks) {
		DPRINTF(RESERVE, ("udf_reserve_space: try to redistribute "
			"partition space\n"));
		DPRINTF(RESERVE, ("\tvpart %d, type %d is full\n",
				vpart_num, ump->vtop_alloc[vpart_num]));
		/* Try to redistribute space if possible */
		udf_collect_free_space_for_vpart(ump, vpart_num, num_lb + slack);
	}

	/* check if there is enough space available now */
	udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
	if (num_lb + slack <= freeblks) {
		udf_do_reserve_space(ump, udf_node, vpart_num, num_lb);
	} else {
		DPRINTF(RESERVE, ("udf_reserve_space: out of disc space\n"));
		error = ENOSPC;
	}

	mutex_exit(&ump->allocate_mutex);
	return error;
}


void
udf_cleanup_reservation(struct udf_node *udf_node)
{
	struct udf_mount *ump = udf_node->ump;
	int vpart_num;

	mutex_enter(&ump->allocate_mutex);

	/* compensate for overlapping blocks */
	DPRINTF(RESERVE, ("UDF: overlapped %d blocks in count\n", udf_node->uncommitted_lbs));

	vpart_num = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
	udf_do_unreserve_space(ump, udf_node, vpart_num, udf_node->uncommitted_lbs);

	DPRINTF(RESERVE, ("\ttotal now %d\n", ump->uncommitted_lbs[vpart_num]));

	/* sanity */
	if (ump->uncommitted_lbs[vpart_num] < 0)
		ump->uncommitted_lbs[vpart_num] = 0;

	mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Allocate an extent of given length on given virt. partition. It doesn't
 * have to be one stretch.
 */

int
udf_allocate_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	alloc_type =  ump->vtop_alloc[vpart_num];
	is_node    = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error   = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;

			/* reserve on the backing sequential partition since
			 * that partition is credited back later */
			udf_do_reserve_space(ump, udf_node,
				ump->vtop[vpart_num], num_lb);
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov  = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R  */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R  */
		printf("ALERT: udf_allocate_space : allocation %d "
				"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

	if (!error) {
		/* credit our partition since we have committed the space */
		udf_do_unreserve_space(ump, udf_node, vpart_num, num_lb);
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64, (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}

/* --------------------------------------------------------------------- */

void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if it's defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Special function to synchronise the metadatamirror file when the metadata
 * file changes on resizing. When the metadata file is actually duplicated,
 * this action is a no-op since the two describe different extents on the
 * disc.
 */

void udf_synchronise_metadatamirror_node(struct udf_mount *ump)
{
	struct udf_node *meta_node, *metamirror_node;
	struct long_ad s_ad;
	int slot, cpy_slot;
	int error, eof;

	if (ump->metadata_flags & METADATA_DUPLICATED)
		return;

	meta_node       = ump->metadata_node;
	metamirror_node = ump->metadatamirror_node;

	/* 1) wipe mirror node */
	udf_wipe_adslots(metamirror_node);

	/* 2) copy all node descriptors from the meta_node */
	slot     = 0;
	cpy_slot = 0;
	for (;;) {
		udf_get_adslot(meta_node, slot, &s_ad, &eof);
		if (eof)
			break;
		error = udf_append_adslot(metamirror_node, &cpy_slot, &s_ad);
		if (error) {
			/* WTF, this shouldn't happen, what to do now? */
			panic("udf_synchronise_metadatamirror_node failed!");
		}
		slot++;
	}

	/* 3) adjust metamirror_node size */
	if (meta_node->fe) {
		KASSERT(metamirror_node->fe);
		metamirror_node->fe->inf_len = meta_node->fe->inf_len;
	} else {
		KASSERT(meta_node->efe);
		KASSERT(metamirror_node->efe);
		metamirror_node->efe->inf_len  = meta_node->efe->inf_len;
		metamirror_node->efe->obj_size = meta_node->efe->obj_size;
	}

	/* for sanity */
	udf_count_alloc_exts(metamirror_node);
}

/* --------------------------------------------------------------------- */

/*
 * When faced with an out-of-space condition while there is still space
 * available on other partitions, try to redistribute the space. This is only
 * defined for media using Metadata partitions.
 *
 * There are two formats to deal with. Either it's a `normal' metadata
 * partition and we can move blocks between a metadata bitmap and its
 * companion data spacemap OR it's a UDF 2.60 formatted BluRay-R disc with POW
 * and a metadata partition.
 */

static uint32_t
udf_trunc_metadatapart(struct udf_mount *ump, uint32_t num_lb)
{
	struct udf_node *bitmap_node;
	struct udf_bitmap *bitmap;
	struct space_bitmap_desc *sbd, *new_sbd;
	struct logvol_int_desc *lvid;
	uint64_t inf_len;
	uint64_t meta_free_lbs, data_free_lbs;
	uint32_t *freepos, *sizepos;
	uint32_t unit, lb_size, to_trunc;
	uint16_t meta_vpart_num, data_vpart_num, num_vpart;
	int err;

	unit    = ump->metadata_alloc_unit_size;
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	lvid    = ump->logvol_integrity;

	/* lookup vpart for metadata partition */
	meta_vpart_num = ump->node_part;
	KASSERT(ump->vtop_alloc[meta_vpart_num] == UDF_ALLOC_METABITMAP);

	/* lookup vpart for data partition */
	data_vpart_num = ump->data_part;
	KASSERT(ump->vtop_alloc[data_vpart_num] == UDF_ALLOC_SPACEMAP);

	udf_calc_vpart_freespace(ump, data_vpart_num, &data_free_lbs);
	udf_calc_vpart_freespace(ump, meta_vpart_num, &meta_free_lbs);

	DPRINTF(RESERVE, ("\tfree space on data partition     %"PRIu64" blks\n", data_free_lbs));
	DPRINTF(RESERVE, ("\tfree space on metadata partition %"PRIu64" blks\n", meta_free_lbs));

	/* give away some of the free meta space, in unit block sizes */
	to_trunc = meta_free_lbs/4;			/* give out a quarter */
	to_trunc = MAX(to_trunc, num_lb);
	to_trunc = unit * ((to_trunc + unit-1) / unit);	/* round up */

	/* scale down if needed and bail out when out of space */
	if (to_trunc >= meta_free_lbs)
		return num_lb;

	/* check extent of bits marked free at the end of the map */
	bitmap = &ump->metadata_unalloc_bits;
	to_trunc = udf_bitmap_check_trunc_free(bitmap, to_trunc);
	to_trunc = unit * (to_trunc / unit);		/* round down again */
	if (to_trunc == 0)
		return num_lb;

	DPRINTF(RESERVE, ("\ttruncating %d lbs from the metadata bitmap\n",
		to_trunc));

	/* get length of the metadata bitmap node file */
	bitmap_node = ump->metadatabitmap_node;
	if (bitmap_node->fe) {
		inf_len = udf_rw64(bitmap_node->fe->inf_len);
	} else {
		KASSERT(bitmap_node->efe);
		inf_len = udf_rw64(bitmap_node->efe->inf_len);
	}
	inf_len -= to_trunc/8;

	/* as per [UDF 2.60/2.2.13.6] : */
	/* 1) update the SBD in the metadata bitmap file */
	sbd = (struct space_bitmap_desc *) bitmap->blob;
	sbd->num_bits  = udf_rw32(sbd->num_bits)  - to_trunc;
	sbd->num_bytes = udf_rw32(sbd->num_bytes) - to_trunc/8;
	bitmap->max_offset = udf_rw32(sbd->num_bits);

	num_vpart = udf_rw32(lvid->num_part);
	freepos = &lvid->tables[0] + meta_vpart_num;
	sizepos = &lvid->tables[0] + num_vpart + meta_vpart_num;
	*freepos = udf_rw32(*freepos) - to_trunc;
	*sizepos = udf_rw32(*sizepos) - to_trunc;

	/* realloc bitmap for better memory usage */
	new_sbd = realloc(sbd, inf_len, M_UDFVOLD,
		M_CANFAIL | M_WAITOK);
	if (new_sbd) {
		/* update pointers */
		ump->metadata_unalloc_dscr = new_sbd;
		bitmap->blob = (uint8_t *) new_sbd;
	}
	ump->lvclose |= UDF_WRITE_PART_BITMAPS;

	/*
	 * The truncated space is secured now and can't be allocated anymore.
	 * Release the allocate mutex so we can shrink the nodes the normal
	 * way.
	 */
	mutex_exit(&ump->allocate_mutex);

	/* 2) trunc the metadata bitmap information file, freeing blocks */
	err = udf_shrink_node(bitmap_node, inf_len);
	KASSERT(err == 0);

	/* 3) trunc the metadata file and mirror file, freeing blocks */
	inf_len = udf_rw32(sbd->num_bits) * lb_size;	/* [4/14.12.4] */
	err = udf_shrink_node(ump->metadata_node, inf_len);
	KASSERT(err == 0);
	if (ump->metadatamirror_node && (ump->metadata_flags & METADATA_DUPLICATED)) {
		err = udf_shrink_node(ump->metadatamirror_node, inf_len);
		KASSERT(err == 0);
	}
	ump->lvclose |= UDF_WRITE_METAPART_NODES;

	/* relock before exit */
	mutex_enter(&ump->allocate_mutex);

	if (to_trunc > num_lb)
		return 0;
	return num_lb - to_trunc;
}


static void
udf_sparsify_metadatapart(struct udf_mount *ump, uint32_t num_lb)
{
	/* NOT IMPLEMENTED, fail */
}


static void
udf_collect_free_space_for_vpart(struct udf_mount *ump,
	uint16_t vpart_num, uint32_t num_lb)
{
	/* allocate mutex is held */

	/* only defined for metadata partitions */
	if (ump->vtop_tp[ump->node_part] != UDF_VTOP_TYPE_META) {
		DPRINTF(RESERVE, ("\tcan't grow/shrink; no metadata partitioning\n"));
		return;
	}

	/* UDF 2.60 BD-R+POW? */
	if (ump->vtop_alloc[ump->node_part] == UDF_ALLOC_METASEQUENTIAL) {
		DPRINTF(RESERVE, ("\tUDF 2.60 BD-R+POW track grow not implemented yet\n"));
		return;
	}

	if (ump->vtop_tp[vpart_num] == UDF_VTOP_TYPE_META) {
		/* try to grow the meta partition */
		DPRINTF(RESERVE, ("\ttrying to grow the meta partition\n"));
		/* as per [UDF 2.60/2.2.13.5] : extend bitmap and metadata file(s) */
	} else {
		/* try to shrink the metadata partition */
		DPRINTF(RESERVE, ("\ttrying to shrink the meta partition\n"));
		/* as per [UDF 2.60/2.2.13.6] : either trunc or make sparse */
		num_lb = udf_trunc_metadatapart(ump, num_lb);
		if (num_lb)
			udf_sparsify_metadatapart(ump, num_lb);
	}

	/* allocate mutex should still be held */
}

/* --------------------------------------------------------------------- */

/*
 * Allocate a buf on disc for direct write out. The space doesn't have to be
 * contiguous as the caller takes care of this.
 */

void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	int lb_size, blks, udf_c_type;
	int vpart_num, num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If it's userdata or FIDs, record its location in its node.
	 */

	lb_size    = udf_rw32(ump->logical_vol->lb_size);
	num_lb     = (buf->b_bcount + lb_size -1) / lb_size;
	blks       = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	/* select partition to record the buffer on */
	vpart_num = *vpart_nump = udf_get_record_vpart(ump, udf_c_type);

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, it's already allocated */
		if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
			return;

		/* allocate on its backing sequential partition */
		vpart_num = ump->data_part;
	}

	/* XXX can this still happen? */
	/* do allocation on the selected partition */
	error = udf_allocate_space(ump, udf_node, udf_c_type,
			vpart_num, num_lb, lmapping);
	if (error) {
		/*
		 * ARGH! we haven't done our accounting right! it should
		 * always succeed.
		 */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* If it's userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) ||
	    (udf_c_type == UDF_C_FIDS) ||
	    (udf_c_type == UDF_C_METADATA_SBM))
	{
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
		udf_node->outstanding_bufs--;
		splx(s);
	}
}

/* --------------------------------------------------------------------- */

/*
 * Try to merge a1 with the new piece a2. udf_ads_merge returns an error when
 * merging is not (or no longer) possible; a2 returns the remaining piece.
 */

static int
udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
{
	uint32_t max_len, merge_len;
	uint32_t a1_len, a2_len;
	uint32_t a1_flags, a2_flags;
	uint32_t a1_lbnum, a2_lbnum;
	uint16_t a1_part, a2_part;

	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
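	/* i.e. UDF_EXT_MAXLEN rounded down to a whole number of logical
	 * blocks: the largest extent length a single descriptor can carry */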

	a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
	a1_len   = UDF_EXT_LEN(udf_rw32(a1->len));
	a1_lbnum = udf_rw32(a1->loc.lb_num);
	a1_part  = udf_rw16(a1->loc.part_num);

	a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
	a2_len   = UDF_EXT_LEN(udf_rw32(a2->len));
	a2_lbnum = udf_rw32(a2->loc.lb_num);
	a2_part  = udf_rw16(a2->loc.part_num);

	/* defines same space */
	if (a1_flags != a2_flags)
		return 1;

	if (a1_flags != UDF_EXT_FREE) {
		/* the same partition */
		if (a1_part != a2_part)
			return 1;

		/* a2 is successor of a1 */
		if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
			return 1;
	}

	/* merge as much from a2 as possible */
	merge_len = MIN(a2_len, max_len - a1_len);
	a1_len   += merge_len;
	a2_len   -= merge_len;
	a2_lbnum += merge_len/lb_size;

	a1->len = udf_rw32(a1_len | a1_flags);
	a2->len = udf_rw32(a2_len | a2_flags);
	a2->loc.lb_num = udf_rw32(a2_lbnum);

	if (a2_len > 0)
		return 1;

	/* a2 was merged into a1 completely */
	return 0;
}

/* --------------------------------------------------------------------- */

static void
udf_wipe_adslots(struct udf_node *udf_node)
{
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	uint64_t inflen, objsize;
	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
	uint8_t *data_pos;
	int extnr;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea      = udf_rw32(fe->l_ea);
		l_ad      = udf_rw32(fe->l_ad);
		data_pos  = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea      = udf_rw32(efe->l_ea);
		l_ad      = udf_rw32(efe->l_ad);
		data_pos  = (uint8_t *) efe + dscr_size + l_ea;
	}
	max_l_ad = lb_size - dscr_size - l_ea;

	/* wipe fe/efe */
	memset(data_pos, 0, max_l_ad);
	crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
	if (fe) {
		fe->l_ad        = udf_rw32(0);
		fe->logblks_rec = udf_rw64(0);
		fe->tag.desc_crc_len = udf_rw16(crclen);
	} else {
		efe->l_ad        = udf_rw32(0);
		efe->logblks_rec = udf_rw64(0);
		efe->tag.desc_crc_len = udf_rw16(crclen);
	}

	/* wipe all allocation extent entries */
	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		data_pos  = (uint8_t *) ext->data;
		max_l_ad  = lb_size - dscr_size;
		memset(data_pos, 0, max_l_ad);
		ext->l_ad = udf_rw32(0);

		crclen = dscr_size - UDF_DESC_TAG_LENGTH;
		ext->tag.desc_crc_len = udf_rw16(crclen);
	}
	udf_node->i_flags |= IN_NODE_REBUILD;
}

/* --------------------------------------------------------------------- */

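/*
 * Fetch allocation descriptor number `slot' of the node, transparently
 * following UDF_EXT_REDIRECT entries into the chained allocation extent
 * descriptors. short_ads are widened to long_ads using the node's own
 * partition number. *eof is set when the slot lies past the last recorded
 * descriptor.
 */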
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag    = &fe->icbtag;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea      = udf_rw32(fe->l_ea);
		l_ad      = udf_rw32(fe->l_ad);
		data_pos  = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag    = &efe->icbtag;
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea      = udf_rw32(efe->l_ea);
		l_ad      = udf_rw32(efe->l_ad);
		data_pos  = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, it's EOF */
1851 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1852 memset(icb, 0, sizeof(struct long_ad));
1853 *eof = 1;
1854 return;
1855 }
1856
1857 adlen = 0;
1858 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1859 adlen = sizeof(struct short_ad);
1860 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1861 adlen = sizeof(struct long_ad);
1862 }
1863
1864 /* if offset too big, we go to the allocation extensions */
1865 offset = slot * adlen;
1866 extnr = -1;
1867 while (offset >= l_ad) {
1868 /* check if our last entry is a redirect */
1869 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1870 short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
1871 l_icb.len = short_ad->len;
1872 l_icb.loc.part_num = udf_node->loc.loc.part_num;
1873 l_icb.loc.lb_num = short_ad->lb_num;
1874 } else {
1875 KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
1876 long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
1877 l_icb = *long_ad;
1878 }
1879 flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
1880 if (flags != UDF_EXT_REDIRECT) {
1881 l_ad = 0; /* force EOF */
1882 break;
1883 }
1884
1885 /* advance to next extent */
1886 extnr++;
1887 if (extnr >= udf_node->num_extensions) {
1888 l_ad = 0; /* force EOF */
1889 break;
1890 }
1891 offset = offset - l_ad;
1892 ext = udf_node->ext[extnr];
1893 dscr_size = sizeof(struct alloc_ext_entry) -1;
1894 l_ad = udf_rw32(ext->l_ad);
1895 data_pos = (uint8_t *) ext + dscr_size;
1896 }
1897
1898 /* XXX l_ad == 0 should be enough to check */
1899 *eof = (offset >= l_ad) || (l_ad == 0);
1900 if (*eof) {
1901 DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
1902 "l_ad %d\n", extnr, offset, l_ad));
1903 memset(icb, 0, sizeof(struct long_ad));
1904 return;
1905 }
1906
1907 /* get the element */
1908 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1909 short_ad = (struct short_ad *) (data_pos + offset);
1910 icb->len = short_ad->len;
1911 icb->loc.part_num = udf_node->loc.loc.part_num;
1912 icb->loc.lb_num = short_ad->lb_num;
1913 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1914 long_ad = (struct long_ad *) (data_pos + offset);
1915 *icb = *long_ad;
1916 }
1917 DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
1918 "flags %d\n", icb->loc.part_num, icb->loc.lb_num,
1919 UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
1920 }
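
/*
 * Illustrative sketch (not compiled): how this file typically walks all
 * allocation descriptors of a node with udf_get_adslot(); compare
 * udf_count_alloc_exts() below. `udf_node' is assumed to be in scope.
 */
#if 0
	struct long_ad s_ad;
	uint32_t len, flags;
	int slot, eof;

	for (slot = 0;; slot++) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		if (flags == UDF_EXT_REDIRECT)	/* link to an AED, not data */
			continue;
		/* process an extent of UDF_EXT_LEN(len) bytes here */
	}
#endif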
1921
1922 /* --------------------------------------------------------------------- */
1923
1924 int
1925 udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
1926 struct udf_mount *ump = udf_node->ump;
1927 union dscrptr *dscr, *extdscr;
1928 struct file_entry *fe;
1929 struct extfile_entry *efe;
1930 struct alloc_ext_entry *ext;
1931 struct icb_tag *icbtag;
1932 struct short_ad *short_ad;
1933 struct long_ad *long_ad, o_icb, l_icb;
1934 uint64_t logblks_rec, *logblks_rec_p;
1935 uint64_t lmapping;
1936 uint32_t offset, rest, len, lb_num;
1937 uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
1938 uint32_t flags;
1939 uint16_t vpart_num;
1940 uint8_t *data_pos;
1941 int icbflags, addr_type, adlen, extnr;
1942 int error;
1943
1944 lb_size = udf_rw32(ump->logical_vol->lb_size);
1945 vpart_num = udf_rw16(udf_node->loc.loc.part_num);
1946
1947 /* determine what descriptor we are in */
1948 fe = udf_node->fe;
1949 efe = udf_node->efe;
1950 if (fe) {
1951 icbtag = &fe->icbtag;
1952 dscr = (union dscrptr *) fe;
1953 dscr_size = sizeof(struct file_entry) -1;
1954
1955 l_ea = udf_rw32(fe->l_ea);
1956 l_ad_p = &fe->l_ad;
1957 logblks_rec_p = &fe->logblks_rec;
1958 } else {
1959 icbtag = &efe->icbtag;
1960 dscr = (union dscrptr *) efe;
1961 dscr_size = sizeof(struct extfile_entry) -1;
1962
1963 l_ea = udf_rw32(efe->l_ea);
1964 l_ad_p = &efe->l_ad;
1965 logblks_rec_p = &efe->logblks_rec;
1966 }
1967 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
1968 max_l_ad = lb_size - dscr_size - l_ea;
1969
1970 icbflags = udf_rw16(icbtag->flags);
1971 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1972
1973 	/* appending on an intern allocation makes no sense; caller error */
1974 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1975 panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
1976 }
1977
1978 adlen = 0;
1979 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1980 adlen = sizeof(struct short_ad);
1981 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1982 adlen = sizeof(struct long_ad);
1983 }
1984
1985 /* clean up given long_ad since it can be a synthesized one */
1986 flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
1987 if (flags == UDF_EXT_FREE) {
1988 icb->loc.part_num = udf_rw16(0);
1989 icb->loc.lb_num = udf_rw32(0);
1990 }
1991
1992 /* if offset too big, we go to the allocation extensions */
1993 l_ad = udf_rw32(*l_ad_p);
1994 offset = (*slot) * adlen;
1995 extnr = -1;
1996 while (offset >= l_ad) {
1997 /* check if our last entry is a redirect */
1998 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1999 short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
2000 l_icb.len = short_ad->len;
2001 l_icb.loc.part_num = udf_node->loc.loc.part_num;
2002 l_icb.loc.lb_num = short_ad->lb_num;
2003 } else {
2004 KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
2005 long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
2006 l_icb = *long_ad;
2007 }
2008 flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
2009 if (flags != UDF_EXT_REDIRECT) {
2010 			/* only one slot past the last one is addressable */
2011 break;
2012 }
2013
2014 /* advance to next extent */
2015 extnr++;
2016 KASSERT(extnr < udf_node->num_extensions);
2017 offset = offset - l_ad;
2018
2019 ext = udf_node->ext[extnr];
2020 dscr = (union dscrptr *) ext;
2021 dscr_size = sizeof(struct alloc_ext_entry) -1;
2022 max_l_ad = lb_size - dscr_size;
2023 l_ad_p = &ext->l_ad;
2024 l_ad = udf_rw32(*l_ad_p);
2025 data_pos = (uint8_t *) ext + dscr_size;
2026 }
2027 DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
2028 extnr, offset, udf_rw32(*l_ad_p)));
2029 KASSERT(l_ad == udf_rw32(*l_ad_p));
2030
2031 /* offset is offset within the current (E)FE/AED */
2032 l_ad = udf_rw32(*l_ad_p);
2033 crclen = udf_rw16(dscr->tag.desc_crc_len);
2034 logblks_rec = udf_rw64(*logblks_rec_p);
2035
2036 /* overwriting old piece? */
2037 if (offset < l_ad) {
2038 /* overwrite entry; compensate for the old element */
2039 if (addr_type == UDF_ICB_SHORT_ALLOC) {
2040 short_ad = (struct short_ad *) (data_pos + offset);
2041 o_icb.len = short_ad->len;
2042 o_icb.loc.part_num = udf_rw16(0); /* ignore */
2043 o_icb.loc.lb_num = short_ad->lb_num;
2044 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2045 long_ad = (struct long_ad *) (data_pos + offset);
2046 o_icb = *long_ad;
2047 } else {
2048 panic("Invalid address type in udf_append_adslot\n");
2049 }
2050
2051 len = udf_rw32(o_icb.len);
2052 if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
2053 /* adjust counts */
2054 len = UDF_EXT_LEN(len);
2055 logblks_rec -= (len + lb_size -1) / lb_size;
2056 }
2057 }
2058
2059 	/* make sure we're not appending a redirect */
2060 flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
2061 KASSERT(flags != UDF_EXT_REDIRECT);
2062
2063 /* round down available space */
2064 rest = adlen * ((max_l_ad - offset) / adlen);
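	/*
	 * The last remaining slot must stay available for a redirect to a
	 * next extension, so if at most one ad still fits, hook in an AED.
	 */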
2065 if (rest <= adlen) {
2066 /* have to append aed, see if we already have a spare one */
2067 extnr++;
2068 ext = udf_node->ext[extnr];
2069 l_icb = udf_node->ext_loc[extnr];
2070 if (ext == NULL) {
2071 DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));
2072
2073 error = udf_reserve_space(ump, NULL, UDF_C_NODE,
2074 vpart_num, 1, /* can fail */ false);
2075 if (error) {
2076 printf("UDF: couldn't reserve space for AED!\n");
2077 return error;
2078 }
2079 error = udf_allocate_space(ump, NULL, UDF_C_NODE,
2080 vpart_num, 1, &lmapping);
2081 lb_num = lmapping;
2082 if (error)
2083 panic("UDF: couldn't allocate AED!\n");
2084
2085 /* initialise pointer to location */
2086 memset(&l_icb, 0, sizeof(struct long_ad));
2087 l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
2088 l_icb.loc.lb_num = udf_rw32(lb_num);
2089 l_icb.loc.part_num = udf_rw16(vpart_num);
2090
2091 /* create new aed descriptor */
2092 udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
2093 ext = &extdscr->aee;
2094
2095 udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
2096 dscr_size = sizeof(struct alloc_ext_entry) -1;
2097 max_l_ad = lb_size - dscr_size;
2098 memset(ext->data, 0, max_l_ad);
2099 ext->l_ad = udf_rw32(0);
2100 ext->tag.desc_crc_len =
2101 udf_rw16(dscr_size - UDF_DESC_TAG_LENGTH);
2102
2103 /* declare aed */
2104 udf_node->num_extensions++;
2105 udf_node->ext_loc[extnr] = l_icb;
2106 udf_node->ext[extnr] = ext;
2107 }
2108 /* add redirect and adjust l_ad and crclen for old descr */
2109 if (addr_type == UDF_ICB_SHORT_ALLOC) {
2110 short_ad = (struct short_ad *) (data_pos + offset);
2111 short_ad->len = l_icb.len;
2112 short_ad->lb_num = l_icb.loc.lb_num;
2113 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2114 long_ad = (struct long_ad *) (data_pos + offset);
2115 *long_ad = l_icb;
2116 }
2117 l_ad += adlen;
2118 crclen += adlen;
2119 dscr->tag.desc_crc_len = udf_rw16(crclen);
2120 *l_ad_p = udf_rw32(l_ad);
2121
2122 /* advance to the new extension */
2123 KASSERT(ext != NULL);
2124 dscr = (union dscrptr *) ext;
2125 dscr_size = sizeof(struct alloc_ext_entry) -1;
2126 max_l_ad = lb_size - dscr_size;
2127 data_pos = (uint8_t *) dscr + dscr_size;
2128
2129 l_ad_p = &ext->l_ad;
2130 l_ad = udf_rw32(*l_ad_p);
2131 crclen = udf_rw16(dscr->tag.desc_crc_len);
2132 offset = 0;
2133
2134 		/* adjust the caller's slot count for the link insert */
2135 *slot += 1;
2136 }
2137
2138 /* write out the element */
2139 DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
2140 "len %d, flags %d\n", data_pos + offset,
2141 icb->loc.part_num, icb->loc.lb_num,
2142 UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
2143 if (addr_type == UDF_ICB_SHORT_ALLOC) {
2144 short_ad = (struct short_ad *) (data_pos + offset);
2145 short_ad->len = icb->len;
2146 short_ad->lb_num = icb->loc.lb_num;
2147 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2148 long_ad = (struct long_ad *) (data_pos + offset);
2149 *long_ad = *icb;
2150 }
2151
2152 /* adjust logblks recorded count */
2153 len = udf_rw32(icb->len);
2154 flags = UDF_EXT_FLAGS(len);
2155 if (flags == UDF_EXT_ALLOCATED)
2156 logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
2157 *logblks_rec_p = udf_rw64(logblks_rec);
2158
2159 /* adjust l_ad and crclen when needed */
2160 if (offset >= l_ad) {
2161 l_ad += adlen;
2162 crclen += adlen;
2163 dscr->tag.desc_crc_len = udf_rw16(crclen);
2164 *l_ad_p = udf_rw32(l_ad);
2165 }
2166
2167 return 0;
2168 }
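
/*
 * Note: udf_append_adslot() returns 0 or an errno and may advance the
 * caller's *slot by one when it had to insert a redirect first: such an
 * append consumes one slot in the old descriptor for the redirect and
 * stores the element in the first slot of the new AED.
 */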
2169
2170 /* --------------------------------------------------------------------- */
2171
2172 static void
2173 udf_count_alloc_exts(struct udf_node *udf_node)
2174 {
2175 struct long_ad s_ad;
2176 uint32_t lb_num, len, flags;
2177 uint16_t vpart_num;
2178 int slot, eof;
2179 int num_extents, extnr;
2180 int lb_size;
2181
2182 if (udf_node->num_extensions == 0)
2183 return;
2184
2185 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2186 /* count number of allocation extents in use */
2187 num_extents = 0;
2188 slot = 0;
2189 for (;;) {
2190 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2191 if (eof)
2192 break;
2193 len = udf_rw32(s_ad.len);
2194 flags = UDF_EXT_FLAGS(len);
2195
2196 if (flags == UDF_EXT_REDIRECT)
2197 num_extents++;
2198
2199 slot++;
2200 }
2201
2202 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
2203 num_extents));
2204
2205 /* XXX choice: we could delay freeing them on node writeout */
2206 /* free excess entries */
2207 extnr = num_extents;
2208 for (;extnr < udf_node->num_extensions; extnr++) {
2209 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
2210 		/* free descriptor */
2211 s_ad = udf_node->ext_loc[extnr];
2212 udf_free_logvol_dscr(udf_node->ump, &s_ad,
2213 udf_node->ext[extnr]);
2214 udf_node->ext[extnr] = NULL;
2215
2216 /* free disc space */
2217 lb_num = udf_rw32(s_ad.loc.lb_num);
2218 vpart_num = udf_rw16(s_ad.loc.part_num);
2219 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
2220
2221 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
2222 }
2223
2224 /* set our new number of allocation extents */
2225 udf_node->num_extensions = num_extents;
2226 }
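
/*
 * Illustrative example: if a rewrite left the node with three AEDs but
 * the walk above now finds only one UDF_EXT_REDIRECT, the two excess
 * AEDs are released here, both their descriptors and their disc blocks.
 */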
2227
2228
2229 /* --------------------------------------------------------------------- */
2230
2231 /*
2232  * Adjust the node's allocation descriptors to reflect the new mapping;
2233  * note that we might glue onto existing allocation descriptors.
2234  *
2235  * XXX Note there can only be one allocation being recorded per mount;
2236  * maybe do explicit allocation in the scheduling thread?
2237 */
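
/*
 * Worked example (illustrative numbers only): lb_size 2048, node mapped
 * as [8192 bytes allocated][8192 bytes free], write covering bytes
 * 6144..10239. Step 1 copies nothing since the first slot already
 * overlaps; step 2 pushes its truncated 6144-byte head; step 3 pushes
 * the two freshly mapped lbs as one allocated extent; step 4 frees the
 * old lb backing bytes 6144..8191 and pushes the free slot's remaining
 * 6144-byte tail; steps 5..8 copy the (here empty) remainder and write
 * all pushed extents back, merging where possible.
 */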
2238
2239 static void
2240 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
2241 uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
2242 {
2243 struct vnode *vp = buf->b_vp;
2244 struct udf_node *udf_node = VTOI(vp);
2245 struct file_entry *fe;
2246 struct extfile_entry *efe;
2247 struct icb_tag *icbtag;
2248 struct long_ad s_ad, c_ad;
2249 uint64_t inflen, from, till;
2250 uint64_t foffset, end_foffset, restart_foffset;
2251 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2252 uint32_t num_lb, len, flags, lb_num;
2253 uint32_t run_start;
2254 uint32_t slot_offset, replace_len, replace;
2255 int addr_type, icbflags;
2256 	/* int udf_c_type = buf->b_udf_c_type; */
2257 int lb_size, run_length, eof;
2258 int slot, cpy_slot, cpy_slots, restart_slot;
2259 int error;
2260
2261 DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
2262
2263 #if 0
2264 /* XXX disable sanity check for now */
2265 /* sanity check ... should be panic ? */
2266 if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
2267 return;
2268 #endif
2269
2270 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2271
2272 /* do the job */
2273 UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
2274 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2275
2276 fe = udf_node->fe;
2277 efe = udf_node->efe;
2278 if (fe) {
2279 icbtag = &fe->icbtag;
2280 inflen = udf_rw64(fe->inf_len);
2281 } else {
2282 icbtag = &efe->icbtag;
2283 inflen = udf_rw64(efe->inf_len);
2284 }
2285
2286 	/* make sure `till' doesn't point past the file information length */
2287 from = buf->b_lblkno * lb_size;
2288 till = MIN(inflen, from + buf->b_resid);
2289
2290 num_lb = (till - from + lb_size -1) / lb_size;
2291
2292 DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
2293
2294 icbflags = udf_rw16(icbtag->flags);
2295 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2296
2297 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2298 /* nothing to do */
2299 /* XXX clean up rest of node? just in case? */
2300 UDF_UNLOCK_NODE(udf_node, 0);
2301 return;
2302 }
2303
2304 slot = 0;
2305 cpy_slot = 0;
2306 foffset = 0;
2307
2308 /* 1) copy till first overlap piece to the rewrite buffer */
2309 for (;;) {
2310 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2311 if (eof) {
2312 DPRINTF(WRITE,
2313 ("Record allocation in node "
2314 "failed: encountered EOF\n"));
2315 UDF_UNLOCK_NODE(udf_node, 0);
2316 buf->b_error = EINVAL;
2317 return;
2318 }
2319 len = udf_rw32(s_ad.len);
2320 flags = UDF_EXT_FLAGS(len);
2321 len = UDF_EXT_LEN(len);
2322
2323 if (flags == UDF_EXT_REDIRECT) {
2324 slot++;
2325 continue;
2326 }
2327
2328 end_foffset = foffset + len;
2329 if (end_foffset > from)
2330 break; /* found */
2331
2332 node_ad_cpy[cpy_slot++] = s_ad;
2333
2334 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2335 "-> stack\n",
2336 udf_rw16(s_ad.loc.part_num),
2337 udf_rw32(s_ad.loc.lb_num),
2338 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2339 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2340
2341 foffset = end_foffset;
2342 slot++;
2343 }
2344 restart_slot = slot;
2345 restart_foffset = foffset;
2346
2347 /* 2) trunc overlapping slot at overlap and copy it */
2348 slot_offset = from - foffset;
2349 if (slot_offset > 0) {
2350 DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
2351 slot_offset, flags >> 30, flags));
2352
2353 s_ad.len = udf_rw32(slot_offset | flags);
2354 node_ad_cpy[cpy_slot++] = s_ad;
2355
2356 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2357 "-> stack\n",
2358 udf_rw16(s_ad.loc.part_num),
2359 udf_rw32(s_ad.loc.lb_num),
2360 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2361 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2362 }
2363 foffset += slot_offset;
2364
2365 /* 3) insert new mappings */
2366 memset(&s_ad, 0, sizeof(struct long_ad));
2367 	for (lb_num = 0; lb_num < num_lb; lb_num++) {
2368 		run_start = mapping[lb_num];
2369 		run_length = 1;
2370 		while (lb_num < num_lb-1) {
2371 			/* runs extend over contiguous or identical mappings */
2372 			if ((mapping[lb_num+1] != mapping[lb_num]+1) &&
2373 			    (mapping[lb_num+1] != mapping[lb_num]))
2374 				break;
2375 			run_length++;
2376 			lb_num++;
2377 		}
2378 /* insert slot for this mapping */
2379 len = run_length * lb_size;
2380
2381 /* bounds checking */
2382 if (foffset + len > till)
2383 len = till - foffset;
2384 KASSERT(foffset + len <= inflen);
2385
2386 s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
2387 s_ad.loc.part_num = udf_rw16(vpart_num);
2388 s_ad.loc.lb_num = udf_rw32(run_start);
2389
2390 foffset += len;
2391
2392 /* paranoia */
2393 if (len == 0) {
2394 DPRINTF(WRITE,
2395 ("Record allocation in node "
2396 "failed: insert failed\n"));
2397 UDF_UNLOCK_NODE(udf_node, 0);
2398 buf->b_error = EINVAL;
2399 return;
2400 }
2401 node_ad_cpy[cpy_slot++] = s_ad;
2402
2403 DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
2404 "flags %d -> stack\n",
2405 udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
2406 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2407 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2408 }
2409
2410 /* 4) pop replaced length */
2411 slot = restart_slot;
2412 foffset = restart_foffset;
2413
2414 	replace_len = till - foffset;	/* total number of bytes to pop */
2415 	slot_offset = from - foffset;	/* offset in first encountered slot */
2416 KASSERT((slot_offset % lb_size) == 0);
2417
2418 for (;;) {
2419 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2420 if (eof)
2421 break;
2422
2423 len = udf_rw32(s_ad.len);
2424 flags = UDF_EXT_FLAGS(len);
2425 len = UDF_EXT_LEN(len);
2426 lb_num = udf_rw32(s_ad.loc.lb_num);
2427
2428 if (flags == UDF_EXT_REDIRECT) {
2429 slot++;
2430 continue;
2431 }
2432
2433 DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
2434 "replace_len %d, "
2435 "vp %d, lb %d, len %d, flags %d\n",
2436 slot, slot_offset, replace_len,
2437 udf_rw16(s_ad.loc.part_num),
2438 udf_rw32(s_ad.loc.lb_num),
2439 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2440 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2441
2442 /* adjust for slot offset */
2443 if (slot_offset) {
2444 DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
2445 lb_num += slot_offset / lb_size;
2446 len -= slot_offset;
2447 foffset += slot_offset;
2448 replace_len -= slot_offset;
2449
2450 /* mark adjusted */
2451 slot_offset = 0;
2452 }
2453
2454 /* advance for (the rest of) this slot */
2455 replace = MIN(len, replace_len);
2456 DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
2457
2458 /* advance for this slot */
2459 if (replace) {
2460 			/* note: don't round num_lb DOWN since we would then
2461 			 * forget the last partial block */
2462 num_lb = (replace + lb_size - 1) / lb_size;
2463 if (flags != UDF_EXT_FREE) {
2464 udf_free_allocated_space(ump, lb_num,
2465 udf_rw16(s_ad.loc.part_num), num_lb);
2466 }
2467 lb_num += num_lb;
2468 len -= replace;
2469 foffset += replace;
2470 replace_len -= replace;
2471 }
2472
2473 /* do we have a slot tail ? */
2474 if (len) {
2475 KASSERT(foffset % lb_size == 0);
2476
2477 /* we arrived at our point, push remainder */
2478 s_ad.len = udf_rw32(len | flags);
2479 s_ad.loc.lb_num = udf_rw32(lb_num);
2480 if (flags == UDF_EXT_FREE)
2481 s_ad.loc.lb_num = udf_rw32(0);
2482 node_ad_cpy[cpy_slot++] = s_ad;
2483 foffset += len;
2484 slot++;
2485
2486 DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
2487 "-> stack\n",
2488 udf_rw16(s_ad.loc.part_num),
2489 udf_rw32(s_ad.loc.lb_num),
2490 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2491 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2492 break;
2493 }
2494
2495 slot++;
2496 }
2497
2498 /* 5) copy remainder */
2499 for (;;) {
2500 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2501 if (eof)
2502 break;
2503
2504 len = udf_rw32(s_ad.len);
2505 flags = UDF_EXT_FLAGS(len);
2506 len = UDF_EXT_LEN(len);
2507
2508 if (flags == UDF_EXT_REDIRECT) {
2509 slot++;
2510 continue;
2511 }
2512
2513 node_ad_cpy[cpy_slot++] = s_ad;
2514
2515 DPRINTF(ALLOC, ("\t5: insert new mapping "
2516 "vp %d lb %d, len %d, flags %d "
2517 "-> stack\n",
2518 udf_rw16(s_ad.loc.part_num),
2519 udf_rw32(s_ad.loc.lb_num),
2520 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2521 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2522
2523 slot++;
2524 }
2525
2526 /* 6) reset node descriptors */
2527 udf_wipe_adslots(udf_node);
2528
2529 /* 7) copy back extents; merge when possible. Recounting on the fly */
2530 cpy_slots = cpy_slot;
2531
2532 c_ad = node_ad_cpy[0];
2533 slot = 0;
2534 DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
2535 "lb %d, len %d, flags %d\n",
2536 udf_rw16(c_ad.loc.part_num),
2537 udf_rw32(c_ad.loc.lb_num),
2538 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2539 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2540
2541 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2542 s_ad = node_ad_cpy[cpy_slot];
2543
2544 DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
2545 "lb %d, len %d, flags %d\n",
2546 udf_rw16(s_ad.loc.part_num),
2547 udf_rw32(s_ad.loc.lb_num),
2548 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2549 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2550
2551 /* see if we can merge */
2552 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2553 			/* not mergeable (anymore) */
2554 DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
2555 "len %d, flags %d\n",
2556 udf_rw16(c_ad.loc.part_num),
2557 udf_rw32(c_ad.loc.lb_num),
2558 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2559 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2560
2561 error = udf_append_adslot(udf_node, &slot, &c_ad);
2562 if (error) {
2563 buf->b_error = error;
2564 goto out;
2565 }
2566 c_ad = s_ad;
2567 slot++;
2568 }
2569 }
2570
2571 /* 8) push rest slot (if any) */
2572 	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2573 DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
2574 "len %d, flags %d\n",
2575 udf_rw16(c_ad.loc.part_num),
2576 udf_rw32(c_ad.loc.lb_num),
2577 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2578 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2579
2580 error = udf_append_adslot(udf_node, &slot, &c_ad);
2581 if (error) {
2582 buf->b_error = error;
2583 goto out;
2584 }
2585 }
2586
2587 out:
2588 udf_count_alloc_exts(udf_node);
2589
2590 /* the node's descriptors should now be sane */
2591 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2592 UDF_UNLOCK_NODE(udf_node, 0);
2593
2594 KASSERT(orig_inflen == new_inflen);
2595 KASSERT(new_lbrec >= orig_lbrec);
2596
2597 return;
2598 }
2599
2600 /* --------------------------------------------------------------------- */
2601
2602 int
2603 udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
2604 {
2605 union dscrptr *dscr;
2606 struct vnode *vp = udf_node->vnode;
2607 struct udf_mount *ump = udf_node->ump;
2608 struct file_entry *fe;
2609 struct extfile_entry *efe;
2610 struct icb_tag *icbtag;
2611 struct long_ad c_ad, s_ad;
2612 uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
2613 uint64_t foffset, end_foffset;
2614 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2615 uint32_t lb_size, dscr_size, crclen, lastblock_grow;
2616 uint32_t icbflags, len, flags, max_len;
2617 uint32_t max_l_ad, l_ad, l_ea;
2618 uint16_t my_part, dst_part;
2619 uint8_t *data_pos, *evacuated_data;
2620 int addr_type;
2621 int slot, cpy_slot;
2622 int eof, error;
2623
2624 DPRINTF(ALLOC, ("udf_grow_node\n"));
2625
2626 UDF_LOCK_NODE(udf_node, 0);
2627 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2628
2629 lb_size = udf_rw32(ump->logical_vol->lb_size);
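	/* an extent's length field holds at most UDF_EXT_MAXLEN (2^30 - 1)
	 * bytes, so appends are done in lb-aligned chunks of at most max_len */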
2630 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2631
2632 fe = udf_node->fe;
2633 efe = udf_node->efe;
2634 if (fe) {
2635 dscr = (union dscrptr *) fe;
2636 icbtag = &fe->icbtag;
2637 inflen = udf_rw64(fe->inf_len);
2638 objsize = inflen;
2639 dscr_size = sizeof(struct file_entry) -1;
2640 l_ea = udf_rw32(fe->l_ea);
2641 l_ad = udf_rw32(fe->l_ad);
2642 } else {
2643 dscr = (union dscrptr *) efe;
2644 icbtag = &efe->icbtag;
2645 inflen = udf_rw64(efe->inf_len);
2646 objsize = udf_rw64(efe->obj_size);
2647 dscr_size = sizeof(struct extfile_entry) -1;
2648 l_ea = udf_rw32(efe->l_ea);
2649 l_ad = udf_rw32(efe->l_ad);
2650 }
2651 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
2652 max_l_ad = lb_size - dscr_size - l_ea;
2653
2654 icbflags = udf_rw16(icbtag->flags);
2655 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2656
2657 old_size = inflen;
2658 size_diff = new_size - old_size;
2659
2660 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2661
2662 evacuated_data = NULL;
2663 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2664 if (l_ad + size_diff <= max_l_ad) {
2665 /* only reflect size change directly in the node */
2666 inflen += size_diff;
2667 objsize += size_diff;
2668 l_ad += size_diff;
2669 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2670 if (fe) {
2671 fe->inf_len = udf_rw64(inflen);
2672 fe->l_ad = udf_rw32(l_ad);
2673 fe->tag.desc_crc_len = udf_rw16(crclen);
2674 } else {
2675 efe->inf_len = udf_rw64(inflen);
2676 efe->obj_size = udf_rw64(objsize);
2677 efe->l_ad = udf_rw32(l_ad);
2678 efe->tag.desc_crc_len = udf_rw16(crclen);
2679 }
2680 error = 0;
2681
2682 /* set new size for uvm */
2683 uvm_vnp_setsize(vp, old_size);
2684 uvm_vnp_setwritesize(vp, new_size);
2685
2686 #if 0
2687 /* zero append space in buffer */
2688 uvm_vnp_zerorange(vp, old_size, new_size - old_size);
2689 #endif
2690
2691 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2692
2693 /* unlock */
2694 UDF_UNLOCK_NODE(udf_node, 0);
2695
2696 KASSERT(new_inflen == orig_inflen + size_diff);
2697 KASSERT(new_lbrec == orig_lbrec);
2698 KASSERT(new_lbrec == 0);
2699 return 0;
2700 }
2701
2702 DPRINTF(ALLOC, ("\tCONVERT from internal\n"));
2703
2704 if (old_size > 0) {
2705 /* allocate some space and copy in the stuff to keep */
2706 evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
2707 memset(evacuated_data, 0, lb_size);
2708
2709 /* node is locked, so safe to exit mutex */
2710 UDF_UNLOCK_NODE(udf_node, 0);
2711
2712 /* read in using the `normal' vn_rdwr() */
2713 error = vn_rdwr(UIO_READ, udf_node->vnode,
2714 evacuated_data, old_size, 0,
2715 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2716 FSCRED, NULL, NULL);
2717
2718 /* enter again */
2719 UDF_LOCK_NODE(udf_node, 0);
2720 }
2721
2722 /* convert to a normal alloc and select type */
2723 my_part = udf_rw16(udf_node->loc.loc.part_num);
2724 dst_part = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
2725 addr_type = UDF_ICB_SHORT_ALLOC;
2726 if (dst_part != my_part)
2727 addr_type = UDF_ICB_LONG_ALLOC;
2728
2729 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2730 icbflags |= addr_type;
2731 icbtag->flags = udf_rw16(icbflags);
2732
2733 /* wipe old descriptor space */
2734 udf_wipe_adslots(udf_node);
2735
2736 memset(&c_ad, 0, sizeof(struct long_ad));
2737 c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
2738 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2739 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2740
2741 slot = 0;
2742 } else {
2743 		/* go to the last entry (if any) */
2744 slot = 0;
2745 cpy_slot = 0;
2746 foffset = 0;
2747 memset(&c_ad, 0, sizeof(struct long_ad));
2748 for (;;) {
2749 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2750 if (eof)
2751 break;
2752
2753 len = udf_rw32(c_ad.len);
2754 flags = UDF_EXT_FLAGS(len);
2755 len = UDF_EXT_LEN(len);
2756
2757 end_foffset = foffset + len;
2758 if (flags != UDF_EXT_REDIRECT)
2759 foffset = end_foffset;
2760
2761 slot++;
2762 }
2763 /* at end of adslots */
2764
2765 		/* special case: if the old size was zero there is no last slot */
2766 if (old_size == 0) {
2767 c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
2768 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2769 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2770 } else {
2771 /* refetch last slot */
2772 slot--;
2773 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2774 }
2775 }
2776
2777 /*
2778 	 * If the length of the last slot is not a multiple of lb_size, adjust
2779 	 * the length so that it is, accounting for the growth in `append_len';
2780 	 * this is relevant when extending existing files.
2781 */
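	/*
	 * Worked example (illustrative numbers): with lb_size 2048, a last
	 * slot of 3000 bytes and size_diff 5000: 3000 % 2048 == 952, so
	 * lastblock_grow = MIN(5000, 2048 - 952) = 1096, the slot grows to
	 * 4096 bytes and append_len = 5000 - 1096 = 3904 remains below.
	 */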
2782 len = udf_rw32(c_ad.len);
2783 flags = UDF_EXT_FLAGS(len);
2784 len = UDF_EXT_LEN(len);
2785
2786 lastblock_grow = 0;
2787 if (len % lb_size > 0) {
2788 lastblock_grow = lb_size - (len % lb_size);
2789 lastblock_grow = MIN(size_diff, lastblock_grow);
2790 len += lastblock_grow;
2791 c_ad.len = udf_rw32(len | flags);
2792
2793 		/* TODO zero appended space in buffer! */
2794 /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
2795 }
2796 memset(&s_ad, 0, sizeof(struct long_ad));
2797
2798 /* size_diff can be bigger than allowed, so grow in chunks */
2799 append_len = size_diff - lastblock_grow;
2800 while (append_len > 0) {
2801 chunk = MIN(append_len, max_len);
2802 s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
2803 s_ad.loc.part_num = udf_rw16(0);
2804 s_ad.loc.lb_num = udf_rw32(0);
2805
2806 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2807 			/* not mergeable (anymore) */
2808 error = udf_append_adslot(udf_node, &slot, &c_ad);
2809 if (error)
2810 goto errorout;
2811 slot++;
2812 c_ad = s_ad;
2813 memset(&s_ad, 0, sizeof(struct long_ad));
2814 }
2815 append_len -= chunk;
2816 }
2817
2818 /* if there is a rest piece in the accumulator, append it */
2819 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2820 error = udf_append_adslot(udf_node, &slot, &c_ad);
2821 if (error)
2822 goto errorout;
2823 slot++;
2824 }
2825
2826 /* if there is a rest piece that didn't fit, append it */
2827 if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
2828 error = udf_append_adslot(udf_node, &slot, &s_ad);
2829 if (error)
2830 goto errorout;
2831 slot++;
2832 }
2833
2834 inflen += size_diff;
2835 objsize += size_diff;
2836 if (fe) {
2837 fe->inf_len = udf_rw64(inflen);
2838 } else {
2839 efe->inf_len = udf_rw64(inflen);
2840 efe->obj_size = udf_rw64(objsize);
2841 }
2842 error = 0;
2843
2844 if (evacuated_data) {
2845 /* set new write size for uvm */
2846 uvm_vnp_setwritesize(vp, old_size);
2847
2848 /* write out evacuated data */
2849 error = vn_rdwr(UIO_WRITE, udf_node->vnode,
2850 evacuated_data, old_size, 0,
2851 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2852 FSCRED, NULL, NULL);
2853 uvm_vnp_setsize(vp, old_size);
2854 }
2855
2856 errorout:
2857 if (evacuated_data)
2858 free(evacuated_data, M_UDFTEMP);
2859
2860 udf_count_alloc_exts(udf_node);
2861
2862 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2863 UDF_UNLOCK_NODE(udf_node, 0);
2864
2865 KASSERT(new_inflen == orig_inflen + size_diff);
2866 KASSERT(new_lbrec == orig_lbrec);
2867
2868 return error;
2869 }
2870
2871 /* --------------------------------------------------------------------- */
2872
2873 int
2874 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2875 {
2876 struct vnode *vp = udf_node->vnode;
2877 struct udf_mount *ump = udf_node->ump;
2878 struct file_entry *fe;
2879 struct extfile_entry *efe;
2880 struct icb_tag *icbtag;
2881 struct long_ad c_ad, s_ad, *node_ad_cpy;
2882 uint64_t size_diff, old_size, inflen, objsize;
2883 uint64_t foffset, end_foffset;
2884 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2885 uint32_t lb_size, dscr_size, crclen;
2886 uint32_t slot_offset, slot_offset_lb;
2887 uint32_t len, flags, max_len;
2888 uint32_t num_lb, lb_num;
2889 uint32_t max_l_ad, l_ad, l_ea;
2890 uint16_t vpart_num;
2891 uint8_t *data_pos;
2892 int icbflags, addr_type;
2893 int slot, cpy_slot, cpy_slots;
2894 int eof, error;
2895
2896 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2897
2898 UDF_LOCK_NODE(udf_node, 0);
2899 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2900
2901 lb_size = udf_rw32(ump->logical_vol->lb_size);
2902 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2903
2904 /* do the work */
2905 fe = udf_node->fe;
2906 efe = udf_node->efe;
2907 if (fe) {
2908 icbtag = &fe->icbtag;
2909 inflen = udf_rw64(fe->inf_len);
2910 objsize = inflen;
2911 dscr_size = sizeof(struct file_entry) -1;
2912 l_ea = udf_rw32(fe->l_ea);
2913 l_ad = udf_rw32(fe->l_ad);
2914 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2915 } else {
2916 icbtag = &efe->icbtag;
2917 inflen = udf_rw64(efe->inf_len);
2918 objsize = udf_rw64(efe->obj_size);
2919 dscr_size = sizeof(struct extfile_entry) -1;
2920 l_ea = udf_rw32(efe->l_ea);
2921 l_ad = udf_rw32(efe->l_ad);
2922 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2923 }
2924 max_l_ad = lb_size - dscr_size - l_ea;
2925
2926 icbflags = udf_rw16(icbtag->flags);
2927 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2928
2929 old_size = inflen;
2930 size_diff = old_size - new_size;
2931
2932 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2933
2934 /* shrink the node to its new size */
2935 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2936 /* only reflect size change directly in the node */
2937 KASSERT(new_size <= max_l_ad);
2938 inflen -= size_diff;
2939 objsize -= size_diff;
2940 l_ad -= size_diff;
2941 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2942 if (fe) {
2943 fe->inf_len = udf_rw64(inflen);
2944 fe->l_ad = udf_rw32(l_ad);
2945 fe->tag.desc_crc_len = udf_rw16(crclen);
2946 } else {
2947 efe->inf_len = udf_rw64(inflen);
2948 efe->obj_size = udf_rw64(objsize);
2949 efe->l_ad = udf_rw32(l_ad);
2950 efe->tag.desc_crc_len = udf_rw16(crclen);
2951 }
2952 error = 0;
2953
2954 /* clear the space in the descriptor */
2955 KASSERT(old_size > new_size);
2956 memset(data_pos + new_size, 0, old_size - new_size);
2957
2958 		/* TODO zero appended space in buffer! */
2959 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2960
2961 /* set new size for uvm */
2962 uvm_vnp_setsize(vp, new_size);
2963
2964 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2965 UDF_UNLOCK_NODE(udf_node, 0);
2966
2967 KASSERT(new_inflen == orig_inflen - size_diff);
2968 KASSERT(new_lbrec == orig_lbrec);
2969 KASSERT(new_lbrec == 0);
2970
2971 return 0;
2972 }
2973
2974 /* setup node cleanup extents copy space */
2975 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2976 M_UDFMNT, M_WAITOK);
2977 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2978
2979 /*
2980 * Shrink the node by releasing the allocations and truncate the last
2981 * allocation to the new size. If the new size fits into the
2982 * allocation descriptor itself, transform it into an
2983 * UDF_ICB_INTERN_ALLOC.
2984 */
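	/*
	 * Worked example (illustrative numbers): lb_size 2048, extents
	 * [8192 bytes allocated at lb 100][4096 bytes free], new_size 3000.
	 * Step 1 copies nothing; step 2 rounds 3000 up to two lbs, frees
	 * lbs 102..103 and pushes a truncated 3000-byte slot; step 3 drops
	 * the free extent (nothing to release); steps 5..7 then write the
	 * single remaining slot back.
	 */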
2985 slot = 0;
2986 cpy_slot = 0;
2987 foffset = 0;
2988
2989 /* 1) copy till first overlap piece to the rewrite buffer */
2990 for (;;) {
2991 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2992 if (eof) {
2993 DPRINTF(WRITE,
2994 ("Shrink node failed: "
2995 "encountered EOF\n"));
2996 error = EINVAL;
2997 goto errorout; /* panic? */
2998 }
2999 len = udf_rw32(s_ad.len);
3000 flags = UDF_EXT_FLAGS(len);
3001 len = UDF_EXT_LEN(len);
3002
3003 if (flags == UDF_EXT_REDIRECT) {
3004 slot++;
3005 continue;
3006 }
3007
3008 end_foffset = foffset + len;
3009 if (end_foffset > new_size)
3010 break; /* found */
3011
3012 node_ad_cpy[cpy_slot++] = s_ad;
3013
3014 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
3015 "-> stack\n",
3016 udf_rw16(s_ad.loc.part_num),
3017 udf_rw32(s_ad.loc.lb_num),
3018 UDF_EXT_LEN(udf_rw32(s_ad.len)),
3019 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3020
3021 foffset = end_foffset;
3022 slot++;
3023 }
3024 slot_offset = new_size - foffset;
3025
3026 /* 2) trunc overlapping slot at overlap and copy it */
3027 if (slot_offset > 0) {
3028 lb_num = udf_rw32(s_ad.loc.lb_num);
3029 vpart_num = udf_rw16(s_ad.loc.part_num);
3030
3031 if (flags == UDF_EXT_ALLOCATED) {
3032 /* calculate extent in lb, and offset in lb */
3033 num_lb = (len + lb_size -1) / lb_size;
3034 slot_offset_lb = (slot_offset + lb_size -1) / lb_size;
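			/* round up: the lb holding the last byte of the
			 * truncated slot must stay allocated */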
3035
3036 /* adjust our slot */
3037 lb_num += slot_offset_lb;
3038 num_lb -= slot_offset_lb;
3039
3040 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
3041 }
3042
3043 s_ad.len = udf_rw32(slot_offset | flags);
3044 node_ad_cpy[cpy_slot++] = s_ad;
3045 slot++;
3046
3047 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
3048 "-> stack\n",
3049 udf_rw16(s_ad.loc.part_num),
3050 udf_rw32(s_ad.loc.lb_num),
3051 UDF_EXT_LEN(udf_rw32(s_ad.len)),
3052 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3053 }
3054
3055 /* 3) delete remainder */
3056 for (;;) {
3057 udf_get_adslot(udf_node, slot, &s_ad, &eof);
3058 if (eof)
3059 break;
3060
3061 len = udf_rw32(s_ad.len);
3062 flags = UDF_EXT_FLAGS(len);
3063 len = UDF_EXT_LEN(len);
3064
3065 if (flags == UDF_EXT_REDIRECT) {
3066 slot++;
3067 continue;
3068 }
3069
3070 DPRINTF(ALLOC, ("\t3: delete remainder "
3071 "vp %d lb %d, len %d, flags %d\n",
3072 udf_rw16(s_ad.loc.part_num),
3073 udf_rw32(s_ad.loc.lb_num),
3074 UDF_EXT_LEN(udf_rw32(s_ad.len)),
3075 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3076
3077 if (flags == UDF_EXT_ALLOCATED) {
3078 lb_num = udf_rw32(s_ad.loc.lb_num);
3079 vpart_num = udf_rw16(s_ad.loc.part_num);
3080 num_lb = (len + lb_size - 1) / lb_size;
3081
3082 udf_free_allocated_space(ump, lb_num, vpart_num,
3083 num_lb);
3084 }
3085
3086 slot++;
3087 }
3088
3089 /* 4) if it will fit into the descriptor then convert */
3090 if (new_size < max_l_ad) {
3091 /*
3092 		 * rescue/evacuate the old piece by reading it in, and convert
3093 		 * it to an internal alloc.
3094 */
3095 if (new_size == 0) {
3096 /* XXX/TODO only for zero sizing now */
3097 udf_wipe_adslots(udf_node);
3098
3099 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
3100 icbflags |= UDF_ICB_INTERN_ALLOC;
3101 icbtag->flags = udf_rw16(icbflags);
3102
3103 inflen -= size_diff; KASSERT(inflen == 0);
3104 objsize -= size_diff;
3105 l_ad = new_size;
3106 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
3107 if (fe) {
3108 fe->inf_len = udf_rw64(inflen);
3109 fe->l_ad = udf_rw32(l_ad);
3110 fe->tag.desc_crc_len = udf_rw16(crclen);
3111 } else {
3112 efe->inf_len = udf_rw64(inflen);
3113 efe->obj_size = udf_rw64(objsize);
3114 efe->l_ad = udf_rw32(l_ad);
3115 efe->tag.desc_crc_len = udf_rw16(crclen);
3116 }
3117 /* eventually copy in evacuated piece */
3118 /* set new size for uvm */
3119 uvm_vnp_setsize(vp, new_size);
3120
3121 free(node_ad_cpy, M_UDFMNT);
3122 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
3123
3124 UDF_UNLOCK_NODE(udf_node, 0);
3125
3126 KASSERT(new_inflen == orig_inflen - size_diff);
3127 KASSERT(new_inflen == 0);
3128 KASSERT(new_lbrec == 0);
3129
3130 return 0;
3131 }
3132
3133 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
3134 }
3135
3136 /* 5) reset node descriptors */
3137 udf_wipe_adslots(udf_node);
3138
3139 /* 6) copy back extents; merge when possible. Recounting on the fly */
3140 cpy_slots = cpy_slot;
3141
3142 c_ad = node_ad_cpy[0];
3143 slot = 0;
3144 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
3145 s_ad = node_ad_cpy[cpy_slot];
3146
3147 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
3148 "lb %d, len %d, flags %d\n",
3149 udf_rw16(s_ad.loc.part_num),
3150 udf_rw32(s_ad.loc.lb_num),
3151 UDF_EXT_LEN(udf_rw32(s_ad.len)),
3152 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3153
3154 /* see if we can merge */
3155 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
3156 			/* not mergeable (anymore) */
3157 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
3158 "len %d, flags %d\n",
3159 udf_rw16(c_ad.loc.part_num),
3160 udf_rw32(c_ad.loc.lb_num),
3161 UDF_EXT_LEN(udf_rw32(c_ad.len)),
3162 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
3163
3164 error = udf_append_adslot(udf_node, &slot, &c_ad);
3165 if (error)
3166 goto errorout; /* panic? */
3167 c_ad = s_ad;
3168 slot++;
3169 }
3170 }
3171
3172 /* 7) push rest slot (if any) */
3173 	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
3174 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
3175 "len %d, flags %d\n",
3176 udf_rw16(c_ad.loc.part_num),
3177 udf_rw32(c_ad.loc.lb_num),
3178 UDF_EXT_LEN(udf_rw32(c_ad.len)),
3179 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
3180
3181 error = udf_append_adslot(udf_node, &slot, &c_ad);
3182 if (error)
3183 goto errorout; /* panic? */
3185 }
3186
3187 inflen -= size_diff;
3188 objsize -= size_diff;
3189 if (fe) {
3190 fe->inf_len = udf_rw64(inflen);
3191 } else {
3192 efe->inf_len = udf_rw64(inflen);
3193 efe->obj_size = udf_rw64(objsize);
3194 }
3195 error = 0;
3196
3197 /* set new size for uvm */
3198 uvm_vnp_setsize(vp, new_size);
3199
3200 errorout:
3201 free(node_ad_cpy, M_UDFMNT);
3202
3203 udf_count_alloc_exts(udf_node);
3204
3205 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
3206 UDF_UNLOCK_NODE(udf_node, 0);
3207
3208 KASSERT(new_inflen == orig_inflen - size_diff);
3209
3210 return error;
3211 }
3212
3213