/* $NetBSD: udf_allocation.c,v 1.31 2011/01/14 09:09:18 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.31 2011/01/14 09:09:18 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_compat_netbsd.h"
37 #endif
38
39 /* TODO strip */
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sysctl.h>
43 #include <sys/namei.h>
44 #include <sys/proc.h>
45 #include <sys/kernel.h>
46 #include <sys/vnode.h>
47 #include <miscfs/genfs/genfs_node.h>
48 #include <sys/mount.h>
49 #include <sys/buf.h>
50 #include <sys/file.h>
51 #include <sys/device.h>
52 #include <sys/disklabel.h>
53 #include <sys/ioctl.h>
54 #include <sys/malloc.h>
55 #include <sys/dirent.h>
56 #include <sys/stat.h>
57 #include <sys/conf.h>
58 #include <sys/kauth.h>
59 #include <sys/kthread.h>
60 #include <dev/clock_subr.h>
61
62 #include <fs/udf/ecma167-udf.h>
63 #include <fs/udf/udf_mount.h>
64
65 #include "udf.h"
66 #include "udf_subr.h"
67 #include "udf_bswap.h"
68
69
70 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
71
72 static void udf_record_allocation_in_node(struct udf_mount *ump,
73 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
74 struct long_ad *node_ad_cpy);
75
76 static void udf_collect_free_space_for_vpart(struct udf_mount *ump,
77 uint16_t vpart_num, uint32_t num_lb);
78
79 static int udf_ads_merge(uint32_t max_len, uint32_t lb_size, struct long_ad *a1, struct long_ad *a2);
80 static void udf_wipe_adslots(struct udf_node *udf_node);
81 static void udf_count_alloc_exts(struct udf_node *udf_node);
82
83
84 /* --------------------------------------------------------------------- */
85
86 #if 0
87 #if 1
88 static void
89 udf_node_dump(struct udf_node *udf_node) {
90 struct file_entry *fe;
91 struct extfile_entry *efe;
92 struct icb_tag *icbtag;
93 struct long_ad s_ad;
94 uint64_t inflen;
95 uint32_t icbflags, addr_type;
96 uint32_t len, lb_num;
97 uint32_t flags;
98 int part_num;
99 int lb_size, eof, slot;
100
101 if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
102 return;
103
104 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
105
106 fe = udf_node->fe;
107 efe = udf_node->efe;
108 if (fe) {
109 icbtag = &fe->icbtag;
110 inflen = udf_rw64(fe->inf_len);
111 } else {
112 icbtag = &efe->icbtag;
113 inflen = udf_rw64(efe->inf_len);
114 }
115
116 icbflags = udf_rw16(icbtag->flags);
117 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
118
119 printf("udf_node_dump %p :\n", udf_node);
120
121 if (addr_type == UDF_ICB_INTERN_ALLOC) {
122 printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
123 return;
124 }
125
126 printf("\tInflen = %"PRIu64"\n", inflen);
127 printf("\t\t");
128
129 slot = 0;
130 for (;;) {
131 udf_get_adslot(udf_node, slot, &s_ad, &eof);
132 if (eof)
133 break;
134 part_num = udf_rw16(s_ad.loc.part_num);
135 lb_num = udf_rw32(s_ad.loc.lb_num);
136 len = udf_rw32(s_ad.len);
137 flags = UDF_EXT_FLAGS(len);
138 len = UDF_EXT_LEN(len);
139
140 printf("[");
141 if (part_num >= 0)
142 printf("part %d, ", part_num);
143 printf("lb_num %d, len %d", lb_num, len);
144 if (flags)
145 printf(", flags %d", flags>>30);
146 printf("] ");
147
148 if (flags == UDF_EXT_REDIRECT) {
149 printf("\n\textent END\n\tallocation extent\n\t\t");
150 }
151
152 slot++;
153 }
154 printf("\n\tl_ad END\n\n");
155 }
156 #else
157 #define udf_node_dump(a)
158 #endif
159
160
161 static void
162 udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
163 uint32_t lb_num, uint32_t num_lb)
164 {
165 struct udf_bitmap *bitmap;
166 struct part_desc *pdesc;
167 uint32_t ptov;
168 uint32_t bitval;
169 uint8_t *bpos;
170 int bit;
171 int phys_part;
172 int ok;
173
174 DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
175 "part %d + %d sect\n", lb_num, vpart_num, num_lb));
176
177 /* get partition backing up this vpart_num */
178 pdesc = ump->partitions[ump->vtop[vpart_num]];
179
180 switch (ump->vtop_tp[vpart_num]) {
181 case UDF_VTOP_TYPE_PHYS :
182 case UDF_VTOP_TYPE_SPARABLE :
183 /* free space to freed or unallocated space bitmap */
184 ptov = udf_rw32(pdesc->start_loc);
185 phys_part = ump->vtop[vpart_num];
186
187 /* use unallocated bitmap */
188 bitmap = &ump->part_unalloc_bits[phys_part];
189
190 /* if no bitmaps are defined, bail out */
191 if (bitmap->bits == NULL)
192 break;
193
194 /* check bits */
195 KASSERT(bitmap->bits);
196 ok = 1;
197 bpos = bitmap->bits + lb_num/8;
198 bit = lb_num % 8;
199 while (num_lb > 0) {
200 bitval = (1 << bit);
201 DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
202 lb_num, bpos, bit));
203 KASSERT(bitmap->bits + lb_num/8 == bpos);
204 if (*bpos & bitval) {
205 printf("\tlb_num %d is NOT marked busy\n",
206 lb_num);
207 ok = 0;
208 }
209 lb_num++; num_lb--;
210 bit = (bit + 1) % 8;
211 if (bit == 0)
212 bpos++;
213 }
214 if (!ok) {
215 /* KASSERT(0); */
216 }
217
218 break;
219 case UDF_VTOP_TYPE_VIRT :
220 /* TODO check space */
221 KASSERT(num_lb == 1);
222 break;
223 case UDF_VTOP_TYPE_META :
224 /* TODO check space in the metadata bitmap */
225 default:
226 /* not implemented */
227 break;
228 }
229 }
230
231
232 static void
233 udf_node_sanity_check(struct udf_node *udf_node,
234 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
235 {
236 union dscrptr *dscr;
237 struct file_entry *fe;
238 struct extfile_entry *efe;
239 struct icb_tag *icbtag;
240 struct long_ad s_ad;
241 uint64_t inflen, logblksrec;
242 uint32_t icbflags, addr_type;
243 uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
244 uint16_t part_num;
245 uint8_t *data_pos;
246 int dscr_size, lb_size, flags, whole_lb;
247 int i, slot, eof;
248
249 // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
250
251 if (1)
252 udf_node_dump(udf_node);
253
254 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
255
256 fe = udf_node->fe;
257 efe = udf_node->efe;
258 if (fe) {
259 dscr = (union dscrptr *) fe;
260 icbtag = &fe->icbtag;
261 inflen = udf_rw64(fe->inf_len);
262 dscr_size = sizeof(struct file_entry) -1;
263 logblksrec = udf_rw64(fe->logblks_rec);
264 l_ad = udf_rw32(fe->l_ad);
265 l_ea = udf_rw32(fe->l_ea);
266 } else {
267 dscr = (union dscrptr *) efe;
268 icbtag = &efe->icbtag;
269 inflen = udf_rw64(efe->inf_len);
270 dscr_size = sizeof(struct extfile_entry) -1;
271 logblksrec = udf_rw64(efe->logblks_rec);
272 l_ad = udf_rw32(efe->l_ad);
273 l_ea = udf_rw32(efe->l_ea);
274 }
275 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
276 max_l_ad = lb_size - dscr_size - l_ea;
277 icbflags = udf_rw16(icbtag->flags);
278 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
279
280 /* check if tail is zero */
281 DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
282 for (i = l_ad; i < max_l_ad; i++) {
283 if (data_pos[i] != 0)
284 printf( "sanity_check: violation: node byte %d "
285 "has value %d\n", i, data_pos[i]);
286 }
287
288 /* reset counters */
289 *cnt_inflen = 0;
290 *cnt_logblksrec = 0;
291
292 if (addr_type == UDF_ICB_INTERN_ALLOC) {
293 KASSERT(l_ad <= max_l_ad);
294 KASSERT(l_ad == inflen);
295 *cnt_inflen = inflen;
296 return;
297 }
298
299 /* start counting */
300 whole_lb = 1;
301 slot = 0;
302 for (;;) {
303 udf_get_adslot(udf_node, slot, &s_ad, &eof);
304 if (eof)
305 break;
306 KASSERT(whole_lb == 1);
307
308 part_num = udf_rw16(s_ad.loc.part_num);
309 lb_num = udf_rw32(s_ad.loc.lb_num);
310 len = udf_rw32(s_ad.len);
311 flags = UDF_EXT_FLAGS(len);
312 len = UDF_EXT_LEN(len);
313
314 if (flags != UDF_EXT_REDIRECT) {
315 *cnt_inflen += len;
316 if (flags == UDF_EXT_ALLOCATED) {
317 *cnt_logblksrec += (len + lb_size -1) / lb_size;
318 }
319 } else {
320 KASSERT(len == lb_size);
321 }
322 /* check allocation */
323 if (flags == UDF_EXT_ALLOCATED)
324 udf_assert_allocated(udf_node->ump, part_num, lb_num,
325 (len + lb_size - 1) / lb_size);
326
327 /* check whole lb */
328 whole_lb = ((len % lb_size) == 0);
329
330 slot++;
331 }
332 /* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */
333
334 KASSERT(*cnt_inflen == inflen);
335 KASSERT(*cnt_logblksrec == logblksrec);
336
337 // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
338 }
339 #else
340 static void
341 udf_node_sanity_check(struct udf_node *udf_node,
342 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
343 struct file_entry *fe;
344 struct extfile_entry *efe;
345 struct icb_tag *icbtag;
346 uint64_t inflen, logblksrec;
347 int dscr_size, lb_size;
348
349 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
350
351 fe = udf_node->fe;
352 efe = udf_node->efe;
353 if (fe) {
354 icbtag = &fe->icbtag;
355 inflen = udf_rw64(fe->inf_len);
356 dscr_size = sizeof(struct file_entry) -1;
357 logblksrec = udf_rw64(fe->logblks_rec);
358 } else {
359 icbtag = &efe->icbtag;
360 inflen = udf_rw64(efe->inf_len);
361 dscr_size = sizeof(struct extfile_entry) -1;
362 logblksrec = udf_rw64(efe->logblks_rec);
363 }
364 *cnt_logblksrec = logblksrec;
365 *cnt_inflen = inflen;
366 }
367 #endif
368
369 /* --------------------------------------------------------------------- */
370
371 void
372 udf_calc_freespace(struct udf_mount *ump, uint64_t *sizeblks, uint64_t *freeblks)
373 {
374 struct logvol_int_desc *lvid;
375 uint32_t *pos1, *pos2;
376 int vpart, num_vpart;
377
378 lvid = ump->logvol_integrity;
379 *freeblks = *sizeblks = 0;
380
381 /*
382 * Sequentials media report free space directly (CD/DVD/BD-R), for the
383 * other media we need the logical volume integrity.
384 *
385 * We sum all free space up here regardless of type.
386 */
387
388 KASSERT(lvid);
389 num_vpart = udf_rw32(lvid->num_part);
390
391 if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
392 /* use track info directly summing if there are 2 open */
393 /* XXX assumption at most two tracks open */
394 *freeblks = ump->data_track.free_blocks;
395 if (ump->data_track.tracknr != ump->metadata_track.tracknr)
396 *freeblks += ump->metadata_track.free_blocks;
397 *sizeblks = ump->discinfo.last_possible_lba;
398 } else {
399 /* free and used space for mountpoint based on logvol integrity */
400 for (vpart = 0; vpart < num_vpart; vpart++) {
401 pos1 = &lvid->tables[0] + vpart;
402 pos2 = &lvid->tables[0] + num_vpart + vpart;
403 if (udf_rw32(*pos1) != (uint32_t) -1) {
404 *freeblks += udf_rw32(*pos1);
405 *sizeblks += udf_rw32(*pos2);
406 }
407 }
408 }
409 /* adjust for accounted uncommitted blocks */
410 for (vpart = 0; vpart < num_vpart; vpart++)
411 *freeblks -= ump->uncommitted_lbs[vpart];
412
413 if (*freeblks > UDF_DISC_SLACK) {
414 *freeblks -= UDF_DISC_SLACK;
415 } else {
416 *freeblks = 0;
417 }
418 }
419
420
421 static void
422 udf_calc_vpart_freespace(struct udf_mount *ump, uint16_t vpart_num, uint64_t *freeblks)
423 {
424 struct logvol_int_desc *lvid;
425 uint32_t *pos1;
426
427 lvid = ump->logvol_integrity;
428 *freeblks = 0;
429
430 /*
431 * Sequentials media report free space directly (CD/DVD/BD-R), for the
432 * other media we need the logical volume integrity.
433 *
434 * We sum all free space up here regardless of type.
435 */
436
437 KASSERT(lvid);
438 if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
439 /* XXX assumption at most two tracks open */
440 if (vpart_num == ump->data_part) {
441 *freeblks = ump->data_track.free_blocks;
442 } else {
443 *freeblks = ump->metadata_track.free_blocks;
444 }
445 } else {
446 /* free and used space for mountpoint based on logvol integrity */
447 pos1 = &lvid->tables[0] + vpart_num;
448 if (udf_rw32(*pos1) != (uint32_t) -1)
449 *freeblks += udf_rw32(*pos1);
450 }
451
452 /* adjust for accounted uncommitted blocks */
453 if (*freeblks > ump->uncommitted_lbs[vpart_num]) {
454 *freeblks -= ump->uncommitted_lbs[vpart_num];
455 } else {
456 *freeblks = 0;
457 }
458 }
459
460 /* --------------------------------------------------------------------- */
461
462 int
463 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
464 uint32_t *lb_numres, uint32_t *extres)
465 {
466 struct part_desc *pdesc;
467 struct spare_map_entry *sme;
468 struct long_ad s_icb_loc;
469 uint64_t foffset, end_foffset;
470 uint32_t lb_size, len;
471 uint32_t lb_num, lb_rel, lb_packet;
472 uint32_t udf_rw32_lbmap, ext_offset;
473 uint16_t vpart;
474 int rel, part, error, eof, slot, flags;
475
476 assert(ump && icb_loc && lb_numres);
477
478 vpart = udf_rw16(icb_loc->loc.part_num);
479 lb_num = udf_rw32(icb_loc->loc.lb_num);
480 if (vpart > UDF_VTOP_RAWPART)
481 return EINVAL;
482
483 translate_again:
484 part = ump->vtop[vpart];
485 pdesc = ump->partitions[part];
486
487 switch (ump->vtop_tp[vpart]) {
488 case UDF_VTOP_TYPE_RAW :
489 /* 1:1 to the end of the device */
490 *lb_numres = lb_num;
491 *extres = INT_MAX;
492 return 0;
493 case UDF_VTOP_TYPE_PHYS :
494 /* transform into its disc logical block */
495 if (lb_num > udf_rw32(pdesc->part_len))
496 return EINVAL;
497 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
498
499 /* extent from here to the end of the partition */
500 *extres = udf_rw32(pdesc->part_len) - lb_num;
501 return 0;
502 case UDF_VTOP_TYPE_VIRT :
503 /* only maps one logical block, lookup in VAT */
504 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
505 return EINVAL;
506
507 /* lookup in virtual allocation table file */
508 mutex_enter(&ump->allocate_mutex);
509 error = udf_vat_read(ump->vat_node,
510 (uint8_t *) &udf_rw32_lbmap, 4,
511 ump->vat_offset + lb_num * 4);
512 mutex_exit(&ump->allocate_mutex);
513
514 if (error)
515 return error;
516
517 lb_num = udf_rw32(udf_rw32_lbmap);
518
519 /* transform into its disc logical block */
520 if (lb_num > udf_rw32(pdesc->part_len))
521 return EINVAL;
522 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
523
524 /* just one logical block */
525 *extres = 1;
526 return 0;
527 case UDF_VTOP_TYPE_SPARABLE :
528 /* check if the packet containing the lb_num is remapped */
529 lb_packet = lb_num / ump->sparable_packet_size;
530 lb_rel = lb_num % ump->sparable_packet_size;
531
532 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
533 sme = &ump->sparing_table->entries[rel];
534 if (lb_packet == udf_rw32(sme->org)) {
535 /* NOTE maps to absolute disc logical block! */
536 *lb_numres = udf_rw32(sme->map) + lb_rel;
537 *extres = ump->sparable_packet_size - lb_rel;
538 return 0;
539 }
540 }
541
542 /* transform into its disc logical block */
543 if (lb_num > udf_rw32(pdesc->part_len))
544 return EINVAL;
545 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
546
547 /* rest of block */
548 *extres = ump->sparable_packet_size - lb_rel;
549 return 0;
550 case UDF_VTOP_TYPE_META :
551 /* we have to look into the file's allocation descriptors */
552
553 /* use metadatafile allocation mutex */
554 lb_size = udf_rw32(ump->logical_vol->lb_size);
555
556 UDF_LOCK_NODE(ump->metadata_node, 0);
557
558 /* get first overlapping extent */
559 foffset = 0;
560 slot = 0;
561 for (;;) {
562 udf_get_adslot(ump->metadata_node,
563 slot, &s_icb_loc, &eof);
564 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
565 "len = %d, lb_num = %d, part = %d\n",
566 slot, eof,
567 UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
568 UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
569 udf_rw32(s_icb_loc.loc.lb_num),
570 udf_rw16(s_icb_loc.loc.part_num)));
571 if (eof) {
572 DPRINTF(TRANSLATE,
573 ("Meta partition translation "
574 "failed: can't seek location\n"));
575 UDF_UNLOCK_NODE(ump->metadata_node, 0);
576 return EINVAL;
577 }
578 len = udf_rw32(s_icb_loc.len);
579 flags = UDF_EXT_FLAGS(len);
580 len = UDF_EXT_LEN(len);
581
582 if (flags == UDF_EXT_REDIRECT) {
583 slot++;
584 continue;
585 }
586
587 end_foffset = foffset + len;
588
589 if (end_foffset > lb_num * lb_size)
590 break; /* found */
591 foffset = end_foffset;
592 slot++;
593 }
594 /* found overlapping slot */
595 ext_offset = lb_num * lb_size - foffset;
596
597 /* process extent offset */
598 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
599 vpart = udf_rw16(s_icb_loc.loc.part_num);
600 lb_num += (ext_offset + lb_size -1) / lb_size;
601 ext_offset = 0;
602
603 UDF_UNLOCK_NODE(ump->metadata_node, 0);
604 if (flags != UDF_EXT_ALLOCATED) {
605 DPRINTF(TRANSLATE, ("Metadata partition translation "
606 "failed: not allocated\n"));
607 return EINVAL;
608 }
609
610 /*
611 * vpart and lb_num are updated, translate again since we
612 * might be mapped on sparable media
613 */
614 goto translate_again;
615 default:
616 printf("UDF vtop translation scheme %d unimplemented yet\n",
617 ump->vtop_tp[vpart]);
618 }
619
620 return EINVAL;
621 }
622
623
624 /* XXX provisional primitive braindead version */
625 /* TODO use ext_res */
626 void
627 udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
628 uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
629 {
630 struct long_ad loc;
631 uint32_t lb_numres, ext_res;
632 int sector;
633
634 for (sector = 0; sector < sectors; sector++) {
635 memset(&loc, 0, sizeof(struct long_ad));
636 loc.loc.part_num = udf_rw16(vpart_num);
637 loc.loc.lb_num = udf_rw32(*lmapping);
638 udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
639 *pmapping = lb_numres;
640 lmapping++; pmapping++;
641 }
642 }
643
644
645 /* --------------------------------------------------------------------- */
646
647 /*
648 * Translate an extent (in logical_blocks) into logical block numbers; used
649 * for read and write operations. DOESNT't check extents.
650 */
651
652 int
653 udf_translate_file_extent(struct udf_node *udf_node,
654 uint32_t from, uint32_t num_lb,
655 uint64_t *map)
656 {
657 struct udf_mount *ump;
658 struct icb_tag *icbtag;
659 struct long_ad t_ad, s_ad;
660 uint64_t transsec;
661 uint64_t foffset, end_foffset;
662 uint32_t transsec32;
663 uint32_t lb_size;
664 uint32_t ext_offset;
665 uint32_t lb_num, len;
666 uint32_t overlap, translen;
667 uint16_t vpart_num;
668 int eof, error, flags;
669 int slot, addr_type, icbflags;
670
671 if (!udf_node)
672 return ENOENT;
673
674 KASSERT(num_lb > 0);
675
676 UDF_LOCK_NODE(udf_node, 0);
677
678 /* initialise derivative vars */
679 ump = udf_node->ump;
680 lb_size = udf_rw32(ump->logical_vol->lb_size);
681
682 if (udf_node->fe) {
683 icbtag = &udf_node->fe->icbtag;
684 } else {
685 icbtag = &udf_node->efe->icbtag;
686 }
687 icbflags = udf_rw16(icbtag->flags);
688 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
689
690 /* do the work */
691 if (addr_type == UDF_ICB_INTERN_ALLOC) {
692 *map = UDF_TRANS_INTERN;
693 UDF_UNLOCK_NODE(udf_node, 0);
694 return 0;
695 }
696
697 /* find first overlapping extent */
698 foffset = 0;
699 slot = 0;
700 for (;;) {
701 udf_get_adslot(udf_node, slot, &s_ad, &eof);
702 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
703 "lb_num = %d, part = %d\n", slot, eof,
704 UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
705 UDF_EXT_LEN(udf_rw32(s_ad.len)),
706 udf_rw32(s_ad.loc.lb_num),
707 udf_rw16(s_ad.loc.part_num)));
708 if (eof) {
709 DPRINTF(TRANSLATE,
710 ("Translate file extent "
711 "failed: can't seek location\n"));
712 UDF_UNLOCK_NODE(udf_node, 0);
713 return EINVAL;
714 }
715 len = udf_rw32(s_ad.len);
716 flags = UDF_EXT_FLAGS(len);
717 len = UDF_EXT_LEN(len);
718 lb_num = udf_rw32(s_ad.loc.lb_num);
719
720 if (flags == UDF_EXT_REDIRECT) {
721 slot++;
722 continue;
723 }
724
725 end_foffset = foffset + len;
726
727 if (end_foffset > from * lb_size)
728 break; /* found */
729 foffset = end_foffset;
730 slot++;
731 }
732 /* found overlapping slot */
733 ext_offset = from * lb_size - foffset;
734
735 for (;;) {
736 udf_get_adslot(udf_node, slot, &s_ad, &eof);
737 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
738 "lb_num = %d, part = %d\n", slot, eof,
739 UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
740 UDF_EXT_LEN(udf_rw32(s_ad.len)),
741 udf_rw32(s_ad.loc.lb_num),
742 udf_rw16(s_ad.loc.part_num)));
743 if (eof) {
744 DPRINTF(TRANSLATE,
745 ("Translate file extent "
746 "failed: past eof\n"));
747 UDF_UNLOCK_NODE(udf_node, 0);
748 return EINVAL;
749 }
750
751 len = udf_rw32(s_ad.len);
752 flags = UDF_EXT_FLAGS(len);
753 len = UDF_EXT_LEN(len);
754
755 lb_num = udf_rw32(s_ad.loc.lb_num);
756 vpart_num = udf_rw16(s_ad.loc.part_num);
757
758 end_foffset = foffset + len;
759
760 /* process extent, don't forget to advance on ext_offset! */
761 lb_num += (ext_offset + lb_size -1) / lb_size;
762 overlap = (len - ext_offset + lb_size -1) / lb_size;
763 ext_offset = 0;
764
765 /*
766 * note that the while(){} is nessisary for the extent that
767 * the udf_translate_vtop() returns doens't have to span the
768 * whole extent.
769 */
770
771 overlap = MIN(overlap, num_lb);
772 while (overlap && (flags != UDF_EXT_REDIRECT)) {
773 switch (flags) {
774 case UDF_EXT_FREE :
775 case UDF_EXT_ALLOCATED_BUT_NOT_USED :
776 transsec = UDF_TRANS_ZERO;
777 translen = overlap;
778 while (overlap && num_lb && translen) {
779 *map++ = transsec;
780 lb_num++;
781 overlap--; num_lb--; translen--;
782 }
783 break;
784 case UDF_EXT_ALLOCATED :
785 t_ad.loc.lb_num = udf_rw32(lb_num);
786 t_ad.loc.part_num = udf_rw16(vpart_num);
787 error = udf_translate_vtop(ump,
788 &t_ad, &transsec32, &translen);
789 transsec = transsec32;
790 if (error) {
791 UDF_UNLOCK_NODE(udf_node, 0);
792 return error;
793 }
794 while (overlap && num_lb && translen) {
795 *map++ = transsec;
796 lb_num++; transsec++;
797 overlap--; num_lb--; translen--;
798 }
799 break;
800 default:
801 DPRINTF(TRANSLATE,
802 ("Translate file extent "
803 "failed: bad flags %x\n", flags));
804 UDF_UNLOCK_NODE(udf_node, 0);
805 return EINVAL;
806 }
807 }
808 if (num_lb == 0)
809 break;
810
811 if (flags != UDF_EXT_REDIRECT)
812 foffset = end_foffset;
813 slot++;
814 }
815 UDF_UNLOCK_NODE(udf_node, 0);
816
817 return 0;
818 }
819
820 /* --------------------------------------------------------------------- */
821
822 static int
823 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
824 {
825 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
826 uint8_t *blob;
827 int entry, chunk, found, error;
828
829 KASSERT(ump);
830 KASSERT(ump->logical_vol);
831
832 lb_size = udf_rw32(ump->logical_vol->lb_size);
833 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
834
835 /* TODO static allocation of search chunk */
836
837 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
838 found = 0;
839 error = 0;
840 entry = 0;
841 do {
842 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
843 if (chunk <= 0)
844 break;
845 /* load in chunk */
846 error = udf_vat_read(ump->vat_node, blob, chunk,
847 ump->vat_offset + lb_num * 4);
848
849 if (error)
850 break;
851
852 /* search this chunk */
853 for (entry=0; entry < chunk /4; entry++, lb_num++) {
854 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
855 lb_map = udf_rw32(udf_rw32_lbmap);
856 if (lb_map == 0xffffffff) {
857 found = 1;
858 break;
859 }
860 }
861 } while (!found);
862 if (error) {
863 printf("udf_search_free_vatloc: error reading in vat chunk "
864 "(lb %d, size %d)\n", lb_num, chunk);
865 }
866
867 if (!found) {
868 /* extend VAT */
869 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
870 lb_num = ump->vat_entries;
871 ump->vat_entries++;
872 }
873
874 /* mark entry with initialiser just in case */
875 lb_map = udf_rw32(0xfffffffe);
876 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
877 ump->vat_offset + lb_num *4);
878 ump->vat_last_free_lb = lb_num;
879
880 free(blob, M_UDFTEMP);
881 *lbnumres = lb_num;
882 return 0;
883 }
884
885
886 static void
887 udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
888 uint32_t *num_lb, uint64_t *lmappos)
889 {
890 uint32_t offset, lb_num, bit;
891 int32_t diff;
892 uint8_t *bpos;
893 int pass;
894
895 if (!ismetadata) {
896 /* heuristic to keep the two pointers not too close */
897 diff = bitmap->data_pos - bitmap->metadata_pos;
898 if ((diff >= 0) && (diff < 1024))
899 bitmap->data_pos = bitmap->metadata_pos + 1024;
900 }
901 offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
902 offset &= ~7;
903 for (pass = 0; pass < 2; pass++) {
904 if (offset >= bitmap->max_offset)
905 offset = 0;
906
907 while (offset < bitmap->max_offset) {
908 if (*num_lb == 0)
909 break;
910
911 /* use first bit not set */
912 bpos = bitmap->bits + offset/8;
913 bit = ffs(*bpos); /* returns 0 or 1..8 */
914 if (bit == 0) {
915 offset += 8;
916 continue;
917 }
918
919 /* check for ffs overshoot */
920 if (offset + bit-1 >= bitmap->max_offset) {
921 offset = bitmap->max_offset;
922 break;
923 }
924
925 DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
926 offset + bit -1, bpos, bit-1));
927 *bpos &= ~(1 << (bit-1));
928 lb_num = offset + bit-1;
929 *lmappos++ = lb_num;
930 *num_lb = *num_lb - 1;
931 // offset = (offset & ~7);
932 }
933 }
934
935 if (ismetadata) {
936 bitmap->metadata_pos = offset;
937 } else {
938 bitmap->data_pos = offset;
939 }
940 }
941
942
943 static void
944 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
945 {
946 uint32_t offset;
947 uint32_t bit, bitval;
948 uint8_t *bpos;
949
950 offset = lb_num;
951
952 /* starter bits */
953 bpos = bitmap->bits + offset/8;
954 bit = offset % 8;
955 while ((bit != 0) && (num_lb > 0)) {
956 bitval = (1 << bit);
957 KASSERT((*bpos & bitval) == 0);
958 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
959 offset, bpos, bit));
960 *bpos |= bitval;
961 offset++; num_lb--;
962 bit = (bit + 1) % 8;
963 }
964 if (num_lb == 0)
965 return;
966
967 /* whole bytes */
968 KASSERT(bit == 0);
969 bpos = bitmap->bits + offset / 8;
970 while (num_lb >= 8) {
971 KASSERT((*bpos == 0));
972 DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
973 *bpos = 255;
974 offset += 8; num_lb -= 8;
975 bpos++;
976 }
977
978 /* stop bits */
979 KASSERT(num_lb < 8);
980 bit = 0;
981 while (num_lb > 0) {
982 bitval = (1 << bit);
983 KASSERT((*bpos & bitval) == 0);
984 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
985 offset, bpos, bit));
986 *bpos |= bitval;
987 offset++; num_lb--;
988 bit = (bit + 1) % 8;
989 }
990 }
991
992
993 static uint32_t
994 udf_bitmap_check_trunc_free(struct udf_bitmap *bitmap, uint32_t to_trunc)
995 {
996 uint32_t seq_free, offset;
997 uint8_t *bpos;
998 uint8_t bit, bitval;
999
1000 DPRINTF(RESERVE, ("\ttrying to trunc %d bits from bitmap\n", to_trunc));
1001 offset = bitmap->max_offset - to_trunc;
1002
1003 /* starter bits (if any) */
1004 bpos = bitmap->bits + offset/8;
1005 bit = offset % 8;
1006 seq_free = 0;
1007 while (to_trunc > 0) {
1008 seq_free++;
1009 bitval = (1 << bit);
1010 if (!(*bpos & bitval))
1011 seq_free = 0;
1012 offset++; to_trunc--;
1013 bit++;
1014 if (bit == 8) {
1015 bpos++;
1016 bit = 0;
1017 }
1018 }
1019
1020 DPRINTF(RESERVE, ("\tfound %d sequential free bits in bitmap\n", seq_free));
1021 return seq_free;
1022 }
1023
1024 /* --------------------------------------------------------------------- */
1025
1026 /*
1027 * We check for overall disc space with a margin to prevent critical
1028 * conditions. If disc space is low we try to force a sync() to improve our
1029 * estimates. When confronted with meta-data partition size shortage we know
1030 * we have to check if it can be extended and we need to extend it when
1031 * needed.
1032 *
1033 * A 2nd strategy we could use when disc space is getting low on a disc
1034 * formatted with a meta-data partition is to see if there are sparse areas in
1035 * the meta-data partition and free blocks there for extra data.
1036 */
1037
1038 void
1039 udf_do_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
1040 uint16_t vpart_num, uint32_t num_lb)
1041 {
1042 ump->uncommitted_lbs[vpart_num] += num_lb;
1043 if (udf_node)
1044 udf_node->uncommitted_lbs += num_lb;
1045 }
1046
1047
1048 void
1049 udf_do_unreserve_space(struct udf_mount *ump, struct udf_node *udf_node,
1050 uint16_t vpart_num, uint32_t num_lb)
1051 {
1052 ump->uncommitted_lbs[vpart_num] -= num_lb;
1053 if (ump->uncommitted_lbs[vpart_num] < 0) {
1054 DPRINTF(RESERVE, ("UDF: underflow on partition reservation, "
1055 "part %d: %d\n", vpart_num,
1056 ump->uncommitted_lbs[vpart_num]));
1057 ump->uncommitted_lbs[vpart_num] = 0;
1058 }
1059 if (udf_node) {
1060 udf_node->uncommitted_lbs -= num_lb;
1061 if (udf_node->uncommitted_lbs < 0) {
1062 DPRINTF(RESERVE, ("UDF: underflow of node "
1063 "reservation : %d\n",
1064 udf_node->uncommitted_lbs));
1065 udf_node->uncommitted_lbs = 0;
1066 }
1067 }
1068 }
1069
1070
1071 int
1072 udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
1073 int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail)
1074 {
1075 uint64_t freeblks;
1076 uint64_t slack;
1077 int i, error;
1078
1079 slack = 0;
1080 if (can_fail)
1081 slack = UDF_DISC_SLACK;
1082
1083 error = 0;
1084 mutex_enter(&ump->allocate_mutex);
1085
1086 /* check if there is enough space available */
1087 for (i = 0; i < 3; i++) { /* XXX arbitrary number */
1088 udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
1089 if (num_lb + slack < freeblks)
1090 break;
1091 /* issue SYNC */
1092 DPRINTF(RESERVE, ("udf_reserve_space: issuing sync\n"));
1093 mutex_exit(&ump->allocate_mutex);
1094 udf_do_sync(ump, FSCRED, 0);
1095 mutex_enter(&mntvnode_lock);
1096 /* 1/8 second wait */
1097 cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
1098 hz/8);
1099 mutex_exit(&mntvnode_lock);
1100 mutex_enter(&ump->allocate_mutex);
1101 }
1102
1103 /* check if there is enough space available now */
1104 udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
1105 if (num_lb + slack >= freeblks) {
1106 DPRINTF(RESERVE, ("udf_reserve_space: try to redistribute "
1107 "partition space\n"));
1108 DPRINTF(RESERVE, ("\tvpart %d, type %d is full\n",
1109 vpart_num, ump->vtop_alloc[vpart_num]));
1110 /* Try to redistribute space if possible */
1111 udf_collect_free_space_for_vpart(ump, vpart_num, num_lb + slack);
1112 }
1113
1114 /* check if there is enough space available now */
1115 udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
1116 if (num_lb + slack <= freeblks) {
1117 udf_do_reserve_space(ump, udf_node, vpart_num, num_lb);
1118 } else {
1119 DPRINTF(RESERVE, ("udf_reserve_space: out of disc space\n"));
1120 error = ENOSPC;
1121 }
1122
1123 mutex_exit(&ump->allocate_mutex);
1124 return error;
1125 }
1126
1127
1128 void
1129 udf_cleanup_reservation(struct udf_node *udf_node)
1130 {
1131 struct udf_mount *ump = udf_node->ump;
1132 int vpart_num;
1133
1134 mutex_enter(&ump->allocate_mutex);
1135
1136 /* compensate for overlapping blocks */
1137 DPRINTF(RESERVE, ("UDF: overlapped %d blocks in count\n", udf_node->uncommitted_lbs));
1138
1139 vpart_num = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
1140 udf_do_unreserve_space(ump, udf_node, vpart_num, udf_node->uncommitted_lbs);
1141
1142 DPRINTF(RESERVE, ("\ttotal now %d\n", ump->uncommitted_lbs[vpart_num]));
1143
1144 /* sanity */
1145 if (ump->uncommitted_lbs[vpart_num] < 0)
1146 ump->uncommitted_lbs[vpart_num] = 0;
1147
1148 mutex_exit(&ump->allocate_mutex);
1149 }
1150
1151 /* --------------------------------------------------------------------- */
1152
1153 /*
1154 * Allocate an extent of given length on given virt. partition. It doesn't
1155 * have to be one stretch.
1156 */
1157
/*
 * Commit an allocation of `num_lb' logical blocks on virtual partition
 * `vpart_num', writing the resulting logical block numbers into the
 * caller supplied `lmapping' array. The strategy used depends on the
 * partition's vtop_alloc type. On success the blocks are un-reserved
 * (credited) on this partition; returns 0 or ENOSPC.
 */
int
udf_allocate_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	alloc_type = ump->vtop_alloc[vpart_num];
	is_node = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;

			/* reserve on the backing sequential partition since
			 * that partition is credited back later */
			udf_do_reserve_space(ump, udf_node,
				ump->vtop[vpart_num], num_lb);
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate: hand out consecutive blocks at the write pointer */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date when both share one track */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? alloc_num_lb is the shortfall */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity descr. */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? alloc_num_lb is the shortfall */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity descr. */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R */
		printf("ALERT: udf_allocate_space : allocation %d "
				"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

	if (!error) {
		/* credit our partition since we have committed the space */
		udf_do_unreserve_space(ump, udf_node, vpart_num, num_lb);
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64, (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
1306
1307 /* --------------------------------------------------------------------- */
1308
/*
 * Return `num_lb' previously allocated logical blocks starting at
 * partition-relative block `lb_num' on virtual partition `vpart_num'
 * back to the free space administration. The mechanism depends on the
 * partition's vtop translation type.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used in this branch */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by writing an `unused' marker */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember the lowest free VAT slot for faster searching */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1396
1397 /* --------------------------------------------------------------------- */
1398
1399 /*
1400 * Special function to synchronise the metadatamirror file when they change on
1401 * resizing. When the metadatafile is actually duplicated, this action is a
1402 * no-op since they describe different extents on the disc.
1403 */
1404
1405 void
1406 udf_synchronise_metadatamirror_node(struct udf_mount *ump)
1407 {
1408 struct udf_node *meta_node, *metamirror_node;
1409 struct long_ad s_ad;
1410 uint32_t len, flags;
1411 int slot, cpy_slot;
1412 int error, eof;
1413
1414 if (ump->metadata_flags & METADATA_DUPLICATED)
1415 return;
1416
1417 meta_node = ump->metadata_node;
1418 metamirror_node = ump->metadatamirror_node;
1419
1420 /* 1) wipe mirror node */
1421 udf_wipe_adslots(metamirror_node);
1422
1423 /* 2) copy all node descriptors from the meta_node */
1424 slot = 0;
1425 cpy_slot = 0;
1426 for (;;) {
1427 udf_get_adslot(meta_node, slot, &s_ad, &eof);
1428 if (eof)
1429 break;
1430 len = udf_rw32(s_ad.len);
1431 flags = UDF_EXT_FLAGS(len);
1432 len = UDF_EXT_LEN(len);
1433
1434 if (flags == UDF_EXT_REDIRECT) {
1435 slot++;
1436 continue;
1437 }
1438
1439 error = udf_append_adslot(metamirror_node, &cpy_slot, &s_ad);
1440 if (error) {
1441 /* WTF, this shouldn't happen, what to do now? */
1442 panic("udf_synchronise_metadatamirror_node failed!");
1443 }
1444 cpy_slot++;
1445 slot++;
1446 }
1447
1448 /* 3) adjust metamirror_node size */
1449 if (meta_node->fe) {
1450 KASSERT(metamirror_node->fe);
1451 metamirror_node->fe->inf_len = meta_node->fe->inf_len;
1452 } else {
1453 KASSERT(meta_node->efe);
1454 KASSERT(metamirror_node->efe);
1455 metamirror_node->efe->inf_len = meta_node->efe->inf_len;
1456 metamirror_node->efe->obj_size = meta_node->efe->obj_size;
1457 }
1458
1459 /* for sanity */
1460 udf_count_alloc_exts(metamirror_node);
1461 }
1462
1463 /* --------------------------------------------------------------------- */
1464
1465 /*
1466 * When faced with an out of space but there is still space available on other
1467 * partitions, try to redistribute the space. This is only defined for media
1468 * using Metadata partitions.
1469 *
1470 * There are two formats to deal with. Either its a `normal' metadata
1471 * partition and we can move blocks between a metadata bitmap and its
1472 * companion data spacemap OR its a UDF 2.60 formatted BluRay-R disc with POW
1473 * and a metadata partition.
1474 */
1475
1476 /* implementation limit: ump->datapart is the companion partition */
1477 static uint32_t
1478 udf_trunc_metadatapart(struct udf_mount *ump, uint32_t num_lb)
1479 {
1480 struct udf_node *bitmap_node;
1481 struct udf_bitmap *bitmap;
1482 struct space_bitmap_desc *sbd, *new_sbd;
1483 struct logvol_int_desc *lvid;
1484 uint64_t inf_len;
1485 uint64_t meta_free_lbs, data_free_lbs, to_trunc;
1486 uint32_t *freepos, *sizepos;
1487 uint32_t unit, lb_size;
1488 uint16_t meta_vpart_num, data_vpart_num, num_vpart;
1489 int err;
1490
1491 unit = ump->metadata_alloc_unit_size;
1492 lb_size = udf_rw32(ump->logical_vol->lb_size);
1493 lvid = ump->logvol_integrity;
1494
1495 /* XXX
1496 *
1497 * the following checks will fail for BD-R UDF 2.60! but they are
1498 * read-only for now anyway! Its even doubtfull if it is to be allowed
1499 * for these discs.
1500 */
1501
1502 /* lookup vpart for metadata partition */
1503 meta_vpart_num = ump->node_part;
1504 KASSERT(ump->vtop_alloc[meta_vpart_num] == UDF_ALLOC_METABITMAP);
1505
1506 /* lookup vpart for data partition */
1507 data_vpart_num = ump->data_part;
1508 KASSERT(ump->vtop_alloc[data_vpart_num] == UDF_ALLOC_SPACEMAP);
1509
1510 udf_calc_vpart_freespace(ump, data_vpart_num, &data_free_lbs);
1511 udf_calc_vpart_freespace(ump, meta_vpart_num, &meta_free_lbs);
1512
1513 DPRINTF(RESERVE, ("\tfree space on data partition %"PRIu64" blks\n", data_free_lbs));
1514 DPRINTF(RESERVE, ("\tfree space on metadata partition %"PRIu64" blks\n", meta_free_lbs));
1515
1516 /* give away some of the free meta space, in unit block sizes */
1517 to_trunc = meta_free_lbs/4; /* give out a quarter */
1518 to_trunc = MAX(to_trunc, num_lb);
1519 to_trunc = unit * ((to_trunc + unit-1) / unit); /* round up */
1520
1521 /* scale down if needed and bail out when out of space */
1522 if (to_trunc >= meta_free_lbs)
1523 return num_lb;
1524
1525 /* check extent of bits marked free at the end of the map */
1526 bitmap = &ump->metadata_unalloc_bits;
1527 to_trunc = udf_bitmap_check_trunc_free(bitmap, to_trunc);
1528 to_trunc = unit * (to_trunc / unit); /* round down again */
1529 if (to_trunc == 0)
1530 return num_lb;
1531
1532 DPRINTF(RESERVE, ("\ttruncating %"PRIu64" lbs from the metadata bitmap\n",
1533 to_trunc));
1534
1535 /* get length of the metadata bitmap node file */
1536 bitmap_node = ump->metadatabitmap_node;
1537 if (bitmap_node->fe) {
1538 inf_len = udf_rw64(bitmap_node->fe->inf_len);
1539 } else {
1540 KASSERT(bitmap_node->efe);
1541 inf_len = udf_rw64(bitmap_node->efe->inf_len);
1542 }
1543 inf_len -= to_trunc/8;
1544
1545 /* as per [UDF 2.60/2.2.13.6] : */
1546 /* 1) update the SBD in the metadata bitmap file */
1547 sbd = (struct space_bitmap_desc *) bitmap->blob;
1548 sbd->num_bits = udf_rw32(udf_rw32(sbd->num_bits) - to_trunc);
1549 sbd->num_bytes = udf_rw32(udf_rw32(sbd->num_bytes) - to_trunc/8);
1550 bitmap->max_offset = udf_rw32(sbd->num_bits);
1551
1552 num_vpart = udf_rw32(lvid->num_part);
1553 freepos = &lvid->tables[0] + meta_vpart_num;
1554 sizepos = &lvid->tables[0] + num_vpart + meta_vpart_num;
1555 *freepos = udf_rw32(*freepos) - to_trunc;
1556 *sizepos = udf_rw32(*sizepos) - to_trunc;
1557
1558 /* realloc bitmap for better memory usage */
1559 new_sbd = realloc(sbd, inf_len, M_UDFVOLD,
1560 M_CANFAIL | M_WAITOK);
1561 if (new_sbd) {
1562 /* update pointers */
1563 ump->metadata_unalloc_dscr = new_sbd;
1564 bitmap->blob = (uint8_t *) new_sbd;
1565 }
1566 ump->lvclose |= UDF_WRITE_PART_BITMAPS;
1567
1568 /*
1569 * The truncated space is secured now and can't be allocated anymore.
1570 * Release the allocate mutex so we can shrink the nodes the normal
1571 * way.
1572 */
1573 mutex_exit(&ump->allocate_mutex);
1574
1575 /* 2) trunc the metadata bitmap information file, freeing blocks */
1576 err = udf_shrink_node(bitmap_node, inf_len);
1577 KASSERT(err == 0);
1578
1579 /* 3) trunc the metadata file and mirror file, freeing blocks */
1580 inf_len = (uint64_t) udf_rw32(sbd->num_bits) * lb_size; /* [4/14.12.4] */
1581 err = udf_shrink_node(ump->metadata_node, inf_len);
1582 KASSERT(err == 0);
1583 if (ump->metadatamirror_node) {
1584 if (ump->metadata_flags & METADATA_DUPLICATED) {
1585 err = udf_shrink_node(ump->metadatamirror_node, inf_len);
1586 } else {
1587 /* extents will be copied on writeout */
1588 }
1589 KASSERT(err == 0);
1590 }
1591 ump->lvclose |= UDF_WRITE_METAPART_NODES;
1592
1593 /* relock before exit */
1594 mutex_enter(&ump->allocate_mutex);
1595
1596 if (to_trunc > num_lb)
1597 return 0;
1598 return num_lb - to_trunc;
1599 }
1600
1601
/*
 * Make the metadata partition sparse to free up `num_lb' blocks for the
 * data partition, as per [UDF 2.60/2.2.13.6]. Stub: does nothing, so the
 * caller's remaining shortfall is simply not satisfied.
 */
static void
udf_sparsify_metadatapart(struct udf_mount *ump, uint32_t num_lb)
{
	/* NOT IMPLEMENTED, fail */
}
1607
1608
1609 static void
1610 udf_collect_free_space_for_vpart(struct udf_mount *ump,
1611 uint16_t vpart_num, uint32_t num_lb)
1612 {
1613 /* allocate mutex is helt */
1614
1615 /* only defined for metadata partitions */
1616 if (ump->vtop_tp[ump->node_part] != UDF_VTOP_TYPE_META) {
1617 DPRINTF(RESERVE, ("\tcan't grow/shrink; no metadata partitioning\n"));
1618 return;
1619 }
1620
1621 /* UDF 2.60 BD-R+POW? */
1622 if (ump->vtop_alloc[ump->node_part] == UDF_ALLOC_METASEQUENTIAL) {
1623 DPRINTF(RESERVE, ("\tUDF 2.60 BD-R+POW track grow not implemented yet\n"));
1624 return;
1625 }
1626
1627 if (ump->vtop_tp[vpart_num] == UDF_VTOP_TYPE_META) {
1628 /* try to grow the meta partition */
1629 DPRINTF(RESERVE, ("\ttrying to grow the meta partition\n"));
1630 /* as per [UDF 2.60/2.2.13.5] : extend bitmap and metadata file(s) */
1631 DPRINTF(NOTIMPL, ("\tgrowing meta partition not implemented yet\n"));
1632 } else {
1633 /* try to shrink the metadata partition */
1634 DPRINTF(RESERVE, ("\ttrying to shrink the meta partition\n"));
1635 /* as per [UDF 2.60/2.2.13.6] : either trunc or make sparse */
1636 num_lb = udf_trunc_metadatapart(ump, num_lb);
1637 if (num_lb)
1638 udf_sparsify_metadatapart(ump, num_lb);
1639 }
1640
1641 /* allocate mutex should still be helt */
1642 }
1643
1644 /* --------------------------------------------------------------------- */
1645
1646 /*
1647 * Allocate a buf on disc for direct write out. The space doesn't have to be
1648 * contiguous as the caller takes care of this.
1649 */
1650
/*
 * Allocate the on-disc blocks for a buffer that is written out directly.
 * The logical block number of each sector is recorded in `lmapping' and
 * the selected virtual partition is returned in `*vpart_nump'. For
 * userdata, FIDs and metadata bitmaps the allocation is also recorded in
 * the owning node's allocation descriptors.
 */
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
{
	struct udf_node  *udf_node = VTOI(buf->b_vp);
	int lb_size, blks, udf_c_type;
	int vpart_num, num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb = (buf->b_bcount + lb_size -1) / lb_size;	/* round up */
	/* NOTE(review): blks is computed but not used in this function */
	blks = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	/* select partition to record the buffer on */
	vpart_num = *vpart_nump = udf_get_record_vpart(ump, udf_c_type);

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, its allready allocated */
		if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
			return;

		/* allocate on its backing sequential partition */
		vpart_num = ump->data_part;
	}

	/* XXX can this still happen? */
	/* do allocation on the selected partition */
	error = udf_allocate_space(ump, udf_node, udf_c_type,
			vpart_num, num_lb, lmapping);
	if (error) {
		/*
		 * ARGH! we haven't done our accounting right! it should
		 * allways succeed.
		 */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) ||
	    (udf_c_type == UDF_C_FIDS) ||
	    (udf_c_type == UDF_C_METADATA_SBM))
	{
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
			udf_node->outstanding_bufs--;
		splx(s);
	}
}
1711
1712 /* --------------------------------------------------------------------- */
1713
1714 /*
1715 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1716 * possible (anymore); a2 returns the rest piece.
1717 */
1718
1719 static int
1720 udf_ads_merge(uint32_t max_len, uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1721 {
1722 uint32_t merge_len;
1723 uint32_t a1_len, a2_len;
1724 uint32_t a1_flags, a2_flags;
1725 uint32_t a1_lbnum, a2_lbnum;
1726 uint16_t a1_part, a2_part;
1727
1728 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1729 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1730 a1_lbnum = udf_rw32(a1->loc.lb_num);
1731 a1_part = udf_rw16(a1->loc.part_num);
1732
1733 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1734 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1735 a2_lbnum = udf_rw32(a2->loc.lb_num);
1736 a2_part = udf_rw16(a2->loc.part_num);
1737
1738 /* defines same space */
1739 if (a1_flags != a2_flags)
1740 return 1;
1741
1742 if (a1_flags != UDF_EXT_FREE) {
1743 /* the same partition */
1744 if (a1_part != a2_part)
1745 return 1;
1746
1747 /* a2 is successor of a1 */
1748 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1749 return 1;
1750 }
1751
1752 /* merge as most from a2 if possible */
1753 merge_len = MIN(a2_len, max_len - a1_len);
1754 a1_len += merge_len;
1755 a2_len -= merge_len;
1756 a2_lbnum += merge_len/lb_size;
1757
1758 a1->len = udf_rw32(a1_len | a1_flags);
1759 a2->len = udf_rw32(a2_len | a2_flags);
1760 a2->loc.lb_num = udf_rw32(a2_lbnum);
1761
1762 if (a2_len > 0)
1763 return 1;
1764
1765 /* there is space over to merge */
1766 return 0;
1767 }
1768
1769 /* --------------------------------------------------------------------- */
1770
/*
 * Wipe all allocation descriptors of a node: zero the descriptor area in
 * the (E)FE and in every attached allocation extent entry, resetting
 * l_ad, logblks_rec and the CRC lengths. Marks the node as being rebuilt.
 */
static void
udf_wipe_adslots(struct udf_node *udf_node)
{
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	uint64_t inflen, objsize;
	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
	uint8_t *data_pos;
	int extnr;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* descriptor area available after the extended attributes */
	max_l_ad = lb_size - dscr_size - l_ea;

	/* wipe fe/efe */
	memset(data_pos, 0, max_l_ad);
	crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
	if (fe) {
		fe->l_ad         = udf_rw32(0);
		fe->logblks_rec  = udf_rw64(0);
		fe->tag.desc_crc_len = udf_rw16(crclen);
	} else {
		efe->l_ad        = udf_rw32(0);
		efe->logblks_rec = udf_rw64(0);
		efe->tag.desc_crc_len = udf_rw16(crclen);
	}

	/* wipe all allocation extent entries */
	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
		ext = udf_node->ext[extnr];
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		data_pos = (uint8_t *) ext->data;
		max_l_ad = lb_size - dscr_size;
		memset(data_pos, 0, max_l_ad);
		ext->l_ad = udf_rw32(0);

		crclen = dscr_size - UDF_DESC_TAG_LENGTH;
		ext->tag.desc_crc_len = udf_rw16(crclen);
	}
	udf_node->i_flags |= IN_NODE_REBUILD;
}
1830
1831 /* --------------------------------------------------------------------- */
1832
/*
 * Read allocation descriptor number `slot' of the node into `*icb' as a
 * long_ad, following redirect extents into the allocation extent entries
 * when needed. Short ads are widened using the node's own partition
 * number. `*eof' is set when the slot lies beyond the last descriptor.
 */
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag  = &efe->icbtag;
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* widen the short_ad to a long_ad for inspection */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		/* slot numbering continues across the redirect */
		offset = offset - l_ad;
		ext  = udf_node->ext[extnr];
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len          = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num   = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}
1940
1941 /* --------------------------------------------------------------------- */
1942
/*
 * Write allocation descriptor `*icb' into slot `*slot' of the node,
 * either overwriting an existing entry or appending one past the end.
 * When the current descriptor is full, a new allocation extent entry
 * (AED) is allocated and linked in with a redirect; `*slot' is then
 * advanced by one to account for the inserted link. The recorded logical
 * blocks count is kept up to date. Returns 0 or an error from the AED
 * space reservation.
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr          *dscr, *extdscr;
	struct file_entry      *fe;
	struct extfile_entry   *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	vpart_num = udf_rw16(udf_node->loc.loc.part_num);

	/* determine what descriptor we are in */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr      = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea      = udf_rw32(fe->l_ea);
		l_ad_p    = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag  = &efe->icbtag;
		dscr      = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea      = udf_rw32(efe->l_ea);
		l_ad_p    = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad since it can be a synthesized one */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num   = udf_rw32(0);
	}

	/* if offset too big, we go to the allocation extensions */
	l_ad   = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is adressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		offset = offset - l_ad;

		ext  = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad   = udf_rw32(*l_ad_p);
	crclen = udf_rw16(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len          = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num   = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts; the old blocks are replaced */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/* have to append aed, see if we already have a spare one */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));

			error = udf_reserve_space(ump, NULL, UDF_C_NODE,
					vpart_num, 1, /* can fail */ false);
			if (error) {
				printf("UDF: couldn't reserve space for AED!\n");
				return error;
			}
			error = udf_allocate_space(ump, NULL, UDF_C_NODE,
					vpart_num, 1, &lmapping);
			lb_num = lmapping;
			if (error)
				panic("UDF: couldn't allocate AED!\n");

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num   = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
			ext = &extdscr->aee;

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size  = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw16(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr]     = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len    = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw16(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		crclen = udf_rw16(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len    = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	len = udf_rw32(icb->len);
	flags = UDF_EXT_FLAGS(len);
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed (i.e. on append past the end) */
	if (offset >= l_ad) {
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw16(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
2188
2189 /* --------------------------------------------------------------------- */
2190
2191 static void
2192 udf_count_alloc_exts(struct udf_node *udf_node)
2193 {
2194 struct long_ad s_ad;
2195 uint32_t lb_num, len, flags;
2196 uint16_t vpart_num;
2197 int slot, eof;
2198 int num_extents, extnr;
2199 int lb_size;
2200
2201 if (udf_node->num_extensions == 0)
2202 return;
2203
2204 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2205 /* count number of allocation extents in use */
2206 num_extents = 0;
2207 slot = 0;
2208 for (;;) {
2209 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2210 if (eof)
2211 break;
2212 len = udf_rw32(s_ad.len);
2213 flags = UDF_EXT_FLAGS(len);
2214
2215 if (flags == UDF_EXT_REDIRECT)
2216 num_extents++;
2217
2218 slot++;
2219 }
2220
2221 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
2222 num_extents));
2223
2224 /* XXX choice: we could delay freeing them on node writeout */
2225 /* free excess entries */
2226 extnr = num_extents;
2227 for (;extnr < udf_node->num_extensions; extnr++) {
2228 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
2229 /* free dscriptor */
2230 s_ad = udf_node->ext_loc[extnr];
2231 udf_free_logvol_dscr(udf_node->ump, &s_ad,
2232 udf_node->ext[extnr]);
2233 udf_node->ext[extnr] = NULL;
2234
2235 /* free disc space */
2236 lb_num = udf_rw32(s_ad.loc.lb_num);
2237 vpart_num = udf_rw16(s_ad.loc.part_num);
2238 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
2239
2240 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
2241 }
2242
2243 /* set our new number of allocation extents */
2244 udf_node->num_extensions = num_extents;
2245 }
2246
2247
2248 /* --------------------------------------------------------------------- */
2249
2250 /*
2251 * Adjust the node's allocation descriptors to reflect the new mapping; do
2252 * take note that we might glue to existing allocation descriptors.
2253 *
2254 * XXX Note there can only be one allocation being recorded/mount; maybe
2255 * explicit allocation in shedule thread?
2256 */
2257
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t max_len;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset, replace_len, replace;
	int addr_type, icbflags;
//	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));

#if 0
	/* XXX disable sanity check for now */
	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;
#endif

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
	/* largest extent length, rounded down to whole logical blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	/* a node has either a (short) file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	/* [from, till) is the byte range of the file this buffer covers */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot     = 0;
	cpy_slot = 0;
	foffset  = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			/* ran out of slots before reaching `from': corrupt */
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		/* redirects don't contribute to the file offset */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap started for the pop phase (4) */
	restart_slot    = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	/* note: s_ad/flags still describe the slot phase 1 broke out on */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start  = mapping[lb_num];
		run_length = 1;
		/* coalesce physically consecutive (or identical) blocks
		 * into one extent */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num   = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	/* re-walk the old slots covering [from, till) and free any old
	 * backing space now superseded by the new mapping */
	slot    = restart_slot;
	foffset = restart_foffset;

	replace_len = till - foffset;	/* total amount of bytes to pop */
	slot_offset = from - foffset;	/* offset in first encounted slot */
	KASSERT((slot_offset % lb_size) == 0);

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
				"replace_len %d, "
				"vp %d, lb %d, len %d, flags %d\n",
			slot, slot_offset, replace_len,
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* adjust for slot offset */
		/* the head of the first slot was kept in phase 2 */
		if (slot_offset) {
			DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
			lb_num      += slot_offset / lb_size;
			len         -= slot_offset;
			foffset     += slot_offset;
			replace_len -= slot_offset;

			/* mark adjusted */
			slot_offset = 0;
		}

		/* advance for (the rest of) this slot */
		replace = MIN(len, replace_len);
		DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));

		/* advance for this slot */
		if (replace) {
			/* note: dont round DOWN on num_lb since we then
			 * forget the last partial one */
			num_lb = (replace + lb_size - 1) / lb_size;
			if (flags != UDF_EXT_FREE) {
				/* return the replaced blocks to free space */
				udf_free_allocated_space(ump, lb_num,
					udf_rw16(s_ad.loc.part_num), num_lb);
			}
			lb_num      += num_lb;
			len         -= replace;
			foffset     += replace;
			replace_len -= replace;
		}

		/* do we have a slot tail ? */
		if (len) {
			KASSERT(foffset % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len        = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			if (flags == UDF_EXT_FREE)
				s_ad.loc.lb_num = udf_rw32(0);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}

		slot++;
	}

	/* 5) copy remainder */
	/* untouched slots after the replaced range go to the stack as-is */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(max_len, lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
			"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	/* release allocation extent descriptors no longer referenced */
	udf_count_alloc_exts(udf_node);

	/* the node's descriptors should now be sane */
	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
2620
2621 /* --------------------------------------------------------------------- */
2622
/*
 * Grow the given node to `new_size' bytes.
 *
 * Internally allocated (embedded) nodes are grown in place when the new
 * data still fits in the descriptor; otherwise the embedded data is
 * evacuated, the node is converted to short/long allocation descriptors
 * and the evacuated data is written back.  The grown tail is recorded as
 * UDF_EXT_FREE extents, appended in chunks of at most `max_len' bytes.
 *
 * Returns 0 on success or an errno-style error.
 * Locks/unlocks the node; may sleep (malloc M_WAITOK, vn_rdwr).
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag  *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, unit_size, dscr_size, crclen, lastblock_grow;
	uint32_t icbflags, len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t my_part, dst_part;
	uint8_t *data_pos, *evacuated_data;
	int addr_type;
	int slot, cpy_slot;
	int eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);

	/* max_len in unit's IFF its a metadata node or metadata mirror node */
	unit_size = lb_size;
	if ((udf_node == ump->metadata_node) || (udf_node == ump->metadatamirror_node))
		unit_size = ump->metadata_alloc_unit_size * lb_size;
	max_len = ((UDF_EXT_MAXLEN / unit_size) * unit_size);

	/* a node has either a (short) file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr       = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
	} else {
		dscr       = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
	}
	/* allocation descriptors live after the extended attributes */
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw16(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw16(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif
	
			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0, 
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);
			/* NOTE(review): `error' from this read is never
			 * checked and is overwritten further down — confirm
			 * whether a read failure should abort the grow */

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc and select type */
		/* long allocs are only needed when the data ends up on
		 * another vpart than the node itself */
		my_part  = udf_rw16(udf_node->loc.loc.part_num);
		dst_part = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
		addr_type = UDF_ICB_SHORT_ALLOC;
		if (dst_part != my_part)
			addr_type = UDF_ICB_LONG_ALLOC;

		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= addr_type;
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(max_len, lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0, 
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	/* release allocation extent descriptors no longer referenced */
	udf_count_alloc_exts(udf_node);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2896
2897 /* --------------------------------------------------------------------- */
2898
/*
 * Shrink the given node to `new_size' bytes.
 *
 * Internally allocated (embedded) nodes are shrunk in place.  Otherwise
 * the allocation descriptors are rewritten: extents before the new end
 * are kept, the extent straddling `new_size' is truncated and its freed
 * blocks released, and all extents past it are deleted.  When the new
 * size is zero the node is converted back to internal allocation.
 *
 * Returns 0 on success or an errno-style error.
 * Locks/unlocks the node; may sleep (malloc M_WAITOK).
 */
int
udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
{
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag  *icbtag;
	struct long_ad c_ad, s_ad, *node_ad_cpy;
	uint64_t size_diff, old_size, inflen, objsize;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, unit_size, dscr_size, crclen;
	uint32_t slot_offset, slot_offset_lb;
	uint32_t len, flags, max_len;
	uint32_t num_lb, lb_num;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type;
	int slot, cpy_slot, cpy_slots;
	int eof, error;

	DPRINTF(ALLOC, ("udf_shrink_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);

	/* max_len in unit's IFF its a metadata node or metadata mirror node */
	unit_size = lb_size;
	if ((udf_node == ump->metadata_node) || (udf_node == ump->metadatamirror_node))
		unit_size = ump->metadata_alloc_unit_size * lb_size;
	max_len = ((UDF_EXT_MAXLEN / unit_size) * unit_size);

	/* do the work */
	/* a node has either a (short) file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = old_size - new_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	/* shrink the node to its new size */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* only reflect size change directly in the node */
		KASSERT(new_size <= max_l_ad);
		inflen  -= size_diff;
		objsize -= size_diff;
		l_ad    -= size_diff;
		crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
		if (fe) {
			fe->inf_len   = udf_rw64(inflen);
			fe->l_ad      = udf_rw32(l_ad);
			fe->tag.desc_crc_len = udf_rw16(crclen);
		} else {
			efe->inf_len  = udf_rw64(inflen);
			efe->obj_size = udf_rw64(objsize);
			efe->l_ad     = udf_rw32(l_ad);
			efe->tag.desc_crc_len = udf_rw16(crclen);
		}
		error = 0;

		/* clear the space in the descriptor */
		KASSERT(old_size > new_size);
		memset(data_pos + new_size, 0, old_size - new_size);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */

		/* set new size for uvm */
		uvm_vnp_setsize(vp, new_size);

		udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
		UDF_UNLOCK_NODE(udf_node, 0);

		KASSERT(new_inflen == orig_inflen - size_diff);
		KASSERT(new_lbrec == orig_lbrec);
		KASSERT(new_lbrec == 0);

		return 0;
	}

	/* setup node cleanup extents copy space */
	node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
		M_UDFMNT, M_WAITOK);
	memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);

	/*
	 * Shrink the node by releasing the allocations and truncate the last
	 * allocation to the new size. If the new size fits into the
	 * allocation descriptor itself, transform it into an
	 * UDF_ICB_INTERN_ALLOC.
	 */
	slot     = 0;
	cpy_slot = 0;
	foffset  = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			/* ran out of slots before reaching new_size: corrupt */
			DPRINTF(WRITE,
				("Shrink node failed: "
				 "encountered EOF\n"));
			error = EINVAL;
			goto errorout; /* panic? */
		}
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		/* redirects don't contribute to the file offset */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > new_size)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	slot_offset = new_size - foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	/* note: s_ad/flags/len still describe the slot phase 1 broke out on */
	if (slot_offset > 0) {
		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		if (flags == UDF_EXT_ALLOCATED) {
			/* calculate extent in lb, and offset in lb */
			num_lb = (len + lb_size -1) / lb_size;
			/* round UP so a partially used last block is kept */
			slot_offset_lb = (slot_offset + lb_size -1) / lb_size;

			/* adjust our slot */
			lb_num += slot_offset_lb;
			num_lb -= slot_offset_lb;

			udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
		}

		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;
		slot++;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 3) delete remainder */
	/* every extent fully past new_size is released, not copied */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t3: delete remainder "
			"vp %d lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		if (flags == UDF_EXT_ALLOCATED) {
			lb_num    = udf_rw32(s_ad.loc.lb_num);
			vpart_num = udf_rw16(s_ad.loc.part_num);
			num_lb    = (len + lb_size - 1) / lb_size;

			udf_free_allocated_space(ump, lb_num, vpart_num,
				num_lb);
		}

		slot++;
	}

	/* 4) if it will fit into the descriptor then convert */
	if (new_size < max_l_ad) {
		/*
		 * resque/evacuate old piece by reading it in, and convert it
		 * to internal alloc.
		 */
		if (new_size == 0) {
			/* XXX/TODO only for zero sizing now */
			udf_wipe_adslots(udf_node);

			icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
			icbflags |=  UDF_ICB_INTERN_ALLOC;
			icbtag->flags = udf_rw16(icbflags);

			inflen  -= size_diff;	KASSERT(inflen == 0);
			objsize -= size_diff;
			l_ad     = new_size;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw16(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw16(crclen);
			}
			/* eventually copy in evacuated piece */
			/* set new size for uvm */
			uvm_vnp_setsize(vp, new_size);

			free(node_ad_cpy, M_UDFMNT);
			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			UDF_UNLOCK_NODE(udf_node, 0);

			KASSERT(new_inflen == orig_inflen - size_diff);
			KASSERT(new_inflen == 0);
			KASSERT(new_lbrec == 0);

			return 0;
		}

		printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
	}

	/* 5) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 6) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(max_len, lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error)
				goto errorout; /* panic? */
			c_ad = s_ad;
			slot++;
		}
	}

	/* 7) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
			"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error)
			goto errorout; /* panic? */
		;
	}

	inflen  -= size_diff;
	objsize -= size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	/* set new size for uvm */
	uvm_vnp_setsize(vp, new_size);

errorout:
	free(node_ad_cpy, M_UDFMNT);

	/* release allocation extent descriptors no longer referenced */
	udf_count_alloc_exts(udf_node);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen - size_diff);

	return error;
}
3243
3244