udf_allocation.c revision 1.11 1 /* $NetBSD: udf_allocation.c,v 1.11 2008/07/07 18:45:26 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.11 2008/07/07 18:45:26 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
81 /*
82 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
83 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
84 * since actions are most likely sequencial and thus seeking doesn't need
85 * searching for the same or adjacent position again.
86 */
87
88 /* --------------------------------------------------------------------- */
89
90 #if 1
91 #if 1
/*
 * Debug helper: dump the allocation administration of a udf_node to the
 * console.  Only active when the UDF_DEBUG_NODEDUMP bit is set in
 * udf_verbose.  Walks all allocation descriptor slots until the
 * end-of-file marker is returned by udf_get_adslot().
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data embedded in the (e)fe itself; no extents to walk */
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen = %"PRIu64"\n", inflen);
	printf("\t\t");

	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		/* top two bits of the length word encode the extent type */
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags>>30);
		printf("] ");

		if (flags == UDF_EXT_REDIRECT) {
			/* this slot chains to a new allocation extent block */
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
160 #else
161 #define udf_node_dump(a)
162 #endif
163
164
/*
 * Paranoia check: verify that logical blocks [lb_num, lb_num + num_lb) on
 * virtual partition vpart_num are really marked as allocated (bit cleared)
 * in the backing partition's unallocated-space bitmap.  Diagnostic only:
 * mismatches are printed, the KASSERT on the result is commented out.
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			   "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits; a set bit means the block is FREE */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit  = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}
234
235
/*
 * Sanity-check a node's allocation administration: recount the information
 * length and the number of logical blocks recorded by walking all
 * allocation descriptors, and KASSERT the counts against the values stored
 * in the (extended) file entry.  Allocated extents are additionally cross
 * checked against the partition bitmaps via udf_assert_allocated().
 *
 * The recomputed totals are returned through cnt_inflen / cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	int dscr_size, lb_size, flags, whole_lb;
	int slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size  = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad       = udf_rw32(fe->l_ad);
		l_ea       = udf_rw32(fe->l_ea);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size  = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad       = udf_rw32(efe->l_ad);
		l_ea       = udf_rw32(efe->l_ea);
	}
	/* space left in the descriptor for allocation descriptors */
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* reset counters */
	*cnt_inflen     = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: l_ad bytes of descriptor ARE the file */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		/* only the LAST extent may be a partial logical block */
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* redirect extents must span exactly one block */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
329 #else
330 #define udf_node_sanity_check(a, b, c)
331 #endif
332
333 /* --------------------------------------------------------------------- */
334
335 int
336 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
337 uint32_t *lb_numres, uint32_t *extres)
338 {
339 struct part_desc *pdesc;
340 struct spare_map_entry *sme;
341 struct long_ad s_icb_loc;
342 uint64_t foffset, end_foffset;
343 uint32_t lb_size, len;
344 uint32_t lb_num, lb_rel, lb_packet;
345 uint32_t udf_rw32_lbmap, ext_offset;
346 uint16_t vpart;
347 int rel, part, error, eof, slot, flags;
348
349 assert(ump && icb_loc && lb_numres);
350
351 vpart = udf_rw16(icb_loc->loc.part_num);
352 lb_num = udf_rw32(icb_loc->loc.lb_num);
353 if (vpart > UDF_VTOP_RAWPART)
354 return EINVAL;
355
356 translate_again:
357 part = ump->vtop[vpart];
358 pdesc = ump->partitions[part];
359
360 switch (ump->vtop_tp[vpart]) {
361 case UDF_VTOP_TYPE_RAW :
362 /* 1:1 to the end of the device */
363 *lb_numres = lb_num;
364 *extres = INT_MAX;
365 return 0;
366 case UDF_VTOP_TYPE_PHYS :
367 /* transform into its disc logical block */
368 if (lb_num > udf_rw32(pdesc->part_len))
369 return EINVAL;
370 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
371
372 /* extent from here to the end of the partition */
373 *extres = udf_rw32(pdesc->part_len) - lb_num;
374 return 0;
375 case UDF_VTOP_TYPE_VIRT :
376 /* only maps one logical block, lookup in VAT */
377 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
378 return EINVAL;
379
380 /* lookup in virtual allocation table file */
381 mutex_enter(&ump->allocate_mutex);
382 error = udf_vat_read(ump->vat_node,
383 (uint8_t *) &udf_rw32_lbmap, 4,
384 ump->vat_offset + lb_num * 4);
385 mutex_exit(&ump->allocate_mutex);
386
387 if (error)
388 return error;
389
390 lb_num = udf_rw32(udf_rw32_lbmap);
391
392 /* transform into its disc logical block */
393 if (lb_num > udf_rw32(pdesc->part_len))
394 return EINVAL;
395 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
396
397 /* just one logical block */
398 *extres = 1;
399 return 0;
400 case UDF_VTOP_TYPE_SPARABLE :
401 /* check if the packet containing the lb_num is remapped */
402 lb_packet = lb_num / ump->sparable_packet_size;
403 lb_rel = lb_num % ump->sparable_packet_size;
404
405 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
406 sme = &ump->sparing_table->entries[rel];
407 if (lb_packet == udf_rw32(sme->org)) {
408 /* NOTE maps to absolute disc logical block! */
409 *lb_numres = udf_rw32(sme->map) + lb_rel;
410 *extres = ump->sparable_packet_size - lb_rel;
411 return 0;
412 }
413 }
414
415 /* transform into its disc logical block */
416 if (lb_num > udf_rw32(pdesc->part_len))
417 return EINVAL;
418 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
419
420 /* rest of block */
421 *extres = ump->sparable_packet_size - lb_rel;
422 return 0;
423 case UDF_VTOP_TYPE_META :
424 /* we have to look into the file's allocation descriptors */
425
426 /* use metadatafile allocation mutex */
427 lb_size = udf_rw32(ump->logical_vol->lb_size);
428
429 UDF_LOCK_NODE(ump->metadata_node, 0);
430
431 /* get first overlapping extent */
432 foffset = 0;
433 slot = 0;
434 for (;;) {
435 udf_get_adslot(ump->metadata_node,
436 slot, &s_icb_loc, &eof);
437 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
438 "len = %d, lb_num = %d, part = %d\n",
439 slot, eof,
440 UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
441 UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
442 udf_rw32(s_icb_loc.loc.lb_num),
443 udf_rw16(s_icb_loc.loc.part_num)));
444 if (eof) {
445 DPRINTF(TRANSLATE,
446 ("Meta partition translation "
447 "failed: can't seek location\n"));
448 UDF_UNLOCK_NODE(ump->metadata_node, 0);
449 return EINVAL;
450 }
451 len = udf_rw32(s_icb_loc.len);
452 flags = UDF_EXT_FLAGS(len);
453 len = UDF_EXT_LEN(len);
454
455 if (flags == UDF_EXT_REDIRECT) {
456 slot++;
457 continue;
458 }
459
460 end_foffset = foffset + len;
461
462 if (end_foffset > lb_num * lb_size)
463 break; /* found */
464 foffset = end_foffset;
465 slot++;
466 }
467 /* found overlapping slot */
468 ext_offset = lb_num * lb_size - foffset;
469
470 /* process extent offset */
471 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
472 vpart = udf_rw16(s_icb_loc.loc.part_num);
473 lb_num += (ext_offset + lb_size -1) / lb_size;
474 len -= ext_offset;
475 ext_offset = 0;
476
477 flags = UDF_EXT_FLAGS(s_icb_loc.len);
478
479 UDF_UNLOCK_NODE(ump->metadata_node, 0);
480 if (flags != UDF_EXT_ALLOCATED) {
481 DPRINTF(TRANSLATE, ("Metadata partition translation "
482 "failed: not allocated\n"));
483 return EINVAL;
484 }
485
486 /*
487 * vpart and lb_num are updated, translate again since we
488 * might be mapped on sparable media
489 */
490 goto translate_again;
491 default:
492 printf("UDF vtop translation scheme %d unimplemented yet\n",
493 ump->vtop_tp[vpart]);
494 }
495
496 return EINVAL;
497 }
498
499 /* --------------------------------------------------------------------- */
500
/*
 * Translate an extent (in logical_blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */
505
/*
 * Translate 'num_lb' logical blocks of a file, starting at file logical
 * block 'from', into absolute disc sector numbers written to 'map'.
 *
 * Special map values: UDF_TRANS_INTERN marks data embedded in the node
 * descriptor itself; UDF_TRANS_ZERO marks blocks that read as zeroes
 * (free / allocated-but-not-used extents).
 *
 * Returns 0 on success, ENOENT when no node is given, or EINVAL when the
 * requested range can't be found in the allocation descriptors.
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot    = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			/* chains to a new allocation extent; skip slot */
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; byte offset of 'from' inside this extent */
	ext_offset = from * lb_size - foffset;

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		overlap  = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while() loop is necessary since the extent
		 * that udf_translate_vtop() returns doesn't have to span
		 * the whole extent; translate block-run by block-run.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* sparse blocks; read back as zeroes */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				t_ad.loc.lb_num   = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
673
674 /* --------------------------------------------------------------------- */
675
676 static int
677 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
678 {
679 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
680 uint8_t *blob;
681 int entry, chunk, found, error;
682
683 KASSERT(ump);
684 KASSERT(ump->logical_vol);
685
686 lb_size = udf_rw32(ump->logical_vol->lb_size);
687 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
688
689 /* TODO static allocation of search chunk */
690
691 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
692 found = 0;
693 error = 0;
694 entry = 0;
695 do {
696 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
697 if (chunk <= 0)
698 break;
699 /* load in chunk */
700 error = udf_vat_read(ump->vat_node, blob, chunk,
701 ump->vat_offset + lb_num * 4);
702
703 if (error)
704 break;
705
706 /* search this chunk */
707 for (entry=0; entry < chunk /4; entry++, lb_num++) {
708 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
709 lb_map = udf_rw32(udf_rw32_lbmap);
710 if (lb_map == 0xffffffff) {
711 found = 1;
712 break;
713 }
714 }
715 } while (!found);
716 if (error) {
717 printf("udf_search_free_vatloc: error reading in vat chunk "
718 "(lb %d, size %d)\n", lb_num, chunk);
719 }
720
721 if (!found) {
722 /* extend VAT */
723 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
724 lb_num = ump->vat_entries;
725 ump->vat_entries++;
726 }
727
728 /* mark entry with initialiser just in case */
729 lb_map = udf_rw32(0xfffffffe);
730 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
731 ump->vat_offset + lb_num *4);
732 ump->vat_last_free_lb = lb_num;
733
734 free(blob, M_UDFTEMP);
735 *lbnumres = lb_num;
736 return 0;
737 }
738
739
/*
 * Allocate up to *num_lb logical blocks from a free-space bitmap.  In the
 * bitmap a SET bit means the block is free; allocating clears the bit.
 * For every block allocated, its logical number is appended to lmappos and
 * its physical (ptov-translated) number to pmappos.  On return *num_lb
 * holds the number of blocks that could NOT be allocated (0 on success).
 *
 * The search resumes at the remembered per-purpose position and makes at
 * most two passes (wrapping to offset 0 once).  Caller must hold the
 * allocate mutex.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t  diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	/* round down to a byte boundary for the byte-wise scan */
	offset &= ~7;
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos  = bitmap->bits + offset/8;
			bit   = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				/* byte fully allocated; next byte */
				offset += 8;
				continue;
			}
			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			/* clear bit = mark block allocated */
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*pmappos++ = lb_num + ptov;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember where to continue searching next time */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
789
790
/*
 * Return num_lb logical blocks starting at lb_num to the bitmap by SETTING
 * their bits (set bit == free).  KASSERTs fire when a block is already
 * marked free (double free).  Works in three phases: leading bits up to a
 * byte boundary, whole bytes, and trailing bits.  Caller must hold the
 * allocate mutex.
 */
static void
udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
{
	uint32_t offset;
	uint32_t bit, bitval;
	uint8_t *bpos;

	offset = lb_num;

	/* starter bits: free up to the next byte boundary */
	bpos = bitmap->bits + offset/8;
	bit  = offset % 8;
	while ((bit != 0) && (num_lb > 0)) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
	if (num_lb == 0)
		return;

	/* whole bytes: 8 blocks at a time */
	KASSERT(bit == 0);
	bpos = bitmap->bits + offset / 8;
	while (num_lb >= 8) {
		KASSERT((*bpos == 0));
		DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
		*bpos = 255;
		offset += 8; num_lb -= 8;
		bpos++;
	}

	/* stop bits: remaining blocks in the last byte */
	KASSERT(num_lb < 8);
	bit = 0;
	while (num_lb > 0) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
}
839
840
/*
 * Allocate a contiguous sequence of sector numbers using the strategy
 * given by alloc_type (VAT slot, sequential recordable, or space bitmap).
 * The chosen virtual partition is returned in *alloc_partp; the logical
 * and physical block numbers of the allocation are written to lmapping
 * and pmapping respectively (num_lb entries each).
 *
 * Returns 0 on success or ENOSPC/read errors; takes the allocate mutex.
 */
static int
udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
	int num_lb, uint16_t *alloc_partp,
	uint64_t *lmapping, uint64_t *pmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos, *pmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_part;
	int error;

	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* metadata and data may live on different partitions/tracks */
	if (ismetadata) {
		alloc_part  = ump->metadata_part;
		alloc_track = &ump->metadata_track;
		other_track = &ump->data_track;
	} else {
		alloc_part  = ump->data_part;
		alloc_track = &ump->data_track;
		other_track = &ump->metadata_track;
	}

	*alloc_partp = alloc_part;

	error = 0;
	/* XXX check disc space */

	pdesc = ump->partitions[ump->vtop[alloc_part]];
	lmappos = lmapping;
	pmappos = pmapping;

	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;
			*pmappos = 0;	/* will get late-allocated */
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*pmappos++ = alloc_track->next_writable;
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}
		/* keep both track records coherent when they are the same */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		ptov = udf_rw32(pdesc->start_loc);

		/* allocate on unallocated bits page */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[alloc_part];
		udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
			pmappos, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now; alloc_num_lb blocks
			 * were left unallocated */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity desc */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + alloc_part;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :
	case UDF_ALLOC_METASEQUENTIAL :
	case UDF_ALLOC_RELAXEDSEQUENTIAL :
		printf("ALERT: udf_allocate_space : allocation %d "
				"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		pmappos = pmapping;
		printf("udf_allocate_space, mapping l->p:\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("\t%"PRIu64" -> %"PRIu64"\n",
				*lmappos++, *pmappos++);
		}
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
957
958 /* --------------------------------------------------------------------- */
959
/*
 * Return num_lb blocks starting at (vpart_num, lb_num) to the free space
 * administration: bitmap partitions get their bits set back and the
 * logical volume integrity free count adjusted; virtual (VAT) partitions
 * get their VAT entry invalidated.  Takes the allocate mutex.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by writing the 'free' marker */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1035
1036 /* --------------------------------------------------------------------- */
1037
1038 int
1039 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
1040 uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
1041 {
1042 int ismetadata, alloc_type;
1043
1044 ismetadata = (udf_c_type == UDF_C_NODE);
1045 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
1046
1047 #ifdef DIAGNOSTIC
1048 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
1049 panic("udf_pre_allocate_space: bad c_type on VAT!\n");
1050 }
1051 #endif
1052
1053 /* reserve size for VAT allocated data */
1054 if (alloc_type == UDF_ALLOC_VAT) {
1055 mutex_enter(&ump->allocate_mutex);
1056 ump->uncomitted_lb += num_lb;
1057 mutex_exit(&ump->allocate_mutex);
1058 }
1059
1060 return udf_allocate_space(ump, ismetadata, alloc_type,
1061 num_lb, alloc_partp, lmapping, pmapping);
1062 }
1063
1064 /* --------------------------------------------------------------------- */
1065
1066 /*
1067 * Allocate a buf on disc for direct write out. The space doesn't have to be
1068 * contiguous as the caller takes care of this.
1069 */
1070
/*
 * Late-allocate disc space for a buf that is about to be written out.
 * For each sector in the buf a sector is allocated on disc and recorded
 * in lmapping/pmapping; buf->b_blkno is pointed at the first physical
 * block.  Userdata and FID bufs additionally get the allocation recorded
 * in their node via udf_record_allocation_in_node() (using node_ad_cpy as
 * scratch space) and have their outstanding-bufs count decremented.
 *
 * Allocation failure here is fatal (panic): the space accounting done at
 * pre-allocation time guaranteed room.
 */
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	uint16_t vpart_num;
	int lb_size, blks, udf_c_type;
	int ismetadata, alloc_type;
	int num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size    = udf_rw32(ump->logical_vol->lb_size);
	num_lb     = (buf->b_bcount + lb_size -1) / lb_size;
	blks       = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	ismetadata = (udf_c_type == UDF_C_NODE);
	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;

#ifdef DIAGNOSTIC
	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
		panic("udf_late_allocate_buf: bad c_type on VAT!\n");
	}
#endif

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, its allready allocated */
		if (alloc_type != UDF_ALLOC_VAT)
			return;

		/* allocate sequential */
		alloc_type = UDF_ALLOC_SEQUENTIAL;
	}

	error = udf_allocate_space(ump, ismetadata, alloc_type,
			num_lb, &vpart_num, lmapping, pmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* commit our sector count; clamp to avoid counter underflow */
	mutex_enter(&ump->allocate_mutex);
	if (num_lb > ump->uncomitted_lb) {
		ump->uncomitted_lb = 0;
	} else {
		ump->uncomitted_lb -= num_lb;
	}
	mutex_exit(&ump->allocate_mutex);

	/* b_blkno is in DEV_BSIZE units */
	buf->b_blkno = (*pmapping) * blks;

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
			udf_node->outstanding_bufs--;
		splx(s);
	}
}
1142
1143 /* --------------------------------------------------------------------- */
1144
1145 /*
1146 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1147 * possible (anymore); a2 returns the rest piece.
1148 */
1149
1150 static int
1151 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1152 {
1153 uint32_t max_len, merge_len;
1154 uint32_t a1_len, a2_len;
1155 uint32_t a1_flags, a2_flags;
1156 uint32_t a1_lbnum, a2_lbnum;
1157 uint16_t a1_part, a2_part;
1158
1159 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1160
1161 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1162 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1163 a1_lbnum = udf_rw32(a1->loc.lb_num);
1164 a1_part = udf_rw16(a1->loc.part_num);
1165
1166 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1167 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1168 a2_lbnum = udf_rw32(a2->loc.lb_num);
1169 a2_part = udf_rw16(a2->loc.part_num);
1170
1171 /* defines same space */
1172 if (a1_flags != a2_flags)
1173 return 1;
1174
1175 if (a1_flags != UDF_EXT_FREE) {
1176 /* the same partition */
1177 if (a1_part != a2_part)
1178 return 1;
1179
1180 /* a2 is successor of a1 */
1181 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1182 return 1;
1183 }
1184
1185 /* merge as most from a2 if possible */
1186 merge_len = MIN(a2_len, max_len - a1_len);
1187 a1_len += merge_len;
1188 a2_len -= merge_len;
1189 a2_lbnum += merge_len/lb_size;
1190
1191 a1->len = udf_rw32(a1_len | a1_flags);
1192 a2->len = udf_rw32(a2_len | a2_flags);
1193 a2->loc.lb_num = udf_rw32(a2_lbnum);
1194
1195 if (a2_len > 0)
1196 return 1;
1197
1198 /* there is space over to merge */
1199 return 0;
1200 }
1201
1202 /* --------------------------------------------------------------------- */
1203
1204 static void
1205 udf_wipe_adslots(struct udf_node *udf_node)
1206 {
1207 struct file_entry *fe;
1208 struct extfile_entry *efe;
1209 struct alloc_ext_entry *ext;
1210 uint64_t inflen, objsize;
1211 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1212 uint8_t *data_pos;
1213 int extnr;
1214
1215 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1216
1217 fe = udf_node->fe;
1218 efe = udf_node->efe;
1219 if (fe) {
1220 inflen = udf_rw64(fe->inf_len);
1221 objsize = inflen;
1222 dscr_size = sizeof(struct file_entry) -1;
1223 l_ea = udf_rw32(fe->l_ea);
1224 l_ad = udf_rw32(fe->l_ad);
1225 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1226 } else {
1227 inflen = udf_rw64(efe->inf_len);
1228 objsize = udf_rw64(efe->obj_size);
1229 dscr_size = sizeof(struct extfile_entry) -1;
1230 l_ea = udf_rw32(efe->l_ea);
1231 l_ad = udf_rw32(efe->l_ad);
1232 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1233 }
1234 max_l_ad = lb_size - dscr_size - l_ea;
1235
1236 /* wipe fe/efe */
1237 memset(data_pos, 0, max_l_ad);
1238 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1239 if (fe) {
1240 fe->l_ad = udf_rw32(0);
1241 fe->logblks_rec = udf_rw64(0);
1242 fe->tag.desc_crc_len = udf_rw32(crclen);
1243 } else {
1244 efe->l_ad = udf_rw32(0);
1245 efe->logblks_rec = udf_rw64(0);
1246 efe->tag.desc_crc_len = udf_rw32(crclen);
1247 }
1248
1249 /* wipe all allocation extent entries */
1250 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1251 ext = udf_node->ext[extnr];
1252 dscr_size = sizeof(struct alloc_ext_entry) -1;
1253 data_pos = (uint8_t *) ext->data;
1254 max_l_ad = lb_size - dscr_size;
1255 memset(data_pos, 0, max_l_ad);
1256 ext->l_ad = udf_rw32(0);
1257
1258 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1259 ext->tag.desc_crc_len = udf_rw32(crclen);
1260 }
1261 udf_node->i_flags |= IN_NODE_REBUILD;
1262 }
1263
1264 /* --------------------------------------------------------------------- */
1265
/*
 * Fetch allocation descriptor number `slot' of the node into `icb'.
 * Slots are counted from the start of the (e)fe's embedded allocation
 * descriptor area; when the requested slot lies beyond the recorded
 * length (l_ad), the chain of allocation extent descriptors is followed
 * through their UDF_EXT_REDIRECT tail entries. *eof is set when the
 * slot is past the last recorded descriptor or when the node is
 * intern (embedded) allocated.
 */
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is recorded in either a fe or an efe, never both */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	/* descriptor stride depends on the node's allocation scheme */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* promote short_ad to long_ad using the node's own
			 * partition number */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		/* offset becomes relative to the next descriptor's ad area */
		offset = offset - l_ad;
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}
1373
1374 /* --------------------------------------------------------------------- */
1375
/*
 * Write allocation descriptor `icb' at slot *slot of the node, appending
 * a new allocation extent descriptor (AED) when the current descriptor
 * has no room left for both the element and a possible redirect. Updates
 * l_ad, desc_crc_len and logblks_rec on the fly. When a redirect link is
 * inserted, *slot is incremented so the caller's slot numbering stays in
 * step. Returns 0 on success or the error from allocating a new AED.
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping, pmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	/* a node is recorded in either a fe or an efe, never both */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr      = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea      = udf_rw32(fe->l_ea);
		l_ad_p    = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag    = &efe->icbtag;
		dscr      = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea      = udf_rw32(efe->l_ea);
		l_ad_p    = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* appending on an intern (embedded) allocated node is a caller bug */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	/* descriptor stride depends on the node's allocation scheme */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad */
#ifdef DIAGNOSTIC
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		if ((udf_rw16(icb->loc.part_num) != 0) ||
		    (udf_rw32(icb->loc.lb_num) != 0))
			printf("UDF: warning, cleaning long_ad marked free\n");
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num   = udf_rw32(0);
	}
#endif

	/* if offset too big, we go to the allocation extensions */
	l_ad   = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* promote short_ad to long_ad using the node's own
			 * partition number */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only the slot one past the last one is addressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		offset = offset - l_ad;

		ext  = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len          = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num   = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts; the old blocks are replaced */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/* have to append aed, see if we already have a spare one */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));
			error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
					&vpart_num, &lmapping, &pmapping);
			lb_num = lmapping;
			if (error)
				return error;

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb,
				(union dscrptr **) &ext);

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size  = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw32(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr]     = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len    = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		crclen = udf_rw32(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len    = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* appended a new element: adjust l_ad and crclen when needed */
	if (offset >= l_ad) {
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1617
1618 /* --------------------------------------------------------------------- */
1619
1620 static void
1621 udf_count_alloc_exts(struct udf_node *udf_node)
1622 {
1623 struct long_ad s_ad;
1624 uint32_t lb_num, len, flags;
1625 uint16_t vpart_num;
1626 int slot, eof;
1627 int num_extents, extnr;
1628 int lb_size;
1629
1630 if (udf_node->num_extensions == 0)
1631 return;
1632
1633 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1634 /* count number of allocation extents in use */
1635 num_extents = 0;
1636 slot = 0;
1637 for (;;) {
1638 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1639 if (eof)
1640 break;
1641 len = udf_rw32(s_ad.len);
1642 flags = UDF_EXT_FLAGS(len);
1643
1644 if (flags == UDF_EXT_REDIRECT)
1645 num_extents++;
1646
1647 slot++;
1648 }
1649
1650 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1651 num_extents));
1652
1653 /* XXX choice: we could delay freeing them on node writeout */
1654 /* free excess entries */
1655 extnr = num_extents;
1656 for (;extnr < udf_node->num_extensions; extnr++) {
1657 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1658 /* free dscriptor */
1659 s_ad = udf_node->ext_loc[extnr];
1660 udf_free_logvol_dscr(udf_node->ump, &s_ad,
1661 udf_node->ext[extnr]);
1662 udf_node->ext[extnr] = NULL;
1663
1664 /* free disc space */
1665 lb_num = udf_rw32(s_ad.loc.lb_num);
1666 vpart_num = udf_rw16(s_ad.loc.part_num);
1667 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1668
1669 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1670 }
1671
1672 /* set our new number of allocation extents */
1673 udf_node->num_extensions = num_extents;
1674 }
1675
1676
1677 /* --------------------------------------------------------------------- */
1678
1679 /*
1680 * Adjust the node's allocation descriptors to reflect the new mapping; do
1681 * take note that we might glue to existing allocation descriptors.
1682 *
 * XXX Note there can only be one allocation being recorded per mount; maybe
 * explicit allocation in the schedule thread?
1685 */
1686
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	/*
	 * `mapping' holds one logical block address per logical block
	 * covered by the buf, all on partition `vpart_num'; node_ad_cpy is
	 * caller-provided scratch space used to rewrite the descriptors.
	 */
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset, replace_len, replace;
	int addr_type, icbflags;
	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));

	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	/* a node is recorded in either a fe or an efe, never both */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot     = 0;
	cpy_slot = 0;
	foffset  = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		/* redirects are skipped; they don't cover file offsets */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap starts for phase 4 */
	restart_slot    = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start  = mapping[lb_num];
		run_length = 1;
		/* extend the run while the mapping stays contiguous or
		 * repeats the same block */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	slot    = restart_slot;
	foffset = restart_foffset;

	replace_len = till - foffset;	/* total amount of bytes to pop */
	slot_offset = from - foffset;	/* offset in first encounted slot */
	KASSERT((slot_offset % lb_size) == 0);

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
				"replace_len %d, "
				"vp %d, lb %d, len %d, flags %d\n",
			slot, slot_offset, replace_len,
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* adjust for slot offset */
		if (slot_offset) {
			DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
			lb_num += slot_offset / lb_size;
			len    -= slot_offset;
			foffset += slot_offset;
			replace_len -= slot_offset;

			/* mark adjusted */
			slot_offset = 0;
		}

		/* advance for (the rest of) this slot */
		replace = MIN(len, replace_len);
		DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));

		/* advance for this slot */
		if (replace) {
			/* note: dont round DOWN on num_lb since we then
			 * forget the last partial one */
			num_lb = (replace + lb_size - 1) / lb_size;
			if (flags != UDF_EXT_FREE) {
				/* release the blocks being replaced */
				udf_free_allocated_space(ump, lb_num,
					udf_rw16(s_ad.loc.part_num), num_lb);
			}
			lb_num      += num_lb;
			len         -= replace;
			foffset     += replace;
			replace_len -= replace;
		}

		/* do we have a slot tail ? */
		if (len) {
			KASSERT(foffset % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len        = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			if (flags == UDF_EXT_FREE)
				s_ad.loc.lb_num = udf_rw32(0);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}

		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
			"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	udf_count_alloc_exts(udf_node);

	/* the node's descriptors should now be sane */
	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
2044
2045 /* --------------------------------------------------------------------- */
2046
2047 int
2048 udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
2049 {
2050 union dscrptr *dscr;
2051 struct vnode *vp = udf_node->vnode;
2052 struct udf_mount *ump = udf_node->ump;
2053 struct file_entry *fe;
2054 struct extfile_entry *efe;
2055 struct icb_tag *icbtag;
2056 struct long_ad c_ad, s_ad;
2057 uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
2058 uint64_t foffset, end_foffset;
2059 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2060 uint32_t lb_size, dscr_size, crclen, lastblock_grow;
2061 uint32_t len, flags, max_len;
2062 uint32_t max_l_ad, l_ad, l_ea;
2063 uint8_t *data_pos, *evacuated_data;
2064 int icbflags, addr_type;
2065 int slot, cpy_slot;
2066 int eof, error;
2067
2068 DPRINTF(ALLOC, ("udf_grow_node\n"));
2069
2070 UDF_LOCK_NODE(udf_node, 0);
2071 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2072
2073 lb_size = udf_rw32(ump->logical_vol->lb_size);
2074 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2075
2076 fe = udf_node->fe;
2077 efe = udf_node->efe;
2078 if (fe) {
2079 dscr = (union dscrptr *) fe;
2080 icbtag = &fe->icbtag;
2081 inflen = udf_rw64(fe->inf_len);
2082 objsize = inflen;
2083 dscr_size = sizeof(struct file_entry) -1;
2084 l_ea = udf_rw32(fe->l_ea);
2085 l_ad = udf_rw32(fe->l_ad);
2086 } else {
2087 dscr = (union dscrptr *) efe;
2088 icbtag = &efe->icbtag;
2089 inflen = udf_rw64(efe->inf_len);
2090 objsize = udf_rw64(efe->obj_size);
2091 dscr_size = sizeof(struct extfile_entry) -1;
2092 l_ea = udf_rw32(efe->l_ea);
2093 l_ad = udf_rw32(efe->l_ad);
2094 }
2095 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
2096 max_l_ad = lb_size - dscr_size - l_ea;
2097
2098 icbflags = udf_rw16(icbtag->flags);
2099 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2100
2101 old_size = inflen;
2102 size_diff = new_size - old_size;
2103
2104 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2105
2106 evacuated_data = NULL;
2107 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2108 if (l_ad + size_diff <= max_l_ad) {
2109 /* only reflect size change directly in the node */
2110 inflen += size_diff;
2111 objsize += size_diff;
2112 l_ad += size_diff;
2113 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2114 if (fe) {
2115 fe->inf_len = udf_rw64(inflen);
2116 fe->l_ad = udf_rw32(l_ad);
2117 fe->tag.desc_crc_len = udf_rw32(crclen);
2118 } else {
2119 efe->inf_len = udf_rw64(inflen);
2120 efe->obj_size = udf_rw64(objsize);
2121 efe->l_ad = udf_rw32(l_ad);
2122 efe->tag.desc_crc_len = udf_rw32(crclen);
2123 }
2124 error = 0;
2125
2126 /* set new size for uvm */
2127 uvm_vnp_setsize(vp, old_size);
2128 uvm_vnp_setwritesize(vp, new_size);
2129
2130 #if 0
2131 /* zero append space in buffer */
2132 uvm_vnp_zerorange(vp, old_size, new_size - old_size);
2133 #endif
2134
2135 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2136
2137 /* unlock */
2138 UDF_UNLOCK_NODE(udf_node, 0);
2139
2140 KASSERT(new_inflen == orig_inflen + size_diff);
2141 KASSERT(new_lbrec == orig_lbrec);
2142 KASSERT(new_lbrec == 0);
2143 return 0;
2144 }
2145
2146 DPRINTF(ALLOC, ("\tCONVERT from internal\n"));
2147
2148 if (old_size > 0) {
2149 /* allocate some space and copy in the stuff to keep */
2150 evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
2151 memset(evacuated_data, 0, lb_size);
2152
2153 /* node is locked, so safe to exit mutex */
2154 UDF_UNLOCK_NODE(udf_node, 0);
2155
2156 /* read in using the `normal' vn_rdwr() */
2157 error = vn_rdwr(UIO_READ, udf_node->vnode,
2158 evacuated_data, old_size, 0,
2159 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2160 FSCRED, NULL, NULL);
2161
2162 /* enter again */
2163 UDF_LOCK_NODE(udf_node, 0);
2164 }
2165
2166 /* convert to a normal alloc */
2167 /* XXX HOWTO selecting allocation method ? */
2168 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2169 icbflags |= UDF_ICB_LONG_ALLOC; /* XXX or SHORT_ALLOC */
2170 icbtag->flags = udf_rw16(icbflags);
2171
2172 /* wipe old descriptor space */
2173 udf_wipe_adslots(udf_node);
2174
2175 memset(&c_ad, 0, sizeof(struct long_ad));
2176 c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
2177 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2178 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2179
2180 slot = 0;
2181 } else {
2182 /* goto the last entry (if any) */
2183 slot = 0;
2184 cpy_slot = 0;
2185 foffset = 0;
2186 memset(&c_ad, 0, sizeof(struct long_ad));
2187 for (;;) {
2188 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2189 if (eof)
2190 break;
2191
2192 len = udf_rw32(c_ad.len);
2193 flags = UDF_EXT_FLAGS(len);
2194 len = UDF_EXT_LEN(len);
2195
2196 end_foffset = foffset + len;
2197 if (flags != UDF_EXT_REDIRECT)
2198 foffset = end_foffset;
2199
2200 slot++;
2201 }
2202 /* at end of adslots */
2203
2204 /* special case if the old size was zero, then there is no last slot */
2205 if (old_size == 0) {
2206 c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
2207 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2208 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2209 } else {
2210 /* refetch last slot */
2211 slot--;
2212 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2213 }
2214 }
2215
2216 /*
2217 * If the length of the last slot is not a multiple of lb_size, adjust
2218 * length so that it is; don't forget to adjust `append_len'! relevant for
2219 * extending existing files
2220 */
2221 len = udf_rw32(c_ad.len);
2222 flags = UDF_EXT_FLAGS(len);
2223 len = UDF_EXT_LEN(len);
2224
2225 lastblock_grow = 0;
2226 if (len % lb_size > 0) {
2227 lastblock_grow = lb_size - (len % lb_size);
2228 lastblock_grow = MIN(size_diff, lastblock_grow);
2229 len += lastblock_grow;
2230 c_ad.len = udf_rw32(len | flags);
2231
2232 /* TODO zero appened space in buffer! */
2233 /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
2234 }
2235 memset(&s_ad, 0, sizeof(struct long_ad));
2236
2237 /* size_diff can be bigger than allowed, so grow in chunks */
2238 append_len = size_diff - lastblock_grow;
2239 while (append_len > 0) {
2240 chunk = MIN(append_len, max_len);
2241 s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
2242 s_ad.loc.part_num = udf_rw16(0);
2243 s_ad.loc.lb_num = udf_rw32(0);
2244
2245 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2246 /* not mergable (anymore) */
2247 error = udf_append_adslot(udf_node, &slot, &c_ad);
2248 if (error)
2249 goto errorout;
2250 slot++;
2251 c_ad = s_ad;
2252 memset(&s_ad, 0, sizeof(struct long_ad));
2253 }
2254 append_len -= chunk;
2255 }
2256
2257 /* if there is a rest piece in the accumulator, append it */
2258 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2259 error = udf_append_adslot(udf_node, &slot, &c_ad);
2260 if (error)
2261 goto errorout;
2262 slot++;
2263 }
2264
2265 /* if there is a rest piece that didn't fit, append it */
2266 if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
2267 error = udf_append_adslot(udf_node, &slot, &s_ad);
2268 if (error)
2269 goto errorout;
2270 slot++;
2271 }
2272
2273 inflen += size_diff;
2274 objsize += size_diff;
2275 if (fe) {
2276 fe->inf_len = udf_rw64(inflen);
2277 } else {
2278 efe->inf_len = udf_rw64(inflen);
2279 efe->obj_size = udf_rw64(objsize);
2280 }
2281 error = 0;
2282
2283 if (evacuated_data) {
2284 /* set new write size for uvm */
2285 uvm_vnp_setwritesize(vp, old_size);
2286
2287 /* write out evacuated data */
2288 error = vn_rdwr(UIO_WRITE, udf_node->vnode,
2289 evacuated_data, old_size, 0,
2290 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2291 FSCRED, NULL, NULL);
2292 uvm_vnp_setsize(vp, old_size);
2293 }
2294
2295 errorout:
2296 if (evacuated_data)
2297 free(evacuated_data, M_UDFTEMP);
2298
2299 udf_count_alloc_exts(udf_node);
2300
2301 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2302 UDF_UNLOCK_NODE(udf_node, 0);
2303
2304 KASSERT(new_inflen == orig_inflen + size_diff);
2305 KASSERT(new_lbrec == orig_lbrec);
2306
2307 return error;
2308 }
2309
2310 /* --------------------------------------------------------------------- */
2311
2312 int
2313 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2314 {
2315 struct vnode *vp = udf_node->vnode;
2316 struct udf_mount *ump = udf_node->ump;
2317 struct file_entry *fe;
2318 struct extfile_entry *efe;
2319 struct icb_tag *icbtag;
2320 struct long_ad c_ad, s_ad, *node_ad_cpy;
2321 uint64_t size_diff, old_size, inflen, objsize;
2322 uint64_t foffset, end_foffset;
2323 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2324 uint32_t lb_size, dscr_size, crclen;
2325 uint32_t slot_offset;
2326 uint32_t len, flags, max_len;
2327 uint32_t num_lb, lb_num;
2328 uint32_t max_l_ad, l_ad, l_ea;
2329 uint16_t vpart_num;
2330 uint8_t *data_pos;
2331 int icbflags, addr_type;
2332 int slot, cpy_slot, cpy_slots;
2333 int eof, error;
2334
2335 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2336
2337 UDF_LOCK_NODE(udf_node, 0);
2338 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2339
2340 lb_size = udf_rw32(ump->logical_vol->lb_size);
2341 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2342
2343 /* do the work */
2344 fe = udf_node->fe;
2345 efe = udf_node->efe;
2346 if (fe) {
2347 icbtag = &fe->icbtag;
2348 inflen = udf_rw64(fe->inf_len);
2349 objsize = inflen;
2350 dscr_size = sizeof(struct file_entry) -1;
2351 l_ea = udf_rw32(fe->l_ea);
2352 l_ad = udf_rw32(fe->l_ad);
2353 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2354 } else {
2355 icbtag = &efe->icbtag;
2356 inflen = udf_rw64(efe->inf_len);
2357 objsize = udf_rw64(efe->obj_size);
2358 dscr_size = sizeof(struct extfile_entry) -1;
2359 l_ea = udf_rw32(efe->l_ea);
2360 l_ad = udf_rw32(efe->l_ad);
2361 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2362 }
2363 max_l_ad = lb_size - dscr_size - l_ea;
2364
2365 icbflags = udf_rw16(icbtag->flags);
2366 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2367
2368 old_size = inflen;
2369 size_diff = old_size - new_size;
2370
2371 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2372
2373 /* shrink the node to its new size */
2374 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2375 /* only reflect size change directly in the node */
2376 KASSERT(new_size <= max_l_ad);
2377 inflen -= size_diff;
2378 objsize -= size_diff;
2379 l_ad -= size_diff;
2380 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2381 if (fe) {
2382 fe->inf_len = udf_rw64(inflen);
2383 fe->l_ad = udf_rw32(l_ad);
2384 fe->tag.desc_crc_len = udf_rw32(crclen);
2385 } else {
2386 efe->inf_len = udf_rw64(inflen);
2387 efe->obj_size = udf_rw64(objsize);
2388 efe->l_ad = udf_rw32(l_ad);
2389 efe->tag.desc_crc_len = udf_rw32(crclen);
2390 }
2391 error = 0;
2392
2393 /* clear the space in the descriptor */
2394 KASSERT(old_size > new_size);
2395 memset(data_pos + new_size, 0, old_size - new_size);
2396
2397 /* TODO zero appened space in buffer! */
2398 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2399
2400 /* set new size for uvm */
2401 uvm_vnp_setsize(vp, new_size);
2402
2403 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2404 UDF_UNLOCK_NODE(udf_node, 0);
2405
2406 KASSERT(new_inflen == orig_inflen - size_diff);
2407 KASSERT(new_lbrec == orig_lbrec);
2408 KASSERT(new_lbrec == 0);
2409
2410 return 0;
2411 }
2412
2413 /* setup node cleanup extents copy space */
2414 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2415 M_UDFMNT, M_WAITOK);
2416 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2417
2418 /*
2419 * Shrink the node by releasing the allocations and truncate the last
2420 * allocation to the new size. If the new size fits into the
2421 * allocation descriptor itself, transform it into an
2422 * UDF_ICB_INTERN_ALLOC.
2423 */
2424 slot = 0;
2425 cpy_slot = 0;
2426 foffset = 0;
2427
2428 /* 1) copy till first overlap piece to the rewrite buffer */
2429 for (;;) {
2430 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2431 if (eof) {
2432 DPRINTF(WRITE,
2433 ("Shrink node failed: "
2434 "encountered EOF\n"));
2435 error = EINVAL;
2436 goto errorout; /* panic? */
2437 }
2438 len = udf_rw32(s_ad.len);
2439 flags = UDF_EXT_FLAGS(len);
2440 len = UDF_EXT_LEN(len);
2441
2442 if (flags == UDF_EXT_REDIRECT) {
2443 slot++;
2444 continue;
2445 }
2446
2447 end_foffset = foffset + len;
2448 if (end_foffset > new_size)
2449 break; /* found */
2450
2451 node_ad_cpy[cpy_slot++] = s_ad;
2452
2453 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2454 "-> stack\n",
2455 udf_rw16(s_ad.loc.part_num),
2456 udf_rw32(s_ad.loc.lb_num),
2457 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2458 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2459
2460 foffset = end_foffset;
2461 slot++;
2462 }
2463 slot_offset = new_size - foffset;
2464
2465 /* 2) trunc overlapping slot at overlap and copy it */
2466 if (slot_offset > 0) {
2467 lb_num = udf_rw32(s_ad.loc.lb_num);
2468 vpart_num = udf_rw16(s_ad.loc.part_num);
2469
2470 if (flags == UDF_EXT_ALLOCATED) {
2471 /* note: round DOWN on num_lb */
2472 lb_num += (slot_offset + lb_size -1) / lb_size;
2473 num_lb = (len - slot_offset) / lb_size;
2474
2475 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2476 }
2477
2478 s_ad.len = udf_rw32(slot_offset | flags);
2479 node_ad_cpy[cpy_slot++] = s_ad;
2480 slot++;
2481
2482 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2483 "-> stack\n",
2484 udf_rw16(s_ad.loc.part_num),
2485 udf_rw32(s_ad.loc.lb_num),
2486 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2487 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2488 }
2489
2490 /* 3) delete remainder */
2491 for (;;) {
2492 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2493 if (eof)
2494 break;
2495
2496 len = udf_rw32(s_ad.len);
2497 flags = UDF_EXT_FLAGS(len);
2498 len = UDF_EXT_LEN(len);
2499
2500 if (flags == UDF_EXT_REDIRECT) {
2501 slot++;
2502 continue;
2503 }
2504
2505 DPRINTF(ALLOC, ("\t3: delete remainder "
2506 "vp %d lb %d, len %d, flags %d\n",
2507 udf_rw16(s_ad.loc.part_num),
2508 udf_rw32(s_ad.loc.lb_num),
2509 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2510 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2511
2512 if (flags == UDF_EXT_ALLOCATED) {
2513 lb_num = udf_rw32(s_ad.loc.lb_num);
2514 vpart_num = udf_rw16(s_ad.loc.part_num);
2515 num_lb = (len + lb_size - 1) / lb_size;
2516
2517 udf_free_allocated_space(ump, lb_num, vpart_num,
2518 num_lb);
2519 }
2520
2521 slot++;
2522 }
2523
2524 /* 4) if it will fit into the descriptor then convert */
2525 if (new_size < max_l_ad) {
2526 /*
2527 * resque/evacuate old piece by reading it in, and convert it
2528 * to internal alloc.
2529 */
2530 if (new_size == 0) {
2531 /* XXX/TODO only for zero sizing now */
2532 udf_wipe_adslots(udf_node);
2533
2534 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2535 icbflags |= UDF_ICB_INTERN_ALLOC;
2536 icbtag->flags = udf_rw16(icbflags);
2537
2538 inflen -= size_diff; KASSERT(inflen == 0);
2539 objsize -= size_diff;
2540 l_ad = new_size;
2541 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2542 if (fe) {
2543 fe->inf_len = udf_rw64(inflen);
2544 fe->l_ad = udf_rw32(l_ad);
2545 fe->tag.desc_crc_len = udf_rw32(crclen);
2546 } else {
2547 efe->inf_len = udf_rw64(inflen);
2548 efe->obj_size = udf_rw64(objsize);
2549 efe->l_ad = udf_rw32(l_ad);
2550 efe->tag.desc_crc_len = udf_rw32(crclen);
2551 }
2552 /* eventually copy in evacuated piece */
2553 /* set new size for uvm */
2554 uvm_vnp_setsize(vp, new_size);
2555
2556 free(node_ad_cpy, M_UDFMNT);
2557 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2558
2559 UDF_UNLOCK_NODE(udf_node, 0);
2560
2561 KASSERT(new_inflen == orig_inflen - size_diff);
2562 KASSERT(new_inflen == 0);
2563 KASSERT(new_lbrec == 0);
2564
2565 return 0;
2566 }
2567
2568 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2569 }
2570
2571 /* 5) reset node descriptors */
2572 udf_wipe_adslots(udf_node);
2573
2574 /* 6) copy back extents; merge when possible. Recounting on the fly */
2575 cpy_slots = cpy_slot;
2576
2577 c_ad = node_ad_cpy[0];
2578 slot = 0;
2579 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2580 s_ad = node_ad_cpy[cpy_slot];
2581
2582 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2583 "lb %d, len %d, flags %d\n",
2584 udf_rw16(s_ad.loc.part_num),
2585 udf_rw32(s_ad.loc.lb_num),
2586 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2587 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2588
2589 /* see if we can merge */
2590 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2591 /* not mergable (anymore) */
2592 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2593 "len %d, flags %d\n",
2594 udf_rw16(c_ad.loc.part_num),
2595 udf_rw32(c_ad.loc.lb_num),
2596 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2597 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2598
2599 error = udf_append_adslot(udf_node, &slot, &c_ad);
2600 if (error)
2601 goto errorout; /* panic? */
2602 c_ad = s_ad;
2603 slot++;
2604 }
2605 }
2606
2607 /* 7) push rest slot (if any) */
2608 if (UDF_EXT_LEN(c_ad.len) > 0) {
2609 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2610 "len %d, flags %d\n",
2611 udf_rw16(c_ad.loc.part_num),
2612 udf_rw32(c_ad.loc.lb_num),
2613 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2614 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2615
2616 error = udf_append_adslot(udf_node, &slot, &c_ad);
2617 if (error)
2618 goto errorout; /* panic? */
2619 ;
2620 }
2621
2622 inflen -= size_diff;
2623 objsize -= size_diff;
2624 if (fe) {
2625 fe->inf_len = udf_rw64(inflen);
2626 } else {
2627 efe->inf_len = udf_rw64(inflen);
2628 efe->obj_size = udf_rw64(objsize);
2629 }
2630 error = 0;
2631
2632 /* set new size for uvm */
2633 uvm_vnp_setsize(vp, new_size);
2634
2635 errorout:
2636 free(node_ad_cpy, M_UDFMNT);
2637
2638 udf_count_alloc_exts(udf_node);
2639
2640 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2641 UDF_UNLOCK_NODE(udf_node, 0);
2642
2643 KASSERT(new_inflen == orig_inflen - size_diff);
2644
2645 return error;
2646 }
2647
2648