udf_allocation.c revision 1.12 1 /* $NetBSD: udf_allocation.c,v 1.12 2008/07/16 09:36:08 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.12 2008/07/16 09:36:08 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
81 /*
82 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
83 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
84 * since actions are most likely sequencial and thus seeking doesn't need
85 * searching for the same or adjacent position again.
86 */
87
88 /* --------------------------------------------------------------------- */
89
90 #if 0
91 #if 1
/*
 * Debug helper: dump the allocation state of a udf_node to the console.
 * Prints the information length and, for non-embedded files, every
 * allocation descriptor slot.  Only active when UDF_DEBUG_NODEDUMP is set
 * in udf_verbose; read-only, never modifies the node.
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data is embedded in the (e)fe; no extents to walk */
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen = %"PRIu64"\n", inflen);
	printf("\t\t");

	/* walk all allocation descriptor slots until end-of-descriptors */
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		/* top bits of the length word encode the extent type */
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags>>30);
		printf("] ");

		if (flags == UDF_EXT_REDIRECT) {
			/* slot redirects to a new allocation extent block */
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
160 #else
161 #define udf_node_dump(a)
162 #endif
163
164
/*
 * Paranoia helper: verify that logical blocks lb_num..lb_num+num_lb-1 on
 * virtual partition vpart_num are marked as allocated (bit CLEAR) in the
 * partition's unallocated-space bitmap.  Diagnostic only: it prints a
 * complaint per mismarked block and never modifies allocation state.
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
		"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits; a SET bit means the block is free */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			/* pointer and index must stay in lock-step */
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}
234
235
/*
 * Paranoia check (debug build): walk all allocation descriptors of the
 * node and verify that the tallied information length and logical blocks
 * recorded match the values stored in the (extended) file entry, and that
 * every allocated extent is marked busy in the space bitmaps.  The tallied
 * values are returned via *cnt_inflen and *cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	int dscr_size, lb_size, flags, whole_lb;
	int slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad = udf_rw32(fe->l_ad);
		l_ea = udf_rw32(fe->l_ea);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad = udf_rw32(efe->l_ad);
		l_ea = udf_rw32(efe->l_ea);
	}
	/* space left for allocation descriptors after the ext. attributes */
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: exactly l_ad bytes live in the descriptor */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		/* only the very last extent may end mid logical block */
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			/* only truly allocated extents count as recorded */
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* a redirect extent spans exactly one logical block */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
329 #else
330 static void
331 udf_node_sanity_check(struct udf_node *udf_node,
332 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
333 struct file_entry *fe;
334 struct extfile_entry *efe;
335 struct icb_tag *icbtag;
336 uint64_t inflen, logblksrec;
337 int dscr_size, lb_size;
338
339 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
340
341 fe = udf_node->fe;
342 efe = udf_node->efe;
343 if (fe) {
344 icbtag = &fe->icbtag;
345 inflen = udf_rw64(fe->inf_len);
346 dscr_size = sizeof(struct file_entry) -1;
347 logblksrec = udf_rw64(fe->logblks_rec);
348 } else {
349 icbtag = &efe->icbtag;
350 inflen = udf_rw64(efe->inf_len);
351 dscr_size = sizeof(struct extfile_entry) -1;
352 logblksrec = udf_rw64(efe->logblks_rec);
353 }
354 *cnt_logblksrec = logblksrec;
355 *cnt_inflen = inflen;
356 }
357 #endif
358
359 /* --------------------------------------------------------------------- */
360
361 int
362 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
363 uint32_t *lb_numres, uint32_t *extres)
364 {
365 struct part_desc *pdesc;
366 struct spare_map_entry *sme;
367 struct long_ad s_icb_loc;
368 uint64_t foffset, end_foffset;
369 uint32_t lb_size, len;
370 uint32_t lb_num, lb_rel, lb_packet;
371 uint32_t udf_rw32_lbmap, ext_offset;
372 uint16_t vpart;
373 int rel, part, error, eof, slot, flags;
374
375 assert(ump && icb_loc && lb_numres);
376
377 vpart = udf_rw16(icb_loc->loc.part_num);
378 lb_num = udf_rw32(icb_loc->loc.lb_num);
379 if (vpart > UDF_VTOP_RAWPART)
380 return EINVAL;
381
382 translate_again:
383 part = ump->vtop[vpart];
384 pdesc = ump->partitions[part];
385
386 switch (ump->vtop_tp[vpart]) {
387 case UDF_VTOP_TYPE_RAW :
388 /* 1:1 to the end of the device */
389 *lb_numres = lb_num;
390 *extres = INT_MAX;
391 return 0;
392 case UDF_VTOP_TYPE_PHYS :
393 /* transform into its disc logical block */
394 if (lb_num > udf_rw32(pdesc->part_len))
395 return EINVAL;
396 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
397
398 /* extent from here to the end of the partition */
399 *extres = udf_rw32(pdesc->part_len) - lb_num;
400 return 0;
401 case UDF_VTOP_TYPE_VIRT :
402 /* only maps one logical block, lookup in VAT */
403 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
404 return EINVAL;
405
406 /* lookup in virtual allocation table file */
407 mutex_enter(&ump->allocate_mutex);
408 error = udf_vat_read(ump->vat_node,
409 (uint8_t *) &udf_rw32_lbmap, 4,
410 ump->vat_offset + lb_num * 4);
411 mutex_exit(&ump->allocate_mutex);
412
413 if (error)
414 return error;
415
416 lb_num = udf_rw32(udf_rw32_lbmap);
417
418 /* transform into its disc logical block */
419 if (lb_num > udf_rw32(pdesc->part_len))
420 return EINVAL;
421 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
422
423 /* just one logical block */
424 *extres = 1;
425 return 0;
426 case UDF_VTOP_TYPE_SPARABLE :
427 /* check if the packet containing the lb_num is remapped */
428 lb_packet = lb_num / ump->sparable_packet_size;
429 lb_rel = lb_num % ump->sparable_packet_size;
430
431 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
432 sme = &ump->sparing_table->entries[rel];
433 if (lb_packet == udf_rw32(sme->org)) {
434 /* NOTE maps to absolute disc logical block! */
435 *lb_numres = udf_rw32(sme->map) + lb_rel;
436 *extres = ump->sparable_packet_size - lb_rel;
437 return 0;
438 }
439 }
440
441 /* transform into its disc logical block */
442 if (lb_num > udf_rw32(pdesc->part_len))
443 return EINVAL;
444 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
445
446 /* rest of block */
447 *extres = ump->sparable_packet_size - lb_rel;
448 return 0;
449 case UDF_VTOP_TYPE_META :
450 /* we have to look into the file's allocation descriptors */
451
452 /* use metadatafile allocation mutex */
453 lb_size = udf_rw32(ump->logical_vol->lb_size);
454
455 UDF_LOCK_NODE(ump->metadata_node, 0);
456
457 /* get first overlapping extent */
458 foffset = 0;
459 slot = 0;
460 for (;;) {
461 udf_get_adslot(ump->metadata_node,
462 slot, &s_icb_loc, &eof);
463 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
464 "len = %d, lb_num = %d, part = %d\n",
465 slot, eof,
466 UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
467 UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
468 udf_rw32(s_icb_loc.loc.lb_num),
469 udf_rw16(s_icb_loc.loc.part_num)));
470 if (eof) {
471 DPRINTF(TRANSLATE,
472 ("Meta partition translation "
473 "failed: can't seek location\n"));
474 UDF_UNLOCK_NODE(ump->metadata_node, 0);
475 return EINVAL;
476 }
477 len = udf_rw32(s_icb_loc.len);
478 flags = UDF_EXT_FLAGS(len);
479 len = UDF_EXT_LEN(len);
480
481 if (flags == UDF_EXT_REDIRECT) {
482 slot++;
483 continue;
484 }
485
486 end_foffset = foffset + len;
487
488 if (end_foffset > lb_num * lb_size)
489 break; /* found */
490 foffset = end_foffset;
491 slot++;
492 }
493 /* found overlapping slot */
494 ext_offset = lb_num * lb_size - foffset;
495
496 /* process extent offset */
497 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
498 vpart = udf_rw16(s_icb_loc.loc.part_num);
499 lb_num += (ext_offset + lb_size -1) / lb_size;
500 len -= ext_offset;
501 ext_offset = 0;
502
503 flags = UDF_EXT_FLAGS(s_icb_loc.len);
504
505 UDF_UNLOCK_NODE(ump->metadata_node, 0);
506 if (flags != UDF_EXT_ALLOCATED) {
507 DPRINTF(TRANSLATE, ("Metadata partition translation "
508 "failed: not allocated\n"));
509 return EINVAL;
510 }
511
512 /*
513 * vpart and lb_num are updated, translate again since we
514 * might be mapped on sparable media
515 */
516 goto translate_again;
517 default:
518 printf("UDF vtop translation scheme %d unimplemented yet\n",
519 ump->vtop_tp[vpart]);
520 }
521
522 return EINVAL;
523 }
524
525 /* --------------------------------------------------------------------- */
526
/*
 * Translate an extent (in logical blocks) of a file into device logical
 * block numbers; used for read and write operations.  DOESN'T check
 * extents.
 *
 * For each of the num_lb blocks starting at file block `from', one entry
 * is written into map[]: either a device block number, UDF_TRANS_ZERO for
 * sparse/unrecorded blocks, or UDF_TRANS_INTERN (single entry) when the
 * file data is embedded in the node itself.
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
			  uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: caller reads from the node, not the disc */
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirect slots just chain extent blocks; skip them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot */
	ext_offset = from * lb_size - foffset;

	/* main loop: keep translating slots until num_lb blocks are mapped */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		lb_num = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num += (ext_offset + lb_size -1) / lb_size;
		overlap = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * note that the while(){} is necessary for the extent that
		 * the udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* sparse/unrecorded: reads as zeroes */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* translate via the partition mapping */
				t_ad.loc.lb_num = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
					&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		/* redirect slots don't contribute to the file offset */
		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
699
700 /* --------------------------------------------------------------------- */
701
702 static int
703 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
704 {
705 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
706 uint8_t *blob;
707 int entry, chunk, found, error;
708
709 KASSERT(ump);
710 KASSERT(ump->logical_vol);
711
712 lb_size = udf_rw32(ump->logical_vol->lb_size);
713 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
714
715 /* TODO static allocation of search chunk */
716
717 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
718 found = 0;
719 error = 0;
720 entry = 0;
721 do {
722 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
723 if (chunk <= 0)
724 break;
725 /* load in chunk */
726 error = udf_vat_read(ump->vat_node, blob, chunk,
727 ump->vat_offset + lb_num * 4);
728
729 if (error)
730 break;
731
732 /* search this chunk */
733 for (entry=0; entry < chunk /4; entry++, lb_num++) {
734 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
735 lb_map = udf_rw32(udf_rw32_lbmap);
736 if (lb_map == 0xffffffff) {
737 found = 1;
738 break;
739 }
740 }
741 } while (!found);
742 if (error) {
743 printf("udf_search_free_vatloc: error reading in vat chunk "
744 "(lb %d, size %d)\n", lb_num, chunk);
745 }
746
747 if (!found) {
748 /* extend VAT */
749 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
750 lb_num = ump->vat_entries;
751 ump->vat_entries++;
752 }
753
754 /* mark entry with initialiser just in case */
755 lb_map = udf_rw32(0xfffffffe);
756 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
757 ump->vat_offset + lb_num *4);
758 ump->vat_last_free_lb = lb_num;
759
760 free(blob, M_UDFTEMP);
761 *lbnumres = lb_num;
762 return 0;
763 }
764
765
/*
 * Allocate up to *num_lb free logical blocks from the given space bitmap;
 * a SET bit in the bitmap means the block is free.  For every block
 * allocated its logical number is appended to lmappos and its physical
 * (device) number, logical + ptov, to pmappos.  On return *num_lb holds
 * the number of blocks that could NOT be allocated (0 on full success).
 *
 * NOTE(review): the blocks handed out are not guaranteed to be contiguous;
 * callers are assumed to cope with scattered allocations — confirm.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;		/* start the scan byte-aligned */
	/* two passes: from the remembered position, then wrapped to 0 */
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				/* whole byte is in use; try the next one */
				offset += 8;
				continue;
			}
			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			/* clear the bit: block is now in use */
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*pmappos++ = lb_num + ptov;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember the scan position for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
815
816
817 static void
818 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
819 {
820 uint32_t offset;
821 uint32_t bit, bitval;
822 uint8_t *bpos;
823
824 offset = lb_num;
825
826 /* starter bits */
827 bpos = bitmap->bits + offset/8;
828 bit = offset % 8;
829 while ((bit != 0) && (num_lb > 0)) {
830 bitval = (1 << bit);
831 KASSERT((*bpos & bitval) == 0);
832 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
833 offset, bpos, bit));
834 *bpos |= bitval;
835 offset++; num_lb--;
836 bit = (bit + 1) % 8;
837 }
838 if (num_lb == 0)
839 return;
840
841 /* whole bytes */
842 KASSERT(bit == 0);
843 bpos = bitmap->bits + offset / 8;
844 while (num_lb >= 8) {
845 KASSERT((*bpos == 0));
846 DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
847 *bpos = 255;
848 offset += 8; num_lb -= 8;
849 bpos++;
850 }
851
852 /* stop bits */
853 KASSERT(num_lb < 8);
854 bit = 0;
855 while (num_lb > 0) {
856 bitval = (1 << bit);
857 KASSERT((*bpos & bitval) == 0);
858 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
859 offset, bpos, bit));
860 *bpos |= bitval;
861 offset++; num_lb--;
862 bit = (bit + 1) % 8;
863 }
864 }
865
866
/*
 * Allocate num_lb logical blocks according to the given allocation scheme
 * and record the logical -> physical mapping in the lmapping/pmapping
 * arrays; the partition allocated from is returned in *alloc_partp.
 * Takes the mount's allocate_mutex for the duration of the allocation.
 * Returns 0 on success or ENOSPC / a VAT read error on failure.
 *
 * NOTE(review): for UDF_ALLOC_SPACEMAP the blocks handed out by the
 * bitmap need not be a contiguous sequence — confirm callers cope.
 */
static int
udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
	int num_lb, uint16_t *alloc_partp,
	uint64_t *lmapping, uint64_t *pmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos, *pmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_part;
	int error;

	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* pick the partition/track pair matching the data class */
	if (ismetadata) {
		alloc_part = ump->metadata_part;
		alloc_track = &ump->metadata_track;
		other_track = &ump->data_track;
	} else {
		alloc_part = ump->data_part;
		alloc_track = &ump->data_track;
		other_track = &ump->metadata_track;
	}

	*alloc_partp = alloc_part;

	error = 0;
	/* XXX check disc space */

	pdesc = ump->partitions[ump->vtop[alloc_part]];
	lmappos = lmapping;
	pmappos = pmapping;

	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;
			*pmappos = 0;	/* will get late-allocated */
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* hand out the next writable blocks on the track */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*pmappos++ = alloc_track->next_writable;
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}
		/* keep both views in sync when they share one track */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		ptov = udf_rw32(pdesc->start_loc);

		/* allocate on unallocated bits page */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[alloc_part];
		udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
			pmappos, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		/* alloc_num_lb now holds the count we could NOT allocate */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity desc. */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + alloc_part;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :
	case UDF_ALLOC_METASEQUENTIAL :
	case UDF_ALLOC_RELAXEDSEQUENTIAL :
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		pmappos = pmapping;
		printf("udf_allocate_space, mapping l->p:\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("\t%"PRIu64" -> %"PRIu64"\n",
				*lmappos++, *pmappos++);
		}
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
983
984 /* --------------------------------------------------------------------- */
985
/*
 * Return num_lb previously allocated logical blocks, starting at lb_num on
 * virtual partition vpart_num, to the free space administration.  The
 * mechanism depends on the partition type: the freed/unallocated space
 * bitmap for physical and sparable partitions, or clearing the VAT entry
 * for virtual partitions.  Takes the allocate_mutex itself.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
		"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		/* bitmaps changed; schedule a write-out on volume close */
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by marking it 0xffffffff (free) */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember lowest free entry to speed up the next search */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1061
1062 /* --------------------------------------------------------------------- */
1063
1064 int
1065 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
1066 uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
1067 {
1068 int ismetadata, alloc_type;
1069
1070 ismetadata = (udf_c_type == UDF_C_NODE);
1071 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
1072
1073 #ifdef DIAGNOSTIC
1074 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
1075 panic("udf_pre_allocate_space: bad c_type on VAT!\n");
1076 }
1077 #endif
1078
1079 /* reserve size for VAT allocated data */
1080 if (alloc_type == UDF_ALLOC_VAT) {
1081 mutex_enter(&ump->allocate_mutex);
1082 ump->uncomitted_lb += num_lb;
1083 mutex_exit(&ump->allocate_mutex);
1084 }
1085
1086 return udf_allocate_space(ump, ismetadata, alloc_type,
1087 num_lb, alloc_partp, lmapping, pmapping);
1088 }
1089
1090 /* --------------------------------------------------------------------- */
1091
/*
 * Allocate a buf on disc for direct write out.  The space doesn't have to
 * be contiguous as the caller takes care of this.
 *
 * Fills lmapping/pmapping with one entry per logical block of the buffer,
 * sets buf->b_blkno to the first physical block, commits the previously
 * reserved (uncommitted) block count and, for user data and FIDs, records
 * the new allocation in the owning node.
 */
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	uint16_t vpart_num;
	int lb_size, blks, udf_c_type;
	int ismetadata, alloc_type;
	int num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb = (buf->b_bcount + lb_size -1) / lb_size;
	blks = lb_size / DEV_BSIZE;	/* device blocks per logical block */
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	ismetadata = (udf_c_type == UDF_C_NODE);
	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;

#ifdef DIAGNOSTIC
	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
		panic("udf_late_allocate_buf: bad c_type on VAT!\n");
	}
#endif

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, its allready allocated */
		if (alloc_type != UDF_ALLOC_VAT)
			return;

		/* allocate sequential */
		alloc_type = UDF_ALLOC_SEQUENTIAL;
	}

	error = udf_allocate_space(ump, ismetadata, alloc_type,
			num_lb, &vpart_num, lmapping, pmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* commit our sector count; clamp to avoid underflow */
	mutex_enter(&ump->allocate_mutex);
	if (num_lb > ump->uncomitted_lb) {
		ump->uncomitted_lb = 0;
	} else {
		ump->uncomitted_lb -= num_lb;
	}
	mutex_exit(&ump->allocate_mutex);

	/* device block number of the first allocated logical block */
	buf->b_blkno = (*pmapping) * blks;

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
		udf_node->outstanding_bufs--;
		splx(s);
	}
}
1168
1169 /* --------------------------------------------------------------------- */
1170
1171 /*
 * Try to merge the new piece a2 into a1. udf_ads_merge returns non-zero when
 * merging is not (or no longer) possible; a2 is updated to hold the rest piece.
1174 */
1175
1176 static int
1177 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1178 {
1179 uint32_t max_len, merge_len;
1180 uint32_t a1_len, a2_len;
1181 uint32_t a1_flags, a2_flags;
1182 uint32_t a1_lbnum, a2_lbnum;
1183 uint16_t a1_part, a2_part;
1184
1185 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1186
1187 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1188 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1189 a1_lbnum = udf_rw32(a1->loc.lb_num);
1190 a1_part = udf_rw16(a1->loc.part_num);
1191
1192 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1193 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1194 a2_lbnum = udf_rw32(a2->loc.lb_num);
1195 a2_part = udf_rw16(a2->loc.part_num);
1196
1197 /* defines same space */
1198 if (a1_flags != a2_flags)
1199 return 1;
1200
1201 if (a1_flags != UDF_EXT_FREE) {
1202 /* the same partition */
1203 if (a1_part != a2_part)
1204 return 1;
1205
1206 /* a2 is successor of a1 */
1207 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1208 return 1;
1209 }
1210
1211 /* merge as most from a2 if possible */
1212 merge_len = MIN(a2_len, max_len - a1_len);
1213 a1_len += merge_len;
1214 a2_len -= merge_len;
1215 a2_lbnum += merge_len/lb_size;
1216
1217 a1->len = udf_rw32(a1_len | a1_flags);
1218 a2->len = udf_rw32(a2_len | a2_flags);
1219 a2->loc.lb_num = udf_rw32(a2_lbnum);
1220
1221 if (a2_len > 0)
1222 return 1;
1223
1224 /* there is space over to merge */
1225 return 0;
1226 }
1227
1228 /* --------------------------------------------------------------------- */
1229
1230 static void
1231 udf_wipe_adslots(struct udf_node *udf_node)
1232 {
1233 struct file_entry *fe;
1234 struct extfile_entry *efe;
1235 struct alloc_ext_entry *ext;
1236 uint64_t inflen, objsize;
1237 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1238 uint8_t *data_pos;
1239 int extnr;
1240
1241 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1242
1243 fe = udf_node->fe;
1244 efe = udf_node->efe;
1245 if (fe) {
1246 inflen = udf_rw64(fe->inf_len);
1247 objsize = inflen;
1248 dscr_size = sizeof(struct file_entry) -1;
1249 l_ea = udf_rw32(fe->l_ea);
1250 l_ad = udf_rw32(fe->l_ad);
1251 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1252 } else {
1253 inflen = udf_rw64(efe->inf_len);
1254 objsize = udf_rw64(efe->obj_size);
1255 dscr_size = sizeof(struct extfile_entry) -1;
1256 l_ea = udf_rw32(efe->l_ea);
1257 l_ad = udf_rw32(efe->l_ad);
1258 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1259 }
1260 max_l_ad = lb_size - dscr_size - l_ea;
1261
1262 /* wipe fe/efe */
1263 memset(data_pos, 0, max_l_ad);
1264 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1265 if (fe) {
1266 fe->l_ad = udf_rw32(0);
1267 fe->logblks_rec = udf_rw64(0);
1268 fe->tag.desc_crc_len = udf_rw32(crclen);
1269 } else {
1270 efe->l_ad = udf_rw32(0);
1271 efe->logblks_rec = udf_rw64(0);
1272 efe->tag.desc_crc_len = udf_rw32(crclen);
1273 }
1274
1275 /* wipe all allocation extent entries */
1276 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1277 ext = udf_node->ext[extnr];
1278 dscr_size = sizeof(struct alloc_ext_entry) -1;
1279 data_pos = (uint8_t *) ext->data;
1280 max_l_ad = lb_size - dscr_size;
1281 memset(data_pos, 0, max_l_ad);
1282 ext->l_ad = udf_rw32(0);
1283
1284 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1285 ext->tag.desc_crc_len = udf_rw32(crclen);
1286 }
1287 udf_node->i_flags |= IN_NODE_REBUILD;
1288 }
1289
1290 /* --------------------------------------------------------------------- */
1291
/*
 * Fetch allocation descriptor number `slot' from the node, following
 * redirects into chained allocation extent descriptors when needed. The
 * result is always returned as a long_ad in `icb' (short_ads are widened
 * with the node's own partition number); `*eof' is set when the slot lies
 * past the last recorded descriptor.
 */
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag  = &efe->icbtag;
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, it's EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	/* size of one allocation descriptor in this node's encoding */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* widen short_ad; lb_num is in the node's partition */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		/* last slot in the previous descriptor was the redirect */
		offset = offset - l_ad;
		ext  = udf_node->ext[extnr];
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len          = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num   = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}
1399
1400 /* --------------------------------------------------------------------- */
1401
/*
 * Write allocation descriptor `icb' at position `*slot', overwriting an
 * existing entry or appending one past the end. When the current descriptor
 * runs out of room, a new allocation extent descriptor (AED) is allocated
 * and chained in with a redirect entry; `*slot' is then advanced by one to
 * compensate for the inserted redirect. Keeps l_ad, desc_crc_len and
 * logblks_rec of the touched descriptors up to date.
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping, pmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr      = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea      = udf_rw32(fe->l_ea);
		l_ad_p    = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag    = &efe->icbtag;
		dscr      = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea      = udf_rw32(efe->l_ea);
		l_ad_p    = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, it's EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	/* size of one allocation descriptor in this node's encoding */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad */
#ifdef DIAGNOSTIC
	/* a free extent should carry no location information */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		if ((udf_rw16(icb->loc.part_num) != 0) ||
		    (udf_rw32(icb->loc.lb_num) != 0))
			printf("UDF: warning, cleaning long_ad marked free\n");
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num   = udf_rw32(0);
	}
#endif

	/* if offset too big, we go to the allocation extensions */
	l_ad   = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* widen short_ad; lb_num is in the node's partition */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is addressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		offset = offset - l_ad;

		ext  = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad   = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len          = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num   = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts; the old blocks are replaced */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/*
		 * No room for both this entry and a terminating/redirect
		 * slot: have to append an AED. See if we already have a
		 * spare one.
		 */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));
			error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
					&vpart_num, &lmapping, &pmapping);
			lb_num = lmapping;
			if (error)
				return error;

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num   = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb,
				(union dscrptr **) &ext);

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size  = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw32(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr]     = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len    = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		crclen = udf_rw32(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len    = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed; only grows when appending */
	if (offset >= l_ad) {
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1643
1644 /* --------------------------------------------------------------------- */
1645
1646 static void
1647 udf_count_alloc_exts(struct udf_node *udf_node)
1648 {
1649 struct long_ad s_ad;
1650 uint32_t lb_num, len, flags;
1651 uint16_t vpart_num;
1652 int slot, eof;
1653 int num_extents, extnr;
1654 int lb_size;
1655
1656 if (udf_node->num_extensions == 0)
1657 return;
1658
1659 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1660 /* count number of allocation extents in use */
1661 num_extents = 0;
1662 slot = 0;
1663 for (;;) {
1664 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1665 if (eof)
1666 break;
1667 len = udf_rw32(s_ad.len);
1668 flags = UDF_EXT_FLAGS(len);
1669
1670 if (flags == UDF_EXT_REDIRECT)
1671 num_extents++;
1672
1673 slot++;
1674 }
1675
1676 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1677 num_extents));
1678
1679 /* XXX choice: we could delay freeing them on node writeout */
1680 /* free excess entries */
1681 extnr = num_extents;
1682 for (;extnr < udf_node->num_extensions; extnr++) {
1683 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1684 /* free dscriptor */
1685 s_ad = udf_node->ext_loc[extnr];
1686 udf_free_logvol_dscr(udf_node->ump, &s_ad,
1687 udf_node->ext[extnr]);
1688 udf_node->ext[extnr] = NULL;
1689
1690 /* free disc space */
1691 lb_num = udf_rw32(s_ad.loc.lb_num);
1692 vpart_num = udf_rw16(s_ad.loc.part_num);
1693 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1694
1695 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1696 }
1697
1698 /* set our new number of allocation extents */
1699 udf_node->num_extensions = num_extents;
1700 }
1701
1702
1703 /* --------------------------------------------------------------------- */
1704
1705 /*
1706 * Adjust the node's allocation descriptors to reflect the new mapping; do
1707 * take note that we might glue to existing allocation descriptors.
1708 *
 * XXX Note there can only be one allocation being recorded per mount; maybe
 * do explicit allocation in the scheduling thread?
1711 */
1712
/*
 * Record the freshly allocated blocks (in `mapping', one entry per logical
 * block, partition `vpart_num') in the node's allocation descriptors for
 * the file range covered by `buf'. Works by copying the descriptors to the
 * `node_ad_cpy' scratch array in eight phases: keep the leading part, split
 * the overlapping extent, insert the new mappings, pop (and free) the
 * replaced extents, keep the tail, wipe the node's descriptors, and write
 * the merged copy back. On failure buf->b_error is set.
 */
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode    *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry    *fe;
	struct extfile_entry *efe;
	struct icb_tag  *icbtag;
	struct long_ad   s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset, replace_len, replace;
	int addr_type, icbflags;
	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));

	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot     = 0;
	cpy_slot = 0;
	foffset  = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		/* redirects occupy a slot but no file offset space */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap starts; phase 4 restarts here */
	restart_slot    = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		/* keep only the leading part of the overlapping extent */
		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start  = mapping[lb_num];
		run_length = 1;
		/*
		 * collect a run; note this also folds consecutive identical
		 * mappings into one run, not only consecutive increasing
		 * ones -- presumably intended; verify against allocator
		 */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num   = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	slot    = restart_slot;
	foffset = restart_foffset;

	replace_len = till - foffset;	/* total amount of bytes to pop */
	slot_offset = from - foffset;	/* offset in first encountered slot */
	KASSERT((slot_offset % lb_size) == 0);

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
				"replace_len %d, "
				"vp %d, lb %d, len %d, flags %d\n",
			slot, slot_offset, replace_len,
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* adjust for slot offset; the head was kept in phase 2 */
		if (slot_offset) {
			DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
			lb_num += slot_offset / lb_size;
			len    -= slot_offset;
			foffset     += slot_offset;
			replace_len -= slot_offset;

			/* mark adjusted */
			slot_offset = 0;
		}

		/* advance for (the rest of) this slot */
		replace = MIN(len, replace_len);
		DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));

		/* advance for this slot */
		if (replace) {
			/* note: dont round DOWN on num_lb since we then
			 * forget the last partial one */
			num_lb = (replace + lb_size - 1) / lb_size;
			if (flags != UDF_EXT_FREE) {
				/* give the superseded blocks back */
				udf_free_allocated_space(ump, lb_num,
					udf_rw16(s_ad.loc.part_num), num_lb);
			}
			lb_num      += num_lb;
			len         -= replace;
			foffset     += replace;
			replace_len -= replace;
		}

		/* do we have a slot tail ? */
		if (len) {
			KASSERT(foffset % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len        = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			if (flags == UDF_EXT_FREE)
				s_ad.loc.lb_num = udf_rw32(0);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}

		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
				"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	udf_count_alloc_exts(udf_node);

	/* the node's descriptors should now be sane */
	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
2070
2071 /* --------------------------------------------------------------------- */
2072
2073 int
2074 udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
2075 {
2076 union dscrptr *dscr;
2077 struct vnode *vp = udf_node->vnode;
2078 struct udf_mount *ump = udf_node->ump;
2079 struct file_entry *fe;
2080 struct extfile_entry *efe;
2081 struct icb_tag *icbtag;
2082 struct long_ad c_ad, s_ad;
2083 uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
2084 uint64_t foffset, end_foffset;
2085 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2086 uint32_t lb_size, dscr_size, crclen, lastblock_grow;
2087 uint32_t len, flags, max_len;
2088 uint32_t max_l_ad, l_ad, l_ea;
2089 uint8_t *data_pos, *evacuated_data;
2090 int icbflags, addr_type;
2091 int slot, cpy_slot;
2092 int eof, error;
2093
2094 DPRINTF(ALLOC, ("udf_grow_node\n"));
2095
2096 UDF_LOCK_NODE(udf_node, 0);
2097 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2098
2099 lb_size = udf_rw32(ump->logical_vol->lb_size);
2100 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2101
2102 fe = udf_node->fe;
2103 efe = udf_node->efe;
2104 if (fe) {
2105 dscr = (union dscrptr *) fe;
2106 icbtag = &fe->icbtag;
2107 inflen = udf_rw64(fe->inf_len);
2108 objsize = inflen;
2109 dscr_size = sizeof(struct file_entry) -1;
2110 l_ea = udf_rw32(fe->l_ea);
2111 l_ad = udf_rw32(fe->l_ad);
2112 } else {
2113 dscr = (union dscrptr *) efe;
2114 icbtag = &efe->icbtag;
2115 inflen = udf_rw64(efe->inf_len);
2116 objsize = udf_rw64(efe->obj_size);
2117 dscr_size = sizeof(struct extfile_entry) -1;
2118 l_ea = udf_rw32(efe->l_ea);
2119 l_ad = udf_rw32(efe->l_ad);
2120 }
2121 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
2122 max_l_ad = lb_size - dscr_size - l_ea;
2123
2124 icbflags = udf_rw16(icbtag->flags);
2125 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2126
2127 old_size = inflen;
2128 size_diff = new_size - old_size;
2129
2130 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2131
2132 evacuated_data = NULL;
2133 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2134 if (l_ad + size_diff <= max_l_ad) {
2135 /* only reflect size change directly in the node */
2136 inflen += size_diff;
2137 objsize += size_diff;
2138 l_ad += size_diff;
2139 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2140 if (fe) {
2141 fe->inf_len = udf_rw64(inflen);
2142 fe->l_ad = udf_rw32(l_ad);
2143 fe->tag.desc_crc_len = udf_rw32(crclen);
2144 } else {
2145 efe->inf_len = udf_rw64(inflen);
2146 efe->obj_size = udf_rw64(objsize);
2147 efe->l_ad = udf_rw32(l_ad);
2148 efe->tag.desc_crc_len = udf_rw32(crclen);
2149 }
2150 error = 0;
2151
2152 /* set new size for uvm */
2153 uvm_vnp_setsize(vp, old_size);
2154 uvm_vnp_setwritesize(vp, new_size);
2155
2156 #if 0
2157 /* zero append space in buffer */
2158 uvm_vnp_zerorange(vp, old_size, new_size - old_size);
2159 #endif
2160
2161 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2162
2163 /* unlock */
2164 UDF_UNLOCK_NODE(udf_node, 0);
2165
2166 KASSERT(new_inflen == orig_inflen + size_diff);
2167 KASSERT(new_lbrec == orig_lbrec);
2168 KASSERT(new_lbrec == 0);
2169 return 0;
2170 }
2171
2172 DPRINTF(ALLOC, ("\tCONVERT from internal\n"));
2173
2174 if (old_size > 0) {
2175 /* allocate some space and copy in the stuff to keep */
2176 evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
2177 memset(evacuated_data, 0, lb_size);
2178
2179 /* node is locked, so safe to exit mutex */
2180 UDF_UNLOCK_NODE(udf_node, 0);
2181
2182 /* read in using the `normal' vn_rdwr() */
2183 error = vn_rdwr(UIO_READ, udf_node->vnode,
2184 evacuated_data, old_size, 0,
2185 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2186 FSCRED, NULL, NULL);
2187
2188 /* enter again */
2189 UDF_LOCK_NODE(udf_node, 0);
2190 }
2191
2192 /* convert to a normal alloc */
2193 /* XXX HOWTO selecting allocation method ? */
2194 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2195 icbflags |= UDF_ICB_LONG_ALLOC; /* XXX or SHORT_ALLOC */
2196 icbtag->flags = udf_rw16(icbflags);
2197
2198 /* wipe old descriptor space */
2199 udf_wipe_adslots(udf_node);
2200
2201 memset(&c_ad, 0, sizeof(struct long_ad));
2202 c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
2203 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2204 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2205
2206 slot = 0;
2207 } else {
2208 /* goto the last entry (if any) */
2209 slot = 0;
2210 cpy_slot = 0;
2211 foffset = 0;
2212 memset(&c_ad, 0, sizeof(struct long_ad));
2213 for (;;) {
2214 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2215 if (eof)
2216 break;
2217
2218 len = udf_rw32(c_ad.len);
2219 flags = UDF_EXT_FLAGS(len);
2220 len = UDF_EXT_LEN(len);
2221
2222 end_foffset = foffset + len;
2223 if (flags != UDF_EXT_REDIRECT)
2224 foffset = end_foffset;
2225
2226 slot++;
2227 }
2228 /* at end of adslots */
2229
2230 /* special case if the old size was zero, then there is no last slot */
2231 if (old_size == 0) {
2232 c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
2233 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2234 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2235 } else {
2236 /* refetch last slot */
2237 slot--;
2238 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2239 }
2240 }
2241
2242 /*
2243 * If the length of the last slot is not a multiple of lb_size, adjust
2244 * length so that it is; don't forget to adjust `append_len'! relevant for
2245 * extending existing files
2246 */
2247 len = udf_rw32(c_ad.len);
2248 flags = UDF_EXT_FLAGS(len);
2249 len = UDF_EXT_LEN(len);
2250
2251 lastblock_grow = 0;
2252 if (len % lb_size > 0) {
2253 lastblock_grow = lb_size - (len % lb_size);
2254 lastblock_grow = MIN(size_diff, lastblock_grow);
2255 len += lastblock_grow;
2256 c_ad.len = udf_rw32(len | flags);
2257
2258 /* TODO zero appened space in buffer! */
2259 /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
2260 }
2261 memset(&s_ad, 0, sizeof(struct long_ad));
2262
2263 /* size_diff can be bigger than allowed, so grow in chunks */
2264 append_len = size_diff - lastblock_grow;
2265 while (append_len > 0) {
2266 chunk = MIN(append_len, max_len);
2267 s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
2268 s_ad.loc.part_num = udf_rw16(0);
2269 s_ad.loc.lb_num = udf_rw32(0);
2270
2271 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2272 /* not mergable (anymore) */
2273 error = udf_append_adslot(udf_node, &slot, &c_ad);
2274 if (error)
2275 goto errorout;
2276 slot++;
2277 c_ad = s_ad;
2278 memset(&s_ad, 0, sizeof(struct long_ad));
2279 }
2280 append_len -= chunk;
2281 }
2282
2283 /* if there is a rest piece in the accumulator, append it */
2284 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2285 error = udf_append_adslot(udf_node, &slot, &c_ad);
2286 if (error)
2287 goto errorout;
2288 slot++;
2289 }
2290
2291 /* if there is a rest piece that didn't fit, append it */
2292 if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
2293 error = udf_append_adslot(udf_node, &slot, &s_ad);
2294 if (error)
2295 goto errorout;
2296 slot++;
2297 }
2298
2299 inflen += size_diff;
2300 objsize += size_diff;
2301 if (fe) {
2302 fe->inf_len = udf_rw64(inflen);
2303 } else {
2304 efe->inf_len = udf_rw64(inflen);
2305 efe->obj_size = udf_rw64(objsize);
2306 }
2307 error = 0;
2308
2309 if (evacuated_data) {
2310 /* set new write size for uvm */
2311 uvm_vnp_setwritesize(vp, old_size);
2312
2313 /* write out evacuated data */
2314 error = vn_rdwr(UIO_WRITE, udf_node->vnode,
2315 evacuated_data, old_size, 0,
2316 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2317 FSCRED, NULL, NULL);
2318 uvm_vnp_setsize(vp, old_size);
2319 }
2320
2321 errorout:
2322 if (evacuated_data)
2323 free(evacuated_data, M_UDFTEMP);
2324
2325 udf_count_alloc_exts(udf_node);
2326
2327 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2328 UDF_UNLOCK_NODE(udf_node, 0);
2329
2330 KASSERT(new_inflen == orig_inflen + size_diff);
2331 KASSERT(new_lbrec == orig_lbrec);
2332
2333 return error;
2334 }
2335
2336 /* --------------------------------------------------------------------- */
2337
2338 int
2339 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2340 {
2341 struct vnode *vp = udf_node->vnode;
2342 struct udf_mount *ump = udf_node->ump;
2343 struct file_entry *fe;
2344 struct extfile_entry *efe;
2345 struct icb_tag *icbtag;
2346 struct long_ad c_ad, s_ad, *node_ad_cpy;
2347 uint64_t size_diff, old_size, inflen, objsize;
2348 uint64_t foffset, end_foffset;
2349 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2350 uint32_t lb_size, dscr_size, crclen;
2351 uint32_t slot_offset;
2352 uint32_t len, flags, max_len;
2353 uint32_t num_lb, lb_num;
2354 uint32_t max_l_ad, l_ad, l_ea;
2355 uint16_t vpart_num;
2356 uint8_t *data_pos;
2357 int icbflags, addr_type;
2358 int slot, cpy_slot, cpy_slots;
2359 int eof, error;
2360
2361 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2362
2363 UDF_LOCK_NODE(udf_node, 0);
2364 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2365
2366 lb_size = udf_rw32(ump->logical_vol->lb_size);
2367 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2368
2369 /* do the work */
2370 fe = udf_node->fe;
2371 efe = udf_node->efe;
2372 if (fe) {
2373 icbtag = &fe->icbtag;
2374 inflen = udf_rw64(fe->inf_len);
2375 objsize = inflen;
2376 dscr_size = sizeof(struct file_entry) -1;
2377 l_ea = udf_rw32(fe->l_ea);
2378 l_ad = udf_rw32(fe->l_ad);
2379 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2380 } else {
2381 icbtag = &efe->icbtag;
2382 inflen = udf_rw64(efe->inf_len);
2383 objsize = udf_rw64(efe->obj_size);
2384 dscr_size = sizeof(struct extfile_entry) -1;
2385 l_ea = udf_rw32(efe->l_ea);
2386 l_ad = udf_rw32(efe->l_ad);
2387 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2388 }
2389 max_l_ad = lb_size - dscr_size - l_ea;
2390
2391 icbflags = udf_rw16(icbtag->flags);
2392 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2393
2394 old_size = inflen;
2395 size_diff = old_size - new_size;
2396
2397 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2398
2399 /* shrink the node to its new size */
2400 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2401 /* only reflect size change directly in the node */
2402 KASSERT(new_size <= max_l_ad);
2403 inflen -= size_diff;
2404 objsize -= size_diff;
2405 l_ad -= size_diff;
2406 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2407 if (fe) {
2408 fe->inf_len = udf_rw64(inflen);
2409 fe->l_ad = udf_rw32(l_ad);
2410 fe->tag.desc_crc_len = udf_rw32(crclen);
2411 } else {
2412 efe->inf_len = udf_rw64(inflen);
2413 efe->obj_size = udf_rw64(objsize);
2414 efe->l_ad = udf_rw32(l_ad);
2415 efe->tag.desc_crc_len = udf_rw32(crclen);
2416 }
2417 error = 0;
2418
2419 /* clear the space in the descriptor */
2420 KASSERT(old_size > new_size);
2421 memset(data_pos + new_size, 0, old_size - new_size);
2422
2423 /* TODO zero appened space in buffer! */
2424 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2425
2426 /* set new size for uvm */
2427 uvm_vnp_setsize(vp, new_size);
2428
2429 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2430 UDF_UNLOCK_NODE(udf_node, 0);
2431
2432 KASSERT(new_inflen == orig_inflen - size_diff);
2433 KASSERT(new_lbrec == orig_lbrec);
2434 KASSERT(new_lbrec == 0);
2435
2436 return 0;
2437 }
2438
2439 /* setup node cleanup extents copy space */
2440 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2441 M_UDFMNT, M_WAITOK);
2442 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2443
2444 /*
2445 * Shrink the node by releasing the allocations and truncate the last
2446 * allocation to the new size. If the new size fits into the
2447 * allocation descriptor itself, transform it into an
2448 * UDF_ICB_INTERN_ALLOC.
2449 */
2450 slot = 0;
2451 cpy_slot = 0;
2452 foffset = 0;
2453
2454 /* 1) copy till first overlap piece to the rewrite buffer */
2455 for (;;) {
2456 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2457 if (eof) {
2458 DPRINTF(WRITE,
2459 ("Shrink node failed: "
2460 "encountered EOF\n"));
2461 error = EINVAL;
2462 goto errorout; /* panic? */
2463 }
2464 len = udf_rw32(s_ad.len);
2465 flags = UDF_EXT_FLAGS(len);
2466 len = UDF_EXT_LEN(len);
2467
2468 if (flags == UDF_EXT_REDIRECT) {
2469 slot++;
2470 continue;
2471 }
2472
2473 end_foffset = foffset + len;
2474 if (end_foffset > new_size)
2475 break; /* found */
2476
2477 node_ad_cpy[cpy_slot++] = s_ad;
2478
2479 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2480 "-> stack\n",
2481 udf_rw16(s_ad.loc.part_num),
2482 udf_rw32(s_ad.loc.lb_num),
2483 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2484 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2485
2486 foffset = end_foffset;
2487 slot++;
2488 }
2489 slot_offset = new_size - foffset;
2490
2491 /* 2) trunc overlapping slot at overlap and copy it */
2492 if (slot_offset > 0) {
2493 lb_num = udf_rw32(s_ad.loc.lb_num);
2494 vpart_num = udf_rw16(s_ad.loc.part_num);
2495
2496 if (flags == UDF_EXT_ALLOCATED) {
2497 /* note: round DOWN on num_lb */
2498 lb_num += (slot_offset + lb_size -1) / lb_size;
2499 num_lb = (len - slot_offset) / lb_size;
2500
2501 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2502 }
2503
2504 s_ad.len = udf_rw32(slot_offset | flags);
2505 node_ad_cpy[cpy_slot++] = s_ad;
2506 slot++;
2507
2508 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2509 "-> stack\n",
2510 udf_rw16(s_ad.loc.part_num),
2511 udf_rw32(s_ad.loc.lb_num),
2512 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2513 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2514 }
2515
2516 /* 3) delete remainder */
2517 for (;;) {
2518 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2519 if (eof)
2520 break;
2521
2522 len = udf_rw32(s_ad.len);
2523 flags = UDF_EXT_FLAGS(len);
2524 len = UDF_EXT_LEN(len);
2525
2526 if (flags == UDF_EXT_REDIRECT) {
2527 slot++;
2528 continue;
2529 }
2530
2531 DPRINTF(ALLOC, ("\t3: delete remainder "
2532 "vp %d lb %d, len %d, flags %d\n",
2533 udf_rw16(s_ad.loc.part_num),
2534 udf_rw32(s_ad.loc.lb_num),
2535 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2536 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2537
2538 if (flags == UDF_EXT_ALLOCATED) {
2539 lb_num = udf_rw32(s_ad.loc.lb_num);
2540 vpart_num = udf_rw16(s_ad.loc.part_num);
2541 num_lb = (len + lb_size - 1) / lb_size;
2542
2543 udf_free_allocated_space(ump, lb_num, vpart_num,
2544 num_lb);
2545 }
2546
2547 slot++;
2548 }
2549
2550 /* 4) if it will fit into the descriptor then convert */
2551 if (new_size < max_l_ad) {
2552 /*
2553 * resque/evacuate old piece by reading it in, and convert it
2554 * to internal alloc.
2555 */
2556 if (new_size == 0) {
2557 /* XXX/TODO only for zero sizing now */
2558 udf_wipe_adslots(udf_node);
2559
2560 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2561 icbflags |= UDF_ICB_INTERN_ALLOC;
2562 icbtag->flags = udf_rw16(icbflags);
2563
2564 inflen -= size_diff; KASSERT(inflen == 0);
2565 objsize -= size_diff;
2566 l_ad = new_size;
2567 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2568 if (fe) {
2569 fe->inf_len = udf_rw64(inflen);
2570 fe->l_ad = udf_rw32(l_ad);
2571 fe->tag.desc_crc_len = udf_rw32(crclen);
2572 } else {
2573 efe->inf_len = udf_rw64(inflen);
2574 efe->obj_size = udf_rw64(objsize);
2575 efe->l_ad = udf_rw32(l_ad);
2576 efe->tag.desc_crc_len = udf_rw32(crclen);
2577 }
2578 /* eventually copy in evacuated piece */
2579 /* set new size for uvm */
2580 uvm_vnp_setsize(vp, new_size);
2581
2582 free(node_ad_cpy, M_UDFMNT);
2583 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2584
2585 UDF_UNLOCK_NODE(udf_node, 0);
2586
2587 KASSERT(new_inflen == orig_inflen - size_diff);
2588 KASSERT(new_inflen == 0);
2589 KASSERT(new_lbrec == 0);
2590
2591 return 0;
2592 }
2593
2594 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2595 }
2596
2597 /* 5) reset node descriptors */
2598 udf_wipe_adslots(udf_node);
2599
2600 /* 6) copy back extents; merge when possible. Recounting on the fly */
2601 cpy_slots = cpy_slot;
2602
2603 c_ad = node_ad_cpy[0];
2604 slot = 0;
2605 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2606 s_ad = node_ad_cpy[cpy_slot];
2607
2608 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2609 "lb %d, len %d, flags %d\n",
2610 udf_rw16(s_ad.loc.part_num),
2611 udf_rw32(s_ad.loc.lb_num),
2612 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2613 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2614
2615 /* see if we can merge */
2616 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2617 /* not mergable (anymore) */
2618 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2619 "len %d, flags %d\n",
2620 udf_rw16(c_ad.loc.part_num),
2621 udf_rw32(c_ad.loc.lb_num),
2622 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2623 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2624
2625 error = udf_append_adslot(udf_node, &slot, &c_ad);
2626 if (error)
2627 goto errorout; /* panic? */
2628 c_ad = s_ad;
2629 slot++;
2630 }
2631 }
2632
2633 /* 7) push rest slot (if any) */
2634 if (UDF_EXT_LEN(c_ad.len) > 0) {
2635 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2636 "len %d, flags %d\n",
2637 udf_rw16(c_ad.loc.part_num),
2638 udf_rw32(c_ad.loc.lb_num),
2639 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2640 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2641
2642 error = udf_append_adslot(udf_node, &slot, &c_ad);
2643 if (error)
2644 goto errorout; /* panic? */
2645 ;
2646 }
2647
2648 inflen -= size_diff;
2649 objsize -= size_diff;
2650 if (fe) {
2651 fe->inf_len = udf_rw64(inflen);
2652 } else {
2653 efe->inf_len = udf_rw64(inflen);
2654 efe->obj_size = udf_rw64(objsize);
2655 }
2656 error = 0;
2657
2658 /* set new size for uvm */
2659 uvm_vnp_setsize(vp, new_size);
2660
2661 errorout:
2662 free(node_ad_cpy, M_UDFMNT);
2663
2664 udf_count_alloc_exts(udf_node);
2665
2666 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2667 UDF_UNLOCK_NODE(udf_node, 0);
2668
2669 KASSERT(new_inflen == orig_inflen - size_diff);
2670
2671 return error;
2672 }
2673
2674