udf_allocation.c revision 1.10 1 /* $NetBSD: udf_allocation.c,v 1.10 2008/07/03 18:03:01 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.10 2008/07/03 18:03:01 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
/* Convert a vnode to its UDF node; argument parenthesized to be macro-safe. */
#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential, so seeking doesn't need to search
 * for the same or an adjacent position again.
 */
87
88 /* --------------------------------------------------------------------- */
89 //#ifdef DEBUG
90 #if 1
91 #if 1
92 static void
93 udf_node_dump(struct udf_node *udf_node) {
94 struct file_entry *fe;
95 struct extfile_entry *efe;
96 struct icb_tag *icbtag;
97 struct short_ad *short_ad;
98 struct long_ad *long_ad;
99 uint64_t inflen;
100 uint32_t icbflags, addr_type, max_l_ad;
101 uint32_t len, lb_num;
102 uint8_t *data_pos;
103 int part_num;
104 int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags;
105
106 if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
107 return;
108
109 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
110
111 fe = udf_node->fe;
112 efe = udf_node->efe;
113 if (fe) {
114 icbtag = &fe->icbtag;
115 inflen = udf_rw64(fe->inf_len);
116 dscr_size = sizeof(struct file_entry) -1;
117 l_ea = udf_rw32(fe->l_ea);
118 l_ad = udf_rw32(fe->l_ad);
119 data_pos = (uint8_t *) fe + dscr_size + l_ea;
120 } else {
121 icbtag = &efe->icbtag;
122 inflen = udf_rw64(efe->inf_len);
123 dscr_size = sizeof(struct extfile_entry) -1;
124 l_ea = udf_rw32(efe->l_ea);
125 l_ad = udf_rw32(efe->l_ad);
126 data_pos = (uint8_t *) efe + dscr_size + l_ea;
127 }
128 max_l_ad = lb_size - dscr_size - l_ea;
129
130 icbflags = udf_rw16(icbtag->flags);
131 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
132
133 printf("udf_node_dump:\n");
134 printf("\tudf_node %p\n", udf_node);
135
136 if (addr_type == UDF_ICB_INTERN_ALLOC) {
137 printf("\t\tIntern alloc, len = %"PRIu64"\n", inflen);
138 return;
139 }
140
141 printf("\t\tInflen = %"PRIu64"\n", inflen);
142 printf("\t\tl_ad = %d\n", l_ad);
143
144 if (addr_type == UDF_ICB_SHORT_ALLOC) {
145 adlen = sizeof(struct short_ad);
146 } else {
147 adlen = sizeof(struct long_ad);
148 }
149
150 printf("\t\t");
151 for (ad_off = 0; ad_off < l_ad; ad_off += adlen) {
152 if (addr_type == UDF_ICB_SHORT_ALLOC) {
153 short_ad = (struct short_ad *) (data_pos + ad_off);
154 len = udf_rw32(short_ad->len);
155 lb_num = udf_rw32(short_ad->lb_num);
156 part_num = -1;
157 flags = UDF_EXT_FLAGS(len);
158 len = UDF_EXT_LEN(len);
159 } else {
160 long_ad = (struct long_ad *) (data_pos + ad_off);
161 len = udf_rw32(long_ad->len);
162 lb_num = udf_rw32(long_ad->loc.lb_num);
163 part_num = udf_rw16(long_ad->loc.part_num);
164 flags = UDF_EXT_FLAGS(len);
165 len = UDF_EXT_LEN(len);
166 }
167 printf("[");
168 if (part_num >= 0)
169 printf("part %d, ", part_num);
170 printf("lb_num %d, len %d", lb_num, len);
171 if (flags)
172 printf(", flags %d", flags>>30);
173 printf("] ");
174 if (ad_off + adlen == l_ad)
175 printf("\n\t\tl_ad END\n\t\t");
176 }
177 printf("\n");
178 }
179 #else
180 #define udf_node_dump(a)
181 #endif
182
183
184 static void
185 udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
186 uint32_t lb_num, uint32_t num_lb)
187 {
188 struct udf_bitmap *bitmap;
189 struct part_desc *pdesc;
190 uint32_t ptov;
191 uint32_t bitval;
192 uint8_t *bpos;
193 int bit;
194 int phys_part;
195 int ok;
196
197 DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
198 "part %d + %d sect\n", lb_num, vpart_num, num_lb));
199
200 /* get partition backing up this vpart_num */
201 pdesc = ump->partitions[ump->vtop[vpart_num]];
202
203 switch (ump->vtop_tp[vpart_num]) {
204 case UDF_VTOP_TYPE_PHYS :
205 case UDF_VTOP_TYPE_SPARABLE :
206 /* free space to freed or unallocated space bitmap */
207 ptov = udf_rw32(pdesc->start_loc);
208 phys_part = ump->vtop[vpart_num];
209
210 /* use unallocated bitmap */
211 bitmap = &ump->part_unalloc_bits[phys_part];
212
213 /* if no bitmaps are defined, bail out */
214 if (bitmap->bits == NULL)
215 break;
216
217 /* check bits */
218 KASSERT(bitmap->bits);
219 ok = 1;
220 bpos = bitmap->bits + lb_num/8;
221 bit = lb_num % 8;
222 while (num_lb > 0) {
223 bitval = (1 << bit);
224 DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
225 lb_num, bpos, bit));
226 KASSERT(bitmap->bits + lb_num/8 == bpos);
227 if (*bpos & bitval) {
228 printf("\tlb_num %d is NOT marked busy\n",
229 lb_num);
230 ok = 0;
231 }
232 lb_num++; num_lb--;
233 bit = (bit + 1) % 8;
234 if (bit == 0)
235 bpos++;
236 }
237 if (!ok) {
238 /* KASSERT(0); */
239 }
240
241 break;
242 case UDF_VTOP_TYPE_VIRT :
243 /* TODO check space */
244 KASSERT(num_lb == 1);
245 break;
246 case UDF_VTOP_TYPE_META :
247 /* TODO check space in the metadata bitmap */
248 default:
249 /* not implemented */
250 break;
251 }
252 }
253
254
255 static void
256 udf_node_sanity_check(struct udf_node *udf_node,
257 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
258 struct file_entry *fe;
259 struct extfile_entry *efe;
260 struct icb_tag *icbtag;
261 struct short_ad *short_ad;
262 struct long_ad *long_ad;
263 uint64_t inflen, logblksrec;
264 uint32_t icbflags, addr_type, max_l_ad;
265 uint32_t len, lb_num;
266 uint8_t *data_pos;
267 uint16_t part_num;
268 int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags, whole_lb;
269
270 // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
271
272 if (1)
273 udf_node_dump(udf_node);
274
275 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
276
277 fe = udf_node->fe;
278 efe = udf_node->efe;
279 if (fe) {
280 icbtag = &fe->icbtag;
281 inflen = udf_rw64(fe->inf_len);
282 logblksrec = udf_rw64(fe->logblks_rec);
283 dscr_size = sizeof(struct file_entry) -1;
284 l_ea = udf_rw32(fe->l_ea);
285 l_ad = udf_rw32(fe->l_ad);
286 data_pos = (uint8_t *) fe + dscr_size + l_ea;
287 } else {
288 icbtag = &efe->icbtag;
289 inflen = udf_rw64(efe->inf_len);
290 logblksrec = udf_rw64(efe->logblks_rec);
291 dscr_size = sizeof(struct extfile_entry) -1;
292 l_ea = udf_rw32(efe->l_ea);
293 l_ad = udf_rw32(efe->l_ad);
294 data_pos = (uint8_t *) efe + dscr_size + l_ea;
295 }
296 max_l_ad = lb_size - dscr_size - l_ea;
297 icbflags = udf_rw16(icbtag->flags);
298 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
299
300 /* reset counters */
301 *cnt_inflen = 0;
302 *cnt_logblksrec = 0;
303
304 if (addr_type == UDF_ICB_INTERN_ALLOC) {
305 KASSERT(l_ad <= max_l_ad);
306 KASSERT(l_ad == inflen);
307 *cnt_inflen = inflen;
308 return;
309 }
310
311 if (addr_type == UDF_ICB_SHORT_ALLOC) {
312 adlen = sizeof(struct short_ad);
313 } else {
314 adlen = sizeof(struct long_ad);
315 }
316
317 /* start counting */
318 whole_lb = 1;
319 for (ad_off = 0; ad_off < l_ad; ad_off += adlen) {
320 KASSERT(whole_lb == 1);
321 if (addr_type == UDF_ICB_SHORT_ALLOC) {
322 short_ad = (struct short_ad *) (data_pos + ad_off);
323 len = udf_rw32(short_ad->len);
324 lb_num = udf_rw32(short_ad->lb_num);
325 part_num = udf_rw16(udf_node->loc.loc.part_num);
326 flags = UDF_EXT_FLAGS(len);
327 len = UDF_EXT_LEN(len);
328 } else {
329 long_ad = (struct long_ad *) (data_pos + ad_off);
330 len = udf_rw32(long_ad->len);
331 lb_num = udf_rw32(long_ad->loc.lb_num);
332 part_num = udf_rw16(long_ad->loc.part_num);
333 flags = UDF_EXT_FLAGS(len);
334 len = UDF_EXT_LEN(len);
335 }
336 if (flags != UDF_EXT_REDIRECT) {
337 *cnt_inflen += len;
338 if (flags == UDF_EXT_ALLOCATED) {
339 *cnt_logblksrec += (len + lb_size -1) / lb_size;
340 }
341 } else {
342 KASSERT(len == lb_size);
343 }
344 /* check allocation */
345 if (flags == UDF_EXT_ALLOCATED)
346 udf_assert_allocated(udf_node->ump, part_num, lb_num,
347 (len + lb_size - 1) / lb_size);
348
349 /* check whole lb */
350 whole_lb = ((len % lb_size) == 0);
351 }
352 /* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */
353
354 KASSERT(*cnt_inflen == inflen);
355 KASSERT(*cnt_logblksrec == logblksrec);
356
357 // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
358 }
359 #else
360 #define udf_node_sanity_check(a, b, c)
361 #endif
362
363 /* --------------------------------------------------------------------- */
364
365 int
366 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
367 uint32_t *lb_numres, uint32_t *extres)
368 {
369 struct part_desc *pdesc;
370 struct spare_map_entry *sme;
371 struct long_ad s_icb_loc;
372 uint64_t foffset, end_foffset;
373 uint32_t lb_size, len;
374 uint32_t lb_num, lb_rel, lb_packet;
375 uint32_t udf_rw32_lbmap, ext_offset;
376 uint16_t vpart;
377 int rel, part, error, eof, slot, flags;
378
379 assert(ump && icb_loc && lb_numres);
380
381 vpart = udf_rw16(icb_loc->loc.part_num);
382 lb_num = udf_rw32(icb_loc->loc.lb_num);
383 if (vpart > UDF_VTOP_RAWPART)
384 return EINVAL;
385
386 translate_again:
387 part = ump->vtop[vpart];
388 pdesc = ump->partitions[part];
389
390 switch (ump->vtop_tp[vpart]) {
391 case UDF_VTOP_TYPE_RAW :
392 /* 1:1 to the end of the device */
393 *lb_numres = lb_num;
394 *extres = INT_MAX;
395 return 0;
396 case UDF_VTOP_TYPE_PHYS :
397 /* transform into its disc logical block */
398 if (lb_num > udf_rw32(pdesc->part_len))
399 return EINVAL;
400 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
401
402 /* extent from here to the end of the partition */
403 *extres = udf_rw32(pdesc->part_len) - lb_num;
404 return 0;
405 case UDF_VTOP_TYPE_VIRT :
406 /* only maps one logical block, lookup in VAT */
407 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
408 return EINVAL;
409
410 /* lookup in virtual allocation table file */
411 mutex_enter(&ump->allocate_mutex);
412 error = udf_vat_read(ump->vat_node,
413 (uint8_t *) &udf_rw32_lbmap, 4,
414 ump->vat_offset + lb_num * 4);
415 mutex_exit(&ump->allocate_mutex);
416
417 if (error)
418 return error;
419
420 lb_num = udf_rw32(udf_rw32_lbmap);
421
422 /* transform into its disc logical block */
423 if (lb_num > udf_rw32(pdesc->part_len))
424 return EINVAL;
425 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
426
427 /* just one logical block */
428 *extres = 1;
429 return 0;
430 case UDF_VTOP_TYPE_SPARABLE :
431 /* check if the packet containing the lb_num is remapped */
432 lb_packet = lb_num / ump->sparable_packet_size;
433 lb_rel = lb_num % ump->sparable_packet_size;
434
435 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
436 sme = &ump->sparing_table->entries[rel];
437 if (lb_packet == udf_rw32(sme->org)) {
438 /* NOTE maps to absolute disc logical block! */
439 *lb_numres = udf_rw32(sme->map) + lb_rel;
440 *extres = ump->sparable_packet_size - lb_rel;
441 return 0;
442 }
443 }
444
445 /* transform into its disc logical block */
446 if (lb_num > udf_rw32(pdesc->part_len))
447 return EINVAL;
448 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
449
450 /* rest of block */
451 *extres = ump->sparable_packet_size - lb_rel;
452 return 0;
453 case UDF_VTOP_TYPE_META :
454 /* we have to look into the file's allocation descriptors */
455
456 /* use metadatafile allocation mutex */
457 lb_size = udf_rw32(ump->logical_vol->lb_size);
458
459 UDF_LOCK_NODE(ump->metadata_node, 0);
460
461 /* get first overlapping extent */
462 foffset = 0;
463 slot = 0;
464 for (;;) {
465 udf_get_adslot(ump->metadata_node,
466 slot, &s_icb_loc, &eof);
467 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
468 "len = %d, lb_num = %d, part = %d\n",
469 slot, eof,
470 UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
471 UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
472 udf_rw32(s_icb_loc.loc.lb_num),
473 udf_rw16(s_icb_loc.loc.part_num)));
474 if (eof) {
475 DPRINTF(TRANSLATE,
476 ("Meta partition translation "
477 "failed: can't seek location\n"));
478 UDF_UNLOCK_NODE(ump->metadata_node, 0);
479 return EINVAL;
480 }
481 len = udf_rw32(s_icb_loc.len);
482 flags = UDF_EXT_FLAGS(len);
483 len = UDF_EXT_LEN(len);
484
485 if (flags == UDF_EXT_REDIRECT) {
486 slot++;
487 continue;
488 }
489
490 end_foffset = foffset + len;
491
492 if (end_foffset > lb_num * lb_size)
493 break; /* found */
494 foffset = end_foffset;
495 slot++;
496 }
497 /* found overlapping slot */
498 ext_offset = lb_num * lb_size - foffset;
499
500 /* process extent offset */
501 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
502 vpart = udf_rw16(s_icb_loc.loc.part_num);
503 lb_num += (ext_offset + lb_size -1) / lb_size;
504 len -= ext_offset;
505 ext_offset = 0;
506
507 flags = UDF_EXT_FLAGS(s_icb_loc.len);
508
509 UDF_UNLOCK_NODE(ump->metadata_node, 0);
510 if (flags != UDF_EXT_ALLOCATED) {
511 DPRINTF(TRANSLATE, ("Metadata partition translation "
512 "failed: not allocated\n"));
513 return EINVAL;
514 }
515
516 /*
517 * vpart and lb_num are updated, translate again since we
518 * might be mapped on sparable media
519 */
520 goto translate_again;
521 default:
522 printf("UDF vtop translation scheme %d unimplemented yet\n",
523 ump->vtop_tp[vpart]);
524 }
525
526 return EINVAL;
527 }
528
529 /* --------------------------------------------------------------------- */
530
531 /*
532 * Translate an extent (in logical_blocks) into logical block numbers; used
533 * for read and write operations. DOESNT't check extents.
534 */
535
536 int
537 udf_translate_file_extent(struct udf_node *udf_node,
538 uint32_t from, uint32_t num_lb,
539 uint64_t *map)
540 {
541 struct udf_mount *ump;
542 struct icb_tag *icbtag;
543 struct long_ad t_ad, s_ad;
544 uint64_t transsec;
545 uint64_t foffset, end_foffset;
546 uint32_t transsec32;
547 uint32_t lb_size;
548 uint32_t ext_offset;
549 uint32_t lb_num, len;
550 uint32_t overlap, translen;
551 uint16_t vpart_num;
552 int eof, error, flags;
553 int slot, addr_type, icbflags;
554
555 if (!udf_node)
556 return ENOENT;
557
558 KASSERT(num_lb > 0);
559
560 UDF_LOCK_NODE(udf_node, 0);
561
562 /* initialise derivative vars */
563 ump = udf_node->ump;
564 lb_size = udf_rw32(ump->logical_vol->lb_size);
565
566 if (udf_node->fe) {
567 icbtag = &udf_node->fe->icbtag;
568 } else {
569 icbtag = &udf_node->efe->icbtag;
570 }
571 icbflags = udf_rw16(icbtag->flags);
572 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
573
574 /* do the work */
575 if (addr_type == UDF_ICB_INTERN_ALLOC) {
576 *map = UDF_TRANS_INTERN;
577 UDF_UNLOCK_NODE(udf_node, 0);
578 return 0;
579 }
580
581 /* find first overlapping extent */
582 foffset = 0;
583 slot = 0;
584 for (;;) {
585 udf_get_adslot(udf_node, slot, &s_ad, &eof);
586 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
587 "lb_num = %d, part = %d\n", slot, eof,
588 UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
589 UDF_EXT_LEN(udf_rw32(s_ad.len)),
590 udf_rw32(s_ad.loc.lb_num),
591 udf_rw16(s_ad.loc.part_num)));
592 if (eof) {
593 DPRINTF(TRANSLATE,
594 ("Translate file extent "
595 "failed: can't seek location\n"));
596 UDF_UNLOCK_NODE(udf_node, 0);
597 return EINVAL;
598 }
599 len = udf_rw32(s_ad.len);
600 flags = UDF_EXT_FLAGS(len);
601 len = UDF_EXT_LEN(len);
602 lb_num = udf_rw32(s_ad.loc.lb_num);
603
604 if (flags == UDF_EXT_REDIRECT) {
605 slot++;
606 continue;
607 }
608
609 end_foffset = foffset + len;
610
611 if (end_foffset > from * lb_size)
612 break; /* found */
613 foffset = end_foffset;
614 slot++;
615 }
616 /* found overlapping slot */
617 ext_offset = from * lb_size - foffset;
618
619 for (;;) {
620 udf_get_adslot(udf_node, slot, &s_ad, &eof);
621 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
622 "lb_num = %d, part = %d\n", slot, eof,
623 UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
624 UDF_EXT_LEN(udf_rw32(s_ad.len)),
625 udf_rw32(s_ad.loc.lb_num),
626 udf_rw16(s_ad.loc.part_num)));
627 if (eof) {
628 DPRINTF(TRANSLATE,
629 ("Translate file extent "
630 "failed: past eof\n"));
631 UDF_UNLOCK_NODE(udf_node, 0);
632 return EINVAL;
633 }
634
635 len = udf_rw32(s_ad.len);
636 flags = UDF_EXT_FLAGS(len);
637 len = UDF_EXT_LEN(len);
638
639 lb_num = udf_rw32(s_ad.loc.lb_num);
640 vpart_num = udf_rw16(s_ad.loc.part_num);
641
642 end_foffset = foffset + len;
643
644 /* process extent, don't forget to advance on ext_offset! */
645 lb_num += (ext_offset + lb_size -1) / lb_size;
646 overlap = (len - ext_offset + lb_size -1) / lb_size;
647 ext_offset = 0;
648
649 /*
650 * note that the while(){} is nessisary for the extent that
651 * the udf_translate_vtop() returns doens't have to span the
652 * whole extent.
653 */
654
655 overlap = MIN(overlap, num_lb);
656 while (overlap && (flags != UDF_EXT_REDIRECT)) {
657 switch (flags) {
658 case UDF_EXT_FREE :
659 case UDF_EXT_ALLOCATED_BUT_NOT_USED :
660 transsec = UDF_TRANS_ZERO;
661 translen = overlap;
662 while (overlap && num_lb && translen) {
663 *map++ = transsec;
664 lb_num++;
665 overlap--; num_lb--; translen--;
666 }
667 break;
668 case UDF_EXT_ALLOCATED :
669 t_ad.loc.lb_num = udf_rw32(lb_num);
670 t_ad.loc.part_num = udf_rw16(vpart_num);
671 error = udf_translate_vtop(ump,
672 &t_ad, &transsec32, &translen);
673 transsec = transsec32;
674 if (error) {
675 UDF_UNLOCK_NODE(udf_node, 0);
676 return error;
677 }
678 while (overlap && num_lb && translen) {
679 *map++ = transsec;
680 lb_num++; transsec++;
681 overlap--; num_lb--; translen--;
682 }
683 break;
684 default:
685 DPRINTF(TRANSLATE,
686 ("Translate file extent "
687 "failed: bad flags %x\n", flags));
688 UDF_UNLOCK_NODE(udf_node, 0);
689 return EINVAL;
690 }
691 }
692 if (num_lb == 0)
693 break;
694
695 if (flags != UDF_EXT_REDIRECT)
696 foffset = end_foffset;
697 slot++;
698 }
699 UDF_UNLOCK_NODE(udf_node, 0);
700
701 return 0;
702 }
703
704 /* --------------------------------------------------------------------- */
705
706 static int
707 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
708 {
709 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
710 uint8_t *blob;
711 int entry, chunk, found, error;
712
713 KASSERT(ump);
714 KASSERT(ump->logical_vol);
715
716 lb_size = udf_rw32(ump->logical_vol->lb_size);
717 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
718
719 /* TODO static allocation of search chunk */
720
721 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
722 found = 0;
723 error = 0;
724 entry = 0;
725 do {
726 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
727 if (chunk <= 0)
728 break;
729 /* load in chunk */
730 error = udf_vat_read(ump->vat_node, blob, chunk,
731 ump->vat_offset + lb_num * 4);
732
733 if (error)
734 break;
735
736 /* search this chunk */
737 for (entry=0; entry < chunk /4; entry++, lb_num++) {
738 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
739 lb_map = udf_rw32(udf_rw32_lbmap);
740 if (lb_map == 0xffffffff) {
741 found = 1;
742 break;
743 }
744 }
745 } while (!found);
746 if (error) {
747 printf("udf_search_free_vatloc: error reading in vat chunk "
748 "(lb %d, size %d)\n", lb_num, chunk);
749 }
750
751 if (!found) {
752 /* extend VAT */
753 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
754 lb_num = ump->vat_entries;
755 ump->vat_entries++;
756 }
757
758 /* mark entry with initialiser just in case */
759 lb_map = udf_rw32(0xfffffffe);
760 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
761 ump->vat_offset + lb_num *4);
762 ump->vat_last_free_lb = lb_num;
763
764 free(blob, M_UDFTEMP);
765 *lbnumres = lb_num;
766 return 0;
767 }
768
769
770 static void
771 udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
772 uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
773 {
774 uint32_t offset, lb_num, bit;
775 int32_t diff;
776 uint8_t *bpos;
777 int pass;
778
779 if (!ismetadata) {
780 /* heuristic to keep the two pointers not too close */
781 diff = bitmap->data_pos - bitmap->metadata_pos;
782 if ((diff >= 0) && (diff < 1024))
783 bitmap->data_pos = bitmap->metadata_pos + 1024;
784 }
785 offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
786 offset &= ~7;
787 for (pass = 0; pass < 2; pass++) {
788 if (offset >= bitmap->max_offset)
789 offset = 0;
790
791 while (offset < bitmap->max_offset) {
792 if (*num_lb == 0)
793 break;
794
795 /* use first bit not set */
796 bpos = bitmap->bits + offset/8;
797 bit = ffs(*bpos); /* returns 0 or 1..8 */
798 if (bit == 0) {
799 offset += 8;
800 continue;
801 }
802 DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
803 offset + bit -1, bpos, bit-1));
804 *bpos &= ~(1 << (bit-1));
805 lb_num = offset + bit-1;
806 *lmappos++ = lb_num;
807 *pmappos++ = lb_num + ptov;
808 *num_lb = *num_lb - 1;
809 // offset = (offset & ~7);
810 }
811 }
812
813 if (ismetadata) {
814 bitmap->metadata_pos = offset;
815 } else {
816 bitmap->data_pos = offset;
817 }
818 }
819
820
821 static void
822 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
823 {
824 uint32_t offset;
825 uint32_t bit, bitval;
826 uint8_t *bpos;
827
828 offset = lb_num;
829
830 /* starter bits */
831 bpos = bitmap->bits + offset/8;
832 bit = offset % 8;
833 while ((bit != 0) && (num_lb > 0)) {
834 bitval = (1 << bit);
835 KASSERT((*bpos & bitval) == 0);
836 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
837 offset, bpos, bit));
838 *bpos |= bitval;
839 offset++; num_lb--;
840 bit = (bit + 1) % 8;
841 }
842 if (num_lb == 0)
843 return;
844
845 /* whole bytes */
846 KASSERT(bit == 0);
847 bpos = bitmap->bits + offset / 8;
848 while (num_lb >= 8) {
849 KASSERT((*bpos == 0));
850 DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
851 *bpos = 255;
852 offset += 8; num_lb -= 8;
853 bpos++;
854 }
855
856 /* stop bits */
857 KASSERT(num_lb < 8);
858 bit = 0;
859 while (num_lb > 0) {
860 bitval = (1 << bit);
861 KASSERT((*bpos & bitval) == 0);
862 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
863 offset, bpos, bit));
864 *bpos |= bitval;
865 offset++; num_lb--;
866 bit = (bit + 1) % 8;
867 }
868 }
869
870
871 /* allocate a contiguous sequence of sectornumbers */
872 static int
873 udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
874 int num_lb, uint16_t *alloc_partp,
875 uint64_t *lmapping, uint64_t *pmapping)
876 {
877 struct mmc_trackinfo *alloc_track, *other_track;
878 struct udf_bitmap *bitmap;
879 struct part_desc *pdesc;
880 struct logvol_int_desc *lvid;
881 uint64_t *lmappos, *pmappos;
882 uint32_t ptov, lb_num, *freepos, free_lbs;
883 int lb_size, alloc_num_lb;
884 int alloc_part;
885 int error;
886
887 mutex_enter(&ump->allocate_mutex);
888
889 lb_size = udf_rw32(ump->logical_vol->lb_size);
890 KASSERT(lb_size == ump->discinfo.sector_size);
891
892 if (ismetadata) {
893 alloc_part = ump->metadata_part;
894 alloc_track = &ump->metadata_track;
895 other_track = &ump->data_track;
896 } else {
897 alloc_part = ump->data_part;
898 alloc_track = &ump->data_track;
899 other_track = &ump->metadata_track;
900 }
901
902 *alloc_partp = alloc_part;
903
904 error = 0;
905 /* XXX check disc space */
906
907 pdesc = ump->partitions[ump->vtop[alloc_part]];
908 lmappos = lmapping;
909 pmappos = pmapping;
910
911 switch (alloc_type) {
912 case UDF_ALLOC_VAT :
913 /* search empty slot in VAT file */
914 KASSERT(num_lb == 1);
915 error = udf_search_free_vatloc(ump, &lb_num);
916 if (!error) {
917 *lmappos = lb_num;
918 *pmappos = 0; /* will get late-allocated */
919 }
920 break;
921 case UDF_ALLOC_SEQUENTIAL :
922 /* sequential allocation on recordable media */
923 /* calculate offset from physical base partition */
924 ptov = udf_rw32(pdesc->start_loc);
925
926 for (lb_num = 0; lb_num < num_lb; lb_num++) {
927 *pmappos++ = alloc_track->next_writable;
928 *lmappos++ = alloc_track->next_writable - ptov;
929 alloc_track->next_writable++;
930 alloc_track->free_blocks--;
931 }
932 if (alloc_track->tracknr == other_track->tracknr)
933 memcpy(other_track, alloc_track,
934 sizeof(struct mmc_trackinfo));
935 break;
936 case UDF_ALLOC_SPACEMAP :
937 ptov = udf_rw32(pdesc->start_loc);
938
939 /* allocate on unallocated bits page */
940 alloc_num_lb = num_lb;
941 bitmap = &ump->part_unalloc_bits[alloc_part];
942 udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
943 pmappos, lmappos);
944 ump->lvclose |= UDF_WRITE_PART_BITMAPS;
945 if (alloc_num_lb) {
946 /* TODO convert freed to unalloc and try again */
947 /* free allocated piece for now */
948 lmappos = lmapping;
949 for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
950 udf_bitmap_free(bitmap, *lmappos++, 1);
951 }
952 error = ENOSPC;
953 }
954 if (!error) {
955 /* adjust freecount */
956 lvid = ump->logvol_integrity;
957 freepos = &lvid->tables[0] + alloc_part;
958 free_lbs = udf_rw32(*freepos);
959 *freepos = udf_rw32(free_lbs - num_lb);
960 }
961 break;
962 case UDF_ALLOC_METABITMAP :
963 case UDF_ALLOC_METASEQUENTIAL :
964 case UDF_ALLOC_RELAXEDSEQUENTIAL :
965 printf("ALERT: udf_allocate_space : allocation %d "
966 "not implemented yet!\n", alloc_type);
967 /* TODO implement, doesn't have to be contiguous */
968 error = ENOSPC;
969 break;
970 }
971
972 #ifdef DEBUG
973 if (udf_verbose & UDF_DEBUG_ALLOC) {
974 lmappos = lmapping;
975 pmappos = pmapping;
976 printf("udf_allocate_space, mapping l->p:\n");
977 for (lb_num = 0; lb_num < num_lb; lb_num++) {
978 printf("\t%"PRIu64" -> %"PRIu64"\n",
979 *lmappos++, *pmappos++);
980 }
981 }
982 #endif
983 mutex_exit(&ump->allocate_mutex);
984
985 return error;
986 }
987
988 /* --------------------------------------------------------------------- */
989
990 void
991 udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
992 uint16_t vpart_num, uint32_t num_lb)
993 {
994 struct udf_bitmap *bitmap;
995 struct part_desc *pdesc;
996 struct logvol_int_desc *lvid;
997 uint32_t ptov, lb_map, udf_rw32_lbmap;
998 uint32_t *freepos, free_lbs;
999 int phys_part;
1000 int error;
1001
1002 DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
1003 "part %d + %d sect\n", lb_num, vpart_num, num_lb));
1004
1005 /* no use freeing zero length */
1006 if (num_lb == 0)
1007 return;
1008
1009 mutex_enter(&ump->allocate_mutex);
1010
1011 /* get partition backing up this vpart_num */
1012 pdesc = ump->partitions[ump->vtop[vpart_num]];
1013
1014 switch (ump->vtop_tp[vpart_num]) {
1015 case UDF_VTOP_TYPE_PHYS :
1016 case UDF_VTOP_TYPE_SPARABLE :
1017 /* free space to freed or unallocated space bitmap */
1018 ptov = udf_rw32(pdesc->start_loc);
1019 phys_part = ump->vtop[vpart_num];
1020
1021 /* first try freed space bitmap */
1022 bitmap = &ump->part_freed_bits[phys_part];
1023
1024 /* if not defined, use unallocated bitmap */
1025 if (bitmap->bits == NULL)
1026 bitmap = &ump->part_unalloc_bits[phys_part];
1027
1028 /* if no bitmaps are defined, bail out */
1029 if (bitmap->bits == NULL)
1030 break;
1031
1032 /* free bits if its defined */
1033 KASSERT(bitmap->bits);
1034 ump->lvclose |= UDF_WRITE_PART_BITMAPS;
1035 udf_bitmap_free(bitmap, lb_num, num_lb);
1036
1037 /* adjust freecount */
1038 lvid = ump->logvol_integrity;
1039 freepos = &lvid->tables[0] + vpart_num;
1040 free_lbs = udf_rw32(*freepos);
1041 *freepos = udf_rw32(free_lbs + num_lb);
1042 break;
1043 case UDF_VTOP_TYPE_VIRT :
1044 /* free this VAT entry */
1045 KASSERT(num_lb == 1);
1046
1047 lb_map = 0xffffffff;
1048 udf_rw32_lbmap = udf_rw32(lb_map);
1049 error = udf_vat_write(ump->vat_node,
1050 (uint8_t *) &udf_rw32_lbmap, 4,
1051 ump->vat_offset + lb_num * 4);
1052 KASSERT(error == 0);
1053 ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
1054 break;
1055 case UDF_VTOP_TYPE_META :
1056 /* free space in the metadata bitmap */
1057 default:
1058 printf("ALERT: udf_free_allocated_space : allocation %d "
1059 "not implemented yet!\n", ump->vtop_tp[vpart_num]);
1060 break;
1061 }
1062
1063 mutex_exit(&ump->allocate_mutex);
1064 }
1065
1066 /* --------------------------------------------------------------------- */
1067
1068 int
1069 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
1070 uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
1071 {
1072 int ismetadata, alloc_type;
1073
1074 ismetadata = (udf_c_type == UDF_C_NODE);
1075 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
1076
1077 #ifdef DIAGNOSTIC
1078 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
1079 panic("udf_pre_allocate_space: bad c_type on VAT!\n");
1080 }
1081 #endif
1082
1083 /* reserve size for VAT allocated data */
1084 if (alloc_type == UDF_ALLOC_VAT) {
1085 mutex_enter(&ump->allocate_mutex);
1086 ump->uncomitted_lb += num_lb;
1087 mutex_exit(&ump->allocate_mutex);
1088 }
1089
1090 return udf_allocate_space(ump, ismetadata, alloc_type,
1091 num_lb, alloc_partp, lmapping, pmapping);
1092 }
1093
1094 /* --------------------------------------------------------------------- */
1095
1096 /*
1097 * Allocate a buf on disc for direct write out. The space doesn't have to be
1098 * contiguous as the caller takes care of this.
1099 */
1100
/*
 * Allocate disc space for a buffer that is being written out right now
 * ("late" allocation).  One logical block is allocated per block in the
 * buf; the translations are returned in lmapping/pmapping and b_blkno is
 * pointed at the first physical block.  For userdata and FIDs the new
 * location is also recorded in the node's allocation descriptors.
 *
 * Node (fe/efe) blocks are only allocated here on VAT mappings; on other
 * partition types their location is already fixed, so the function just
 * returns for those.
 */
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	uint16_t vpart_num;
	int lb_size, blks, udf_c_type;
	int ismetadata, alloc_type;
	int num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb = (buf->b_bcount + lb_size -1) / lb_size;	/* round up */
	blks = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	ismetadata = (udf_c_type == UDF_C_NODE);
	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;

#ifdef DIAGNOSTIC
	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
		panic("udf_late_allocate_buf: bad c_type on VAT!\n");
	}
#endif

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, it is already allocated */
		if (alloc_type != UDF_ALLOC_VAT)
			return;

		/* allocate sequential */
		alloc_type = UDF_ALLOC_SEQUENTIAL;
	}

	error = udf_allocate_space(ump, ismetadata, alloc_type,
			num_lb, &vpart_num, lmapping, pmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/*
	 * commit our sector count; clamped at zero since only VAT
	 * allocations are pre-accounted in udf_pre_allocate_space().
	 * NOTE(review): the count is decremented for every c_type while
	 * only VAT allocations increment it -- verify this asymmetry is
	 * intended.
	 */
	mutex_enter(&ump->allocate_mutex);
	if (num_lb > ump->uncomitted_lb) {
		ump->uncomitted_lb = 0;
	} else {
		ump->uncomitted_lb -= num_lb;
	}
	mutex_exit(&ump->allocate_mutex);

	/* buf->b_blkno is in DEV_BSIZE units; pmapping in logical blocks */
	buf->b_blkno = (*pmapping) * blks;

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
			udf_node->outstanding_bufs--;
		splx(s);
	}
}
1172
1173 /* --------------------------------------------------------------------- */
1174
1175 /*
1176 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1177 * possible (anymore); a2 returns the rest piece.
1178 */
1179
1180 static int
1181 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1182 {
1183 uint32_t max_len, merge_len;
1184 uint32_t a1_len, a2_len;
1185 uint32_t a1_flags, a2_flags;
1186 uint32_t a1_lbnum, a2_lbnum;
1187 uint16_t a1_part, a2_part;
1188
1189 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1190
1191 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1192 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1193 a1_lbnum = udf_rw32(a1->loc.lb_num);
1194 a1_part = udf_rw16(a1->loc.part_num);
1195
1196 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1197 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1198 a2_lbnum = udf_rw32(a2->loc.lb_num);
1199 a2_part = udf_rw16(a2->loc.part_num);
1200
1201 /* defines same space */
1202 if (a1_flags != a2_flags)
1203 return 1;
1204
1205 if (a1_flags != UDF_EXT_FREE) {
1206 /* the same partition */
1207 if (a1_part != a2_part)
1208 return 1;
1209
1210 /* a2 is successor of a1 */
1211 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1212 return 1;
1213 }
1214
1215 /* merge as most from a2 if possible */
1216 merge_len = MIN(a2_len, max_len - a1_len);
1217 a1_len += merge_len;
1218 a2_len -= merge_len;
1219 a2_lbnum += merge_len/lb_size;
1220
1221 a1->len = udf_rw32(a1_len | a1_flags);
1222 a2->len = udf_rw32(a2_len | a2_flags);
1223 a2->loc.lb_num = udf_rw32(a2_lbnum);
1224
1225 if (a2_len > 0)
1226 return 1;
1227
1228 /* there is space over to merge */
1229 return 0;
1230 }
1231
1232 /* --------------------------------------------------------------------- */
1233
1234 static void
1235 udf_wipe_adslots(struct udf_node *udf_node)
1236 {
1237 struct file_entry *fe;
1238 struct extfile_entry *efe;
1239 struct alloc_ext_entry *ext;
1240 uint64_t inflen, objsize;
1241 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1242 uint8_t *data_pos;
1243 int extnr;
1244
1245 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1246
1247 fe = udf_node->fe;
1248 efe = udf_node->efe;
1249 if (fe) {
1250 inflen = udf_rw64(fe->inf_len);
1251 objsize = inflen;
1252 dscr_size = sizeof(struct file_entry) -1;
1253 l_ea = udf_rw32(fe->l_ea);
1254 l_ad = udf_rw32(fe->l_ad);
1255 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1256 } else {
1257 inflen = udf_rw64(efe->inf_len);
1258 objsize = udf_rw64(efe->obj_size);
1259 dscr_size = sizeof(struct extfile_entry) -1;
1260 l_ea = udf_rw32(efe->l_ea);
1261 l_ad = udf_rw32(efe->l_ad);
1262 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1263 }
1264 max_l_ad = lb_size - dscr_size - l_ea;
1265
1266 /* wipe fe/efe */
1267 memset(data_pos, 0, max_l_ad);
1268 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1269 if (fe) {
1270 fe->l_ad = udf_rw32(0);
1271 fe->logblks_rec = udf_rw64(0);
1272 fe->tag.desc_crc_len = udf_rw32(crclen);
1273 } else {
1274 efe->l_ad = udf_rw32(0);
1275 efe->logblks_rec = udf_rw64(0);
1276 efe->tag.desc_crc_len = udf_rw32(crclen);
1277 }
1278
1279 /* wipe all allocation extent entries */
1280 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1281 ext = udf_node->ext[extnr];
1282 dscr_size = sizeof(struct alloc_ext_entry) -1;
1283 max_l_ad = lb_size - dscr_size;
1284 memset(data_pos, 0, max_l_ad);
1285 ext->l_ad = udf_rw32(0);
1286
1287 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1288 ext->tag.desc_crc_len = udf_rw32(crclen);
1289 }
1290 }
1291
1292 /* --------------------------------------------------------------------- */
1293
1294 void
1295 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1296 int *eof) {
1297 struct file_entry *fe;
1298 struct extfile_entry *efe;
1299 struct alloc_ext_entry *ext;
1300 struct icb_tag *icbtag;
1301 struct short_ad *short_ad;
1302 struct long_ad *long_ad;
1303 uint32_t offset;
1304 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad;
1305 uint8_t *data_pos;
1306 int icbflags, addr_type, adlen, extnr;
1307
1308 /* determine what descriptor we are in */
1309 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1310
1311 fe = udf_node->fe;
1312 efe = udf_node->efe;
1313 if (fe) {
1314 icbtag = &fe->icbtag;
1315 dscr_size = sizeof(struct file_entry) -1;
1316 l_ea = udf_rw32(fe->l_ea);
1317 l_ad = udf_rw32(fe->l_ad);
1318 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1319 } else {
1320 icbtag = &efe->icbtag;
1321 dscr_size = sizeof(struct extfile_entry) -1;
1322 l_ea = udf_rw32(efe->l_ea);
1323 l_ad = udf_rw32(efe->l_ad);
1324 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1325 }
1326 max_l_ad = lb_size - dscr_size - l_ea;
1327
1328 icbflags = udf_rw16(icbtag->flags);
1329 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1330
1331 /* just in case we're called on an intern, its EOF */
1332 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1333 memset(icb, 0, sizeof(struct long_ad));
1334 *eof = 1;
1335 return;
1336 }
1337
1338 adlen = 0;
1339 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1340 adlen = sizeof(struct short_ad);
1341 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1342 adlen = sizeof(struct long_ad);
1343 }
1344
1345 /* if offset too big, we go to the allocation extensions */
1346 offset = slot * adlen;
1347 extnr = -1;
1348 while (offset >= max_l_ad) {
1349 extnr++;
1350 offset -= max_l_ad;
1351 ext = udf_node->ext[extnr];
1352 dscr_size = sizeof(struct alloc_ext_entry) -1;
1353 l_ad = udf_rw32(ext->l_ad);
1354 max_l_ad = lb_size - dscr_size;
1355 data_pos = (uint8_t *) ext + dscr_size;
1356 if (extnr > udf_node->num_extensions) {
1357 l_ad = 0; /* force EOF */
1358 break;
1359 }
1360 }
1361
1362 *eof = (offset >= l_ad) || (l_ad == 0);
1363 if (*eof) {
1364 memset(icb, 0, sizeof(struct long_ad));
1365 return;
1366 }
1367
1368 /* get the element */
1369 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1370 short_ad = (struct short_ad *) (data_pos + offset);
1371 icb->len = short_ad->len;
1372 icb->loc.part_num = udf_node->loc.loc.part_num;
1373 icb->loc.lb_num = short_ad->lb_num;
1374 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1375 long_ad = (struct long_ad *) (data_pos + offset);
1376 *icb = *long_ad;
1377 }
1378 }
1379
1380 /* --------------------------------------------------------------------- */
1381
1382 int
1383 udf_append_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb) {
1384 union dscrptr *dscr;
1385 struct file_entry *fe;
1386 struct extfile_entry *efe;
1387 struct alloc_ext_entry *ext;
1388 struct icb_tag *icbtag;
1389 struct short_ad *short_ad;
1390 struct long_ad *long_ad, o_icb;
1391 uint64_t logblks_rec, *logblks_rec_p;
1392 uint32_t offset, rest, len;
1393 uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
1394 uint8_t *data_pos;
1395 int icbflags, addr_type, adlen, extnr;
1396
1397 /* determine what descriptor we are in */
1398 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1399
1400 fe = udf_node->fe;
1401 efe = udf_node->efe;
1402 if (fe) {
1403 icbtag = &fe->icbtag;
1404 dscr = (union dscrptr *) fe;
1405 dscr_size = sizeof(struct file_entry) -1;
1406
1407 l_ea = udf_rw32(fe->l_ea);
1408 l_ad_p = &fe->l_ad;
1409 logblks_rec_p = &fe->logblks_rec;
1410 } else {
1411 icbtag = &efe->icbtag;
1412 dscr = (union dscrptr *) efe;
1413 dscr_size = sizeof(struct extfile_entry) -1;
1414
1415 l_ea = udf_rw32(efe->l_ea);
1416 l_ad_p = &efe->l_ad;
1417 logblks_rec_p = &efe->logblks_rec;
1418 }
1419 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
1420 max_l_ad = lb_size - dscr_size - l_ea;
1421
1422 icbflags = udf_rw16(icbtag->flags);
1423 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1424
1425 /* just in case we're called on an intern, its EOF */
1426 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1427 panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
1428 }
1429
1430 adlen = 0;
1431 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1432 adlen = sizeof(struct short_ad);
1433 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1434 adlen = sizeof(struct long_ad);
1435 }
1436
1437 /* clean up given long_ad */
1438 #ifdef DIAGNOSTIC
1439 if (UDF_EXT_FLAGS(udf_rw32(icb->len)) == UDF_EXT_FREE) {
1440 if ((udf_rw16(icb->loc.part_num) != 0) ||
1441 (udf_rw32(icb->loc.lb_num) != 0))
1442 printf("UDF: warning, cleaning long_ad marked free\n");
1443 icb->loc.part_num = udf_rw16(0);
1444 icb->loc.lb_num = udf_rw32(0);
1445 }
1446 #endif
1447
1448 /* if offset too big, we go to the allocation extensions */
1449 offset = slot * adlen;
1450 extnr = 0;
1451 while (offset > max_l_ad) {
1452 offset -= max_l_ad;
1453 ext = udf_node->ext[extnr];
1454 dscr = (union dscrptr *) ext;
1455 dscr_size = sizeof(struct alloc_ext_entry) -1;
1456
1457 KASSERT(ext != NULL);
1458 l_ad_p = &ext->l_ad;
1459 max_l_ad = lb_size - dscr_size;
1460 data_pos = (uint8_t *) dscr + dscr_size;
1461
1462 extnr++;
1463 }
1464 /* offset is offset within the current (E)FE/AED */
1465 l_ad = udf_rw32(*l_ad_p);
1466 crclen = udf_rw32(dscr->tag.desc_crc_len);
1467 logblks_rec = udf_rw64(*logblks_rec_p);
1468
1469 if (extnr > udf_node->num_extensions)
1470 return EFBIG; /* too fragmented */
1471
1472 /* overwriting old piece? */
1473 if (offset < l_ad) {
1474 /* overwrite entry; compensate for the old element */
1475 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1476 short_ad = (struct short_ad *) (data_pos + offset);
1477 o_icb.len = short_ad->len;
1478 o_icb.loc.part_num = udf_rw16(0); /* ignore */
1479 o_icb.loc.lb_num = short_ad->lb_num;
1480 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1481 long_ad = (struct long_ad *) (data_pos + offset);
1482 o_icb = *long_ad;
1483 } else {
1484 panic("Invalid address type in udf_append_adslot\n");
1485 }
1486
1487 len = udf_rw32(o_icb.len);
1488 if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
1489 /* adjust counts */
1490 len = UDF_EXT_LEN(len);
1491 logblks_rec -= (len + lb_size -1) / lb_size;
1492 }
1493 }
1494
1495 /* calculate rest space in this descriptor */
1496 rest = max_l_ad - offset;
1497 if (rest <= adlen) {
1498 /* create redirect and link new allocation extension */
1499 printf("udf_append_to_adslot: can't create allocation extention yet\n");
1500 return EFBIG;
1501 }
1502
1503 /* write out the element */
1504 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1505 short_ad = (struct short_ad *) (data_pos + offset);
1506 short_ad->len = icb->len;
1507 short_ad->lb_num = icb->loc.lb_num;
1508 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1509 long_ad = (struct long_ad *) (data_pos + offset);
1510 *long_ad = *icb;
1511 }
1512
1513 /* adjust logblks recorded count */
1514 if (UDF_EXT_FLAGS(icb->len) == UDF_EXT_ALLOCATED)
1515 logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
1516 *logblks_rec_p = udf_rw64(logblks_rec);
1517
1518 /* adjust l_ad and crclen when needed */
1519 if (offset >= l_ad) {
1520 l_ad += adlen;
1521 crclen += adlen;
1522 dscr->tag.desc_crc_len = udf_rw32(crclen);
1523 *l_ad_p = udf_rw32(l_ad);
1524 }
1525
1526 return 0;
1527 }
1528
1529 /* --------------------------------------------------------------------- */
1530
1531 /*
1532 * Adjust the node's allocation descriptors to reflect the new mapping; do
1533 * take note that we might glue to existing allocation descriptors.
1534 *
 * XXX Note there can only be one allocation being recorded per mount; maybe
 * do explicit allocation in the schedule thread?
1537 */
1538
1539 static void
1540 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
1541 uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
1542 {
1543 struct vnode *vp = buf->b_vp;
1544 struct udf_node *udf_node = VTOI(vp);
1545 struct file_entry *fe;
1546 struct extfile_entry *efe;
1547 struct icb_tag *icbtag;
1548 struct long_ad s_ad, c_ad;
1549 uint64_t inflen, from, till;
1550 uint64_t foffset, end_foffset, restart_foffset;
1551 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
1552 uint32_t num_lb, len, flags, lb_num;
1553 uint32_t run_start;
1554 uint32_t slot_offset, replace_len, replace;
1555 int addr_type, icbflags;
1556 int udf_c_type = buf->b_udf_c_type;
1557 int lb_size, run_length, eof;
1558 int slot, cpy_slot, cpy_slots, restart_slot;
1559 int error;
1560
1561 DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
1562
1563 /* sanity check ... should be panic ? */
1564 if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
1565 return;
1566
1567 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1568
1569 /* do the job */
1570 UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
1571 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
1572
1573 fe = udf_node->fe;
1574 efe = udf_node->efe;
1575 if (fe) {
1576 icbtag = &fe->icbtag;
1577 inflen = udf_rw64(fe->inf_len);
1578 } else {
1579 icbtag = &efe->icbtag;
1580 inflen = udf_rw64(efe->inf_len);
1581 }
1582
1583 /* do check if `till' is not past file information length */
1584 from = buf->b_lblkno * lb_size;
1585 till = MIN(inflen, from + buf->b_resid);
1586
1587 num_lb = (till - from + lb_size -1) / lb_size;
1588
1589 DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
1590
1591 icbflags = udf_rw16(icbtag->flags);
1592 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1593
1594 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1595 /* nothing to do */
1596 /* XXX clean up rest of node? just in case? */
1597 UDF_UNLOCK_NODE(udf_node, 0);
1598 return;
1599 }
1600
1601 slot = 0;
1602 cpy_slot = 0;
1603 foffset = 0;
1604
1605 /* 1) copy till first overlap piece to the rewrite buffer */
1606 for (;;) {
1607 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1608 if (eof) {
1609 DPRINTF(WRITE,
1610 ("Record allocation in node "
1611 "failed: encountered EOF\n"));
1612 UDF_UNLOCK_NODE(udf_node, 0);
1613 buf->b_error = EINVAL;
1614 return;
1615 }
1616 len = udf_rw32(s_ad.len);
1617 flags = UDF_EXT_FLAGS(len);
1618 len = UDF_EXT_LEN(len);
1619
1620 if (flags == UDF_EXT_REDIRECT) {
1621 slot++;
1622 continue;
1623 }
1624
1625 end_foffset = foffset + len;
1626 if (end_foffset > from)
1627 break; /* found */
1628
1629 node_ad_cpy[cpy_slot++] = s_ad;
1630
1631 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
1632 "-> stack\n",
1633 udf_rw16(s_ad.loc.part_num),
1634 udf_rw32(s_ad.loc.lb_num),
1635 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1636 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1637
1638 foffset = end_foffset;
1639 slot++;
1640 }
1641 restart_slot = slot;
1642 restart_foffset = foffset;
1643
1644 /* 2) trunc overlapping slot at overlap and copy it */
1645 slot_offset = from - foffset;
1646 if (slot_offset > 0) {
1647 DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
1648 slot_offset, flags >> 30, flags));
1649
1650 s_ad.len = udf_rw32(slot_offset | flags);
1651 node_ad_cpy[cpy_slot++] = s_ad;
1652
1653 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
1654 "-> stack\n",
1655 udf_rw16(s_ad.loc.part_num),
1656 udf_rw32(s_ad.loc.lb_num),
1657 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1658 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1659 }
1660 foffset += slot_offset;
1661
1662 /* 3) insert new mappings */
1663 memset(&s_ad, 0, sizeof(struct long_ad));
1664 lb_num = 0;
1665 for (lb_num = 0; lb_num < num_lb; lb_num++) {
1666 run_start = mapping[lb_num];
1667 run_length = 1;
1668 while (lb_num < num_lb-1) {
1669 if (mapping[lb_num+1] != mapping[lb_num]+1)
1670 if (mapping[lb_num+1] != mapping[lb_num])
1671 break;
1672 run_length++;
1673 lb_num++;
1674 }
1675 /* insert slot for this mapping */
1676 len = run_length * lb_size;
1677
1678 /* bounds checking */
1679 if (foffset + len > till)
1680 len = till - foffset;
1681 KASSERT(foffset + len <= inflen);
1682
1683 s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
1684 s_ad.loc.part_num = udf_rw16(vpart_num);
1685 s_ad.loc.lb_num = udf_rw32(run_start);
1686
1687 foffset += len;
1688
1689 /* paranoia */
1690 if (len == 0) {
1691 DPRINTF(WRITE,
1692 ("Record allocation in node "
1693 "failed: insert failed\n"));
1694 UDF_UNLOCK_NODE(udf_node, 0);
1695 buf->b_error = EINVAL;
1696 return;
1697 }
1698 node_ad_cpy[cpy_slot++] = s_ad;
1699
1700 DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
1701 "flags %d -> stack\n",
1702 udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
1703 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1704 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1705 }
1706
1707 /* 4) pop replaced length */
1708 slot = restart_slot;
1709 foffset = restart_foffset;
1710
1711 replace_len = till - foffset; /* total amount of bytes to pop */
1712 slot_offset = from - foffset; /* offset in first encounted slot */
1713 KASSERT((slot_offset % lb_size) == 0);
1714
1715 for (;;) {
1716 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1717 if (eof)
1718 break;
1719
1720 len = udf_rw32(s_ad.len);
1721 flags = UDF_EXT_FLAGS(len);
1722 len = UDF_EXT_LEN(len);
1723 lb_num = udf_rw32(s_ad.loc.lb_num);
1724
1725 if (flags == UDF_EXT_REDIRECT) {
1726 slot++;
1727 continue;
1728 }
1729
1730 DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
1731 "replace_len %d, "
1732 "vp %d, lb %d, len %d, flags %d\n",
1733 slot, slot_offset, replace_len,
1734 udf_rw16(s_ad.loc.part_num),
1735 udf_rw32(s_ad.loc.lb_num),
1736 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1737 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1738
1739 /* adjust for slot offset */
1740 if (slot_offset) {
1741 DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
1742 lb_num += slot_offset / lb_size;
1743 len -= slot_offset;
1744 foffset += slot_offset;
1745 replace_len -= slot_offset;
1746
1747 /* mark adjusted */
1748 slot_offset = 0;
1749 }
1750
1751 /* advance for (the rest of) this slot */
1752 replace = MIN(len, replace_len);
1753 DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
1754
1755 /* advance for this slot */
1756 if (replace) {
1757 /* note: dont round DOWN on num_lb since we then
1758 * forget the last partial one */
1759 num_lb = (replace + lb_size - 1) / lb_size;
1760 if (flags != UDF_EXT_FREE) {
1761 udf_free_allocated_space(ump, lb_num,
1762 udf_rw16(s_ad.loc.part_num), num_lb);
1763 }
1764 lb_num += num_lb;
1765 len -= replace;
1766 foffset += replace;
1767 replace_len -= replace;
1768 }
1769
1770 /* do we have a slot tail ? */
1771 if (len) {
1772 KASSERT(foffset % lb_size == 0);
1773
1774 /* we arrived at our point, push remainder */
1775 s_ad.len = udf_rw32(len | flags);
1776 s_ad.loc.lb_num = udf_rw32(lb_num);
1777 if (flags == UDF_EXT_FREE)
1778 s_ad.loc.lb_num = udf_rw32(0);
1779 node_ad_cpy[cpy_slot++] = s_ad;
1780 foffset += len;
1781 slot++;
1782
1783 DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
1784 "-> stack\n",
1785 udf_rw16(s_ad.loc.part_num),
1786 udf_rw32(s_ad.loc.lb_num),
1787 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1788 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1789 break;
1790 }
1791
1792 slot++;
1793 }
1794
1795 /* 5) copy remainder */
1796 for (;;) {
1797 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1798 if (eof)
1799 break;
1800
1801 len = udf_rw32(s_ad.len);
1802 flags = UDF_EXT_FLAGS(len);
1803 len = UDF_EXT_LEN(len);
1804
1805 if (flags == UDF_EXT_REDIRECT) {
1806 slot++;
1807 continue;
1808 }
1809
1810 node_ad_cpy[cpy_slot++] = s_ad;
1811
1812 DPRINTF(ALLOC, ("\t5: insert new mapping "
1813 "vp %d lb %d, len %d, flags %d "
1814 "-> stack\n",
1815 udf_rw16(s_ad.loc.part_num),
1816 udf_rw32(s_ad.loc.lb_num),
1817 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1818 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1819
1820 slot++;
1821 }
1822
1823 /* 6) reset node descriptors */
1824 udf_wipe_adslots(udf_node);
1825
1826 /* 7) copy back extents; merge when possible. Recounting on the fly */
1827 cpy_slots = cpy_slot;
1828
1829 c_ad = node_ad_cpy[0];
1830 slot = 0;
1831 DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
1832 "lb %d, len %d, flags %d\n",
1833 udf_rw16(c_ad.loc.part_num),
1834 udf_rw32(c_ad.loc.lb_num),
1835 UDF_EXT_LEN(udf_rw32(c_ad.len)),
1836 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
1837
1838 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
1839 s_ad = node_ad_cpy[cpy_slot];
1840
1841 DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
1842 "lb %d, len %d, flags %d\n",
1843 udf_rw16(s_ad.loc.part_num),
1844 udf_rw32(s_ad.loc.lb_num),
1845 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1846 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1847
1848 /* see if we can merge */
1849 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
1850 /* not mergable (anymore) */
1851 DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
1852 "len %d, flags %d\n",
1853 udf_rw16(c_ad.loc.part_num),
1854 udf_rw32(c_ad.loc.lb_num),
1855 UDF_EXT_LEN(udf_rw32(c_ad.len)),
1856 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
1857
1858 error = udf_append_adslot(udf_node, slot, &c_ad);
1859 if (error) {
1860 buf->b_error = error;
1861 goto out;
1862 }
1863 c_ad = s_ad;
1864 slot++;
1865 }
1866 }
1867
1868 /* 8) push rest slot (if any) */
1869 if (UDF_EXT_LEN(c_ad.len) > 0) {
1870 DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
1871 "len %d, flags %d\n",
1872 udf_rw16(c_ad.loc.part_num),
1873 udf_rw32(c_ad.loc.lb_num),
1874 UDF_EXT_LEN(udf_rw32(c_ad.len)),
1875 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
1876
1877 error = udf_append_adslot(udf_node, slot, &c_ad);
1878 if (error) {
1879 buf->b_error = error;
1880 goto out;
1881 }
1882 }
1883
1884 out:
1885 /* the node's descriptors should now be sane */
1886 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
1887 UDF_UNLOCK_NODE(udf_node, 0);
1888
1889 KASSERT(orig_inflen == new_inflen);
1890 KASSERT(new_lbrec >= orig_lbrec);
1891
1892 return;
1893 }
1894
1895 /* --------------------------------------------------------------------- */
1896
/*
 * Grow the node's file to `new_size' bytes.
 *
 * Intern (embedded) allocated nodes are grown in place when the new data
 * still fits in the descriptor; otherwise the embedded data is evacuated,
 * the node is converted to long_ad allocation and the data is written
 * back through vn_rdwr().  For extern allocated nodes the last extent is
 * padded up to a whole logical block and UDF_EXT_FREE extents are
 * appended (in max_len sized chunks) for the remainder.
 *
 * Returns 0 on success or an errno value (e.g. EFBIG from
 * udf_append_adslot when the node gets too fragmented).
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint8_t *data_pos, *evacuated_data;
	int icbflags, addr_type;
	int slot, cpy_slot;
	int eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* max extent length rounded down to a whole number of blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0,
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc */
		/* XXX HOWTO selecting allocation method ? */
		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |=  UDF_ICB_LONG_ALLOC;	/* XXX or SHORT_ALLOC */
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* the old embedded data now becomes one free extent */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appended space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0,
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2157
2158 /* --------------------------------------------------------------------- */
2159
/*
 * Shrink the body of `udf_node' to `new_size' bytes.
 *
 * Releases the logical blocks allocated past the new end-of-file, truncates
 * the extent that straddles the new size, and rewrites the node's allocation
 * descriptors from a scratch copy.  If the file was internally allocated
 * (data embedded in the descriptor) only the descriptor lengths are adjusted;
 * if the new size is zero and would fit internally, the node is converted
 * back to UDF_ICB_INTERN_ALLOC.
 *
 * Called with the node unlocked; takes and releases UDF_LOCK_NODE itself.
 * Returns 0 on success or an errno value (EINVAL if the recorded extents end
 * before `new_size' is reached).
 *
 * NOTE(review): the caller presumably guarantees new_size <= current inf_len
 * (size_diff below is computed unsigned as old_size - new_size) -- confirm
 * against udf_setsize/callers.
 */
int
udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
{
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad, *node_ad_cpy;
	uint64_t size_diff, old_size, inflen, objsize;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen;
	uint32_t slot_offset;
	uint32_t len, flags, max_len;
	uint32_t num_lb, lb_num;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type;
	int slot, cpy_slot, cpy_slots;
	int eof, error;

	DPRINTF(ALLOC, ("udf_shrink_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* max_len: largest extent length, rounded down to whole blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	/*
	 * Pick up descriptor geometry from whichever descriptor backs this
	 * node: a (short) file entry or an extended file entry.  Exactly one
	 * of fe/efe is non-NULL.  data_pos points at the start of the
	 * allocation-descriptor area (after the extended attributes).
	 */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* room left in the descriptor block for allocation descriptors */
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size = inflen;
	size_diff = old_size - new_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	/* shrink the node to its new size */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/*
		 * Data is embedded in the descriptor itself: no blocks to
		 * free, only the recorded lengths and CRC need updating.
		 */
		/* only reflect size change directly in the node */
		KASSERT(new_size <= max_l_ad);
		inflen -= size_diff;
		objsize -= size_diff;
		l_ad -= size_diff;
		crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
		if (fe) {
			fe->inf_len = udf_rw64(inflen);
			fe->l_ad = udf_rw32(l_ad);
			fe->tag.desc_crc_len = udf_rw32(crclen);
		} else {
			efe->inf_len = udf_rw64(inflen);
			efe->obj_size = udf_rw64(objsize);
			efe->l_ad = udf_rw32(l_ad);
			efe->tag.desc_crc_len = udf_rw32(crclen);
		}
		error = 0;

		/* clear the space in the descriptor */
		KASSERT(old_size > new_size);
		memset(data_pos + new_size, 0, old_size - new_size);

		/* TODO zero appended space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */

		/* set new size for uvm */
		uvm_vnp_setsize(vp, new_size);

		udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
		UDF_UNLOCK_NODE(udf_node, 0);

		/* internal alloc records no logical blocks, hence lbrec == 0 */
		KASSERT(new_inflen == orig_inflen - size_diff);
		KASSERT(new_lbrec == orig_lbrec);
		KASSERT(new_lbrec == 0);

		return 0;
	}

	/* setup node cleanup extents copy space */
	node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
		M_UDFMNT, M_WAITOK);
	memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);

	/*
	 * Shrink the node by releasing the allocations and truncate the last
	 * allocation to the new size. If the new size fits into the
	 * allocation descriptor itself, transform it into an
	 * UDF_ICB_INTERN_ALLOC.
	 */
	slot = 0;
	cpy_slot = 0;
	foffset = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			/* ran off the end before reaching new_size */
			DPRINTF(WRITE,
				("Shrink node failed: "
				 "encountered EOF\n"));
			error = EINVAL;
			goto errorout; /* panic? */
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		/* redirect slots chain to extension blocks; keep walking */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > new_size)
			break;	/* found */

		/* extent lies entirely before new_size: keep it verbatim */
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* byte offset of the new EOF inside the straddling extent */
	slot_offset = new_size - foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	if (slot_offset > 0) {
		lb_num = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		if (flags == UDF_EXT_ALLOCATED) {
			/*
			 * First fully-freeable block is past the (possibly
			 * partial) block that still holds data, hence the
			 * round-UP on the lb_num advance; num_lb is then
			 * rounded DOWN so the kept partial block is not freed.
			 *
			 * NOTE(review): when neither slot_offset nor len is a
			 * multiple of lb_size, floor((len - slot_offset) /
			 * lb_size) can undercount by one vs.
			 * ceil(len) - ceil(slot_offset), leaving one block
			 * unfreed -- verify alignment guarantees of callers.
			 */
			/* note: round DOWN on num_lb */
			lb_num += (slot_offset + lb_size -1) / lb_size;
			num_lb = (len - slot_offset) / lb_size;

			udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
		}

		/* shorten the extent to exactly slot_offset bytes */
		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;
		slot++;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 3) delete remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t3: delete remainder "
			"vp %d lb %d, len %d, flags %d\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* only EXT_ALLOCATED extents occupy real blocks to free */
		if (flags == UDF_EXT_ALLOCATED) {
			lb_num = udf_rw32(s_ad.loc.lb_num);
			vpart_num = udf_rw16(s_ad.loc.part_num);
			num_lb = (len + lb_size - 1) / lb_size;

			udf_free_allocated_space(ump, lb_num, vpart_num,
				num_lb);
		}

		slot++;
	}

	/* 4) if it will fit into the descriptor then convert */
	if (new_size < max_l_ad) {
		/*
		 * rescue/evacuate old piece by reading it in, and convert it
		 * to internal alloc.
		 */
		if (new_size == 0) {
			/* XXX/TODO only for zero sizing now */
			udf_wipe_adslots(udf_node);

			/* switch the ICB tag over to internal allocation */
			icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
			icbflags |=  UDF_ICB_INTERN_ALLOC;
			icbtag->flags = udf_rw16(icbflags);

			inflen  -= size_diff;	KASSERT(inflen  == 0);
			objsize -= size_diff;
			l_ad     = new_size;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			/* eventually copy in evacuated piece */
			/* set new size for uvm */
			uvm_vnp_setsize(vp, new_size);

			free(node_ad_cpy, M_UDFMNT);
			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			UDF_UNLOCK_NODE(udf_node, 0);

			KASSERT(new_inflen == orig_inflen - size_diff);
			KASSERT(new_inflen == 0);
			KASSERT(new_lbrec == 0);

			return 0;
		}

		/*
		 * NOTE(review): 0 < new_size < max_l_ad could also be
		 * converted to internal alloc but isn't yet; the node keeps
		 * its external allocation and we only log the opportunity.
		 */
		printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
	}

	/* 5) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 6) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergeable (anymore): flush c_ad, start fresh */
			DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
				"len %d, flags %d\n",
				udf_rw16(c_ad.loc.part_num),
				udf_rw32(c_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(c_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error)
				goto errorout; /* panic? */
			c_ad = s_ad;
			slot++;
		}
	}

	/* 7) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error)
			goto errorout; /* panic? */
		;	/* NOTE(review): stray empty statement; harmless */
	}

	/* record the new sizes in the on-disk descriptor */
	inflen  -= size_diff;
	objsize -= size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	/* set new size for uvm */
	uvm_vnp_setsize(vp, new_size);

errorout:
	free(node_ad_cpy, M_UDFMNT);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	/*
	 * NOTE(review): this KASSERT is also reached via the errorout path,
	 * where the shrink may be incomplete -- confirm it cannot fire on
	 * the EINVAL/append-failure cases.
	 */
	KASSERT(new_inflen == orig_inflen - size_diff);

	return error;
}
2493
2494