udf_allocation.c revision 1.9 1 /* $NetBSD: udf_allocation.c,v 1.9 2008/07/02 13:25:33 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.9 2008/07/02 13:25:33 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
 * searching for the same or adjacent position again.
 */
87
88 /* --------------------------------------------------------------------- */
89 //#ifdef DEBUG
90 #if 1
91 #if 1
/*
 * Debug aid: dump all allocation descriptors recorded in a udf_node to the
 * console. Only prints when the UDF_DEBUG_ADWLK bit is set in udf_verbose.
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t *data_pos;
	int part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags;

	if ((udf_verbose & UDF_DEBUG_ADWLK) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		/* allocation descriptors follow the extended attributes */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* maximum room for allocation descriptors in this descriptor block */
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump:\n");
	printf("\tudf_node %p\n", udf_node);

	/* intern alloc embeds the file data in the descriptor itself */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		printf("\t\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\t\tInflen = %"PRIu64"\n", inflen);
	printf("\t\tl_ad = %d\n", l_ad);

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	/* walk and print the whole descriptor area, also past l_ad */
	printf("\t\t");
	for (ad_off = 0; ad_off < max_l_ad-adlen; ad_off += adlen) {
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len = udf_rw32(short_ad->len);
			lb_num = udf_rw32(short_ad->lb_num);
			part_num = -1;	/* short_ad has no partition field */
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		} else {
			long_ad = (struct long_ad *) (data_pos + ad_off);
			len = udf_rw32(long_ad->len);
			lb_num = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		}
		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags);
		printf("] ");
		/* mark where the recorded descriptors end */
		if (ad_off + adlen == l_ad)
			printf("\n\t\tl_ad END\n\t\t");
	}
	printf("\n");
}
179 #else
180 #define udf_node_dump(a)
181 #endif
182
183
/*
 * Debug check: verify that blocks lb_num .. lb_num + num_lb - 1 on virtual
 * partition vpart_num are marked busy in the partition's unallocated space
 * bitmap. In this bitmap a set bit means `free' (see udf_bitmap_allocate /
 * udf_bitmap_free), so a set bit is an inconsistency here; only a
 * diagnostic printf is issued, the hard KASSERT is commented out.
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(ALLOC, ("udf_assert_allocated: check virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			/* a set bit means the block is free; it should not be */
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}
253
254
/*
 * Debug check: recount the node's information length and recorded logical
 * blocks by walking its allocation descriptors and assert the totals match
 * the inf_len and logblks_rec values stored in the (extended) file entry.
 * The running totals are returned in *cnt_inflen and *cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t *data_pos;
	uint16_t part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags, whole_lb;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		logblksrec = udf_rw64(fe->logblks_rec);
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		/* allocation descriptors follow the extended attributes */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		logblksrec = udf_rw64(efe->logblks_rec);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* reset counters */
	*cnt_inflen     = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: l_ad is the data length itself */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	/* start counting */
	whole_lb = 1;
	for (ad_off = 0; ad_off < l_ad; ad_off += adlen) {
		/* only the last extent may end inside a logical block */
		KASSERT(whole_lb == 1);
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len = udf_rw32(short_ad->len);
			lb_num = udf_rw32(short_ad->lb_num);
			/* short_ads carry no partition; use the node's own */
			part_num = udf_rw16(udf_node->loc.loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		} else {
			long_ad = (struct long_ad *) (data_pos + ad_off);
			len = udf_rw32(long_ad->len);
			lb_num = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		}
		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* redirect (allocation extent) is exactly one lb */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
	if (0)
		udf_node_dump(udf_node);
}
358 #else
359 #define udf_node_sanity_check(a, b, c)
360 #endif
361
362 /* --------------------------------------------------------------------- */
363
364 int
365 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
366 uint32_t *lb_numres, uint32_t *extres)
367 {
368 struct part_desc *pdesc;
369 struct spare_map_entry *sme;
370 struct long_ad s_icb_loc;
371 uint64_t foffset, end_foffset;
372 uint32_t lb_size, len;
373 uint32_t lb_num, lb_rel, lb_packet;
374 uint32_t udf_rw32_lbmap, ext_offset;
375 uint16_t vpart;
376 int rel, part, error, eof, slot, flags;
377
378 assert(ump && icb_loc && lb_numres);
379
380 vpart = udf_rw16(icb_loc->loc.part_num);
381 lb_num = udf_rw32(icb_loc->loc.lb_num);
382 if (vpart > UDF_VTOP_RAWPART)
383 return EINVAL;
384
385 translate_again:
386 part = ump->vtop[vpart];
387 pdesc = ump->partitions[part];
388
389 switch (ump->vtop_tp[vpart]) {
390 case UDF_VTOP_TYPE_RAW :
391 /* 1:1 to the end of the device */
392 *lb_numres = lb_num;
393 *extres = INT_MAX;
394 return 0;
395 case UDF_VTOP_TYPE_PHYS :
396 /* transform into its disc logical block */
397 if (lb_num > udf_rw32(pdesc->part_len))
398 return EINVAL;
399 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
400
401 /* extent from here to the end of the partition */
402 *extres = udf_rw32(pdesc->part_len) - lb_num;
403 return 0;
404 case UDF_VTOP_TYPE_VIRT :
405 /* only maps one logical block, lookup in VAT */
406 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
407 return EINVAL;
408
409 /* lookup in virtual allocation table file */
410 mutex_enter(&ump->allocate_mutex);
411 error = udf_vat_read(ump->vat_node,
412 (uint8_t *) &udf_rw32_lbmap, 4,
413 ump->vat_offset + lb_num * 4);
414 mutex_exit(&ump->allocate_mutex);
415
416 if (error)
417 return error;
418
419 lb_num = udf_rw32(udf_rw32_lbmap);
420
421 /* transform into its disc logical block */
422 if (lb_num > udf_rw32(pdesc->part_len))
423 return EINVAL;
424 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
425
426 /* just one logical block */
427 *extres = 1;
428 return 0;
429 case UDF_VTOP_TYPE_SPARABLE :
430 /* check if the packet containing the lb_num is remapped */
431 lb_packet = lb_num / ump->sparable_packet_size;
432 lb_rel = lb_num % ump->sparable_packet_size;
433
434 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
435 sme = &ump->sparing_table->entries[rel];
436 if (lb_packet == udf_rw32(sme->org)) {
437 /* NOTE maps to absolute disc logical block! */
438 *lb_numres = udf_rw32(sme->map) + lb_rel;
439 *extres = ump->sparable_packet_size - lb_rel;
440 return 0;
441 }
442 }
443
444 /* transform into its disc logical block */
445 if (lb_num > udf_rw32(pdesc->part_len))
446 return EINVAL;
447 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
448
449 /* rest of block */
450 *extres = ump->sparable_packet_size - lb_rel;
451 return 0;
452 case UDF_VTOP_TYPE_META :
453 /* we have to look into the file's allocation descriptors */
454
455 /* use metadatafile allocation mutex */
456 lb_size = udf_rw32(ump->logical_vol->lb_size);
457
458 UDF_LOCK_NODE(ump->metadata_node, 0);
459
460 /* get first overlapping extent */
461 foffset = 0;
462 slot = 0;
463 for (;;) {
464 udf_get_adslot(ump->metadata_node,
465 slot, &s_icb_loc, &eof);
466 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
467 "len = %d, lb_num = %d, part = %d\n",
468 slot, eof,
469 UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
470 UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
471 udf_rw32(s_icb_loc.loc.lb_num),
472 udf_rw16(s_icb_loc.loc.part_num)));
473 if (eof) {
474 DPRINTF(TRANSLATE,
475 ("Meta partition translation "
476 "failed: can't seek location\n"));
477 UDF_UNLOCK_NODE(ump->metadata_node, 0);
478 return EINVAL;
479 }
480 len = udf_rw32(s_icb_loc.len);
481 flags = UDF_EXT_FLAGS(len);
482 len = UDF_EXT_LEN(len);
483
484 if (flags == UDF_EXT_REDIRECT) {
485 slot++;
486 continue;
487 }
488
489 end_foffset = foffset + len;
490
491 if (end_foffset > lb_num * lb_size)
492 break; /* found */
493 foffset = end_foffset;
494 slot++;
495 }
496 /* found overlapping slot */
497 ext_offset = lb_num * lb_size - foffset;
498
499 /* process extent offset */
500 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
501 vpart = udf_rw16(s_icb_loc.loc.part_num);
502 lb_num += (ext_offset + lb_size -1) / lb_size;
503 len -= ext_offset;
504 ext_offset = 0;
505
506 flags = UDF_EXT_FLAGS(s_icb_loc.len);
507
508 UDF_UNLOCK_NODE(ump->metadata_node, 0);
509 if (flags != UDF_EXT_ALLOCATED) {
510 DPRINTF(TRANSLATE, ("Metadata partition translation "
511 "failed: not allocated\n"));
512 return EINVAL;
513 }
514
515 /*
516 * vpart and lb_num are updated, translate again since we
517 * might be mapped on sparable media
518 */
519 goto translate_again;
520 default:
521 printf("UDF vtop translation scheme %d unimplemented yet\n",
522 ump->vtop_tp[vpart]);
523 }
524
525 return EINVAL;
526 }
527
528 /* --------------------------------------------------------------------- */
529
/*
 * Translate an extent (in logical blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */
534
535 int
536 udf_translate_file_extent(struct udf_node *udf_node,
537 uint32_t from, uint32_t num_lb,
538 uint64_t *map)
539 {
540 struct udf_mount *ump;
541 struct icb_tag *icbtag;
542 struct long_ad t_ad, s_ad;
543 uint64_t transsec;
544 uint64_t foffset, end_foffset;
545 uint32_t transsec32;
546 uint32_t lb_size;
547 uint32_t ext_offset;
548 uint32_t lb_num, len;
549 uint32_t overlap, translen;
550 uint16_t vpart_num;
551 int eof, error, flags;
552 int slot, addr_type, icbflags;
553
554 if (!udf_node)
555 return ENOENT;
556
557 KASSERT(num_lb > 0);
558
559 UDF_LOCK_NODE(udf_node, 0);
560
561 /* initialise derivative vars */
562 ump = udf_node->ump;
563 lb_size = udf_rw32(ump->logical_vol->lb_size);
564
565 if (udf_node->fe) {
566 icbtag = &udf_node->fe->icbtag;
567 } else {
568 icbtag = &udf_node->efe->icbtag;
569 }
570 icbflags = udf_rw16(icbtag->flags);
571 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
572
573 /* do the work */
574 if (addr_type == UDF_ICB_INTERN_ALLOC) {
575 *map = UDF_TRANS_INTERN;
576 UDF_UNLOCK_NODE(udf_node, 0);
577 return 0;
578 }
579
580 /* find first overlapping extent */
581 foffset = 0;
582 slot = 0;
583 for (;;) {
584 udf_get_adslot(udf_node, slot, &s_ad, &eof);
585 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
586 "lb_num = %d, part = %d\n", slot, eof,
587 UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
588 UDF_EXT_LEN(udf_rw32(s_ad.len)),
589 udf_rw32(s_ad.loc.lb_num),
590 udf_rw16(s_ad.loc.part_num)));
591 if (eof) {
592 DPRINTF(TRANSLATE,
593 ("Translate file extent "
594 "failed: can't seek location\n"));
595 UDF_UNLOCK_NODE(udf_node, 0);
596 return EINVAL;
597 }
598 len = udf_rw32(s_ad.len);
599 flags = UDF_EXT_FLAGS(len);
600 len = UDF_EXT_LEN(len);
601 lb_num = udf_rw32(s_ad.loc.lb_num);
602
603 if (flags == UDF_EXT_REDIRECT) {
604 slot++;
605 continue;
606 }
607
608 end_foffset = foffset + len;
609
610 if (end_foffset > from * lb_size)
611 break; /* found */
612 foffset = end_foffset;
613 slot++;
614 }
615 /* found overlapping slot */
616 ext_offset = from * lb_size - foffset;
617
618 for (;;) {
619 udf_get_adslot(udf_node, slot, &s_ad, &eof);
620 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
621 "lb_num = %d, part = %d\n", slot, eof,
622 UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
623 UDF_EXT_LEN(udf_rw32(s_ad.len)),
624 udf_rw32(s_ad.loc.lb_num),
625 udf_rw16(s_ad.loc.part_num)));
626 if (eof) {
627 DPRINTF(TRANSLATE,
628 ("Translate file extent "
629 "failed: past eof\n"));
630 UDF_UNLOCK_NODE(udf_node, 0);
631 return EINVAL;
632 }
633
634 len = udf_rw32(s_ad.len);
635 flags = UDF_EXT_FLAGS(len);
636 len = UDF_EXT_LEN(len);
637
638 lb_num = udf_rw32(s_ad.loc.lb_num);
639 vpart_num = udf_rw16(s_ad.loc.part_num);
640
641 end_foffset = foffset + len;
642
643 /* process extent, don't forget to advance on ext_offset! */
644 lb_num += (ext_offset + lb_size -1) / lb_size;
645 overlap = (len - ext_offset + lb_size -1) / lb_size;
646 ext_offset = 0;
647
648 /*
649 * note that the while(){} is nessisary for the extent that
650 * the udf_translate_vtop() returns doens't have to span the
651 * whole extent.
652 */
653
654 overlap = MIN(overlap, num_lb);
655 while (overlap && (flags != UDF_EXT_REDIRECT)) {
656 switch (flags) {
657 case UDF_EXT_FREE :
658 case UDF_EXT_ALLOCATED_BUT_NOT_USED :
659 transsec = UDF_TRANS_ZERO;
660 translen = overlap;
661 while (overlap && num_lb && translen) {
662 *map++ = transsec;
663 lb_num++;
664 overlap--; num_lb--; translen--;
665 }
666 break;
667 case UDF_EXT_ALLOCATED :
668 t_ad.loc.lb_num = udf_rw32(lb_num);
669 t_ad.loc.part_num = udf_rw16(vpart_num);
670 error = udf_translate_vtop(ump,
671 &t_ad, &transsec32, &translen);
672 transsec = transsec32;
673 if (error) {
674 UDF_UNLOCK_NODE(udf_node, 0);
675 return error;
676 }
677 while (overlap && num_lb && translen) {
678 *map++ = transsec;
679 lb_num++; transsec++;
680 overlap--; num_lb--; translen--;
681 }
682 break;
683 default:
684 DPRINTF(TRANSLATE,
685 ("Translate file extent "
686 "failed: bad flags %x\n", flags));
687 UDF_UNLOCK_NODE(udf_node, 0);
688 return EINVAL;
689 }
690 }
691 if (num_lb == 0)
692 break;
693
694 if (flags != UDF_EXT_REDIRECT)
695 foffset = end_foffset;
696 slot++;
697 }
698 UDF_UNLOCK_NODE(udf_node, 0);
699
700 return 0;
701 }
702
703 /* --------------------------------------------------------------------- */
704
705 static int
706 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
707 {
708 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
709 uint8_t *blob;
710 int entry, chunk, found, error;
711
712 KASSERT(ump);
713 KASSERT(ump->logical_vol);
714
715 lb_size = udf_rw32(ump->logical_vol->lb_size);
716 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
717
718 /* TODO static allocation of search chunk */
719
720 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
721 found = 0;
722 error = 0;
723 entry = 0;
724 do {
725 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
726 if (chunk <= 0)
727 break;
728 /* load in chunk */
729 error = udf_vat_read(ump->vat_node, blob, chunk,
730 ump->vat_offset + lb_num * 4);
731
732 if (error)
733 break;
734
735 /* search this chunk */
736 for (entry=0; entry < chunk /4; entry++, lb_num++) {
737 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
738 lb_map = udf_rw32(udf_rw32_lbmap);
739 if (lb_map == 0xffffffff) {
740 found = 1;
741 break;
742 }
743 }
744 } while (!found);
745 if (error) {
746 printf("udf_search_free_vatloc: error reading in vat chunk "
747 "(lb %d, size %d)\n", lb_num, chunk);
748 }
749
750 if (!found) {
751 /* extend VAT */
752 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
753 lb_num = ump->vat_entries;
754 ump->vat_entries++;
755 }
756
757 /* mark entry with initialiser just in case */
758 lb_map = udf_rw32(0xfffffffe);
759 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
760 ump->vat_offset + lb_num *4);
761 ump->vat_last_free_lb = lb_num;
762
763 free(blob, M_UDFTEMP);
764 *lbnumres = lb_num;
765 return 0;
766 }
767
768
/*
 * Allocate up to *num_lb logical blocks from the given space bitmap. A set
 * bit means the block is free; allocating clears the bit. For every block
 * handed out, the partition relative number is appended to lmappos and the
 * absolute disc block number (offset by ptov) to pmappos. *num_lb is
 * decremented per block; a nonzero remainder on return means the bitmap
 * ran out of free blocks. Blocks are not guaranteed to be contiguous.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	/* resume the rolling pointer for this data kind, byte aligned */
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;
	/* two passes: from the rolling pointer to the end, then wrapped */
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos); /* returns 0 or 1..8 */
			if (bit == 0) {
				/* whole byte allocated; skip it */
				offset += 8;
				continue;
			}
			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*pmappos++ = lb_num + ptov;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember the rolling pointer position for next time */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
818
819
820 static void
821 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
822 {
823 uint32_t offset;
824 uint32_t bit, bitval;
825 uint8_t *bpos;
826
827 offset = lb_num;
828
829 /* starter bits */
830 bpos = bitmap->bits + offset/8;
831 bit = offset % 8;
832 while ((bit != 0) && (num_lb > 0)) {
833 bitval = (1 << bit);
834 KASSERT((*bpos & bitval) == 0);
835 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
836 offset, bpos, bit));
837 *bpos |= bitval;
838 offset++; num_lb--;
839 bit = (bit + 1) % 8;
840 }
841 if (num_lb == 0)
842 return;
843
844 /* whole bytes */
845 KASSERT(bit == 0);
846 bpos = bitmap->bits + offset / 8;
847 while (num_lb >= 8) {
848 KASSERT((*bpos == 0));
849 DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
850 *bpos = 255;
851 offset += 8; num_lb -= 8;
852 bpos++;
853 }
854
855 /* stop bits */
856 KASSERT(num_lb < 8);
857 bit = 0;
858 while (num_lb > 0) {
859 bitval = (1 << bit);
860 KASSERT((*bpos & bitval) == 0);
861 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
862 offset, bpos, bit));
863 *bpos |= bitval;
864 offset++; num_lb--;
865 bit = (bit + 1) % 8;
866 }
867 }
868
869
/*
 * Allocate num_lb block numbers for the given data kind using the passed
 * allocation scheme. The partition allocated from is returned in
 * *alloc_partp; per block the partition relative number goes into
 * lmapping[] and the absolute disc block number into pmapping[]. Returns
 * 0 or ENOSPC. NOTE(review): only sequential allocation is inherently
 * contiguous; spacemap allocation via udf_bitmap_allocate may not be.
 */
static int
udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
	int num_lb, uint16_t *alloc_partp,
	uint64_t *lmapping, uint64_t *pmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos, *pmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_part;
	int error;

	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* select source partition/track by data kind */
	if (ismetadata) {
		alloc_part  = ump->metadata_part;
		alloc_track = &ump->metadata_track;
		other_track = &ump->data_track;
	} else {
		alloc_part  = ump->data_part;
		alloc_track = &ump->data_track;
		other_track = &ump->metadata_track;
	}

	*alloc_partp = alloc_part;

	error = 0;
	/* XXX check disc space */

	pdesc = ump->partitions[ump->vtop[alloc_part]];
	lmappos = lmapping;
	pmappos = pmapping;

	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;
			*pmappos = 0;	/* will get late-allocated */
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*pmappos++ = alloc_track->next_writable;
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}
		/* keep both track infos in sync if they share a track */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		ptov = udf_rw32(pdesc->start_loc);

		/* allocate on unallocated bits page */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[alloc_part];
		udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
			pmappos, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		/* a nonzero rest means the bitmap ran out of free blocks */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + alloc_part;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :
	case UDF_ALLOC_METASEQUENTIAL :
	case UDF_ALLOC_RELAXEDSEQUENTIAL :
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		pmappos = pmapping;
		printf("udf_allocate_space, mapping l->p:\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("\t%"PRIu64" -> %"PRIu64"\n",
				*lmappos++, *pmappos++);
		}
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
986
987 /* --------------------------------------------------------------------- */
988
/*
 * Return blocks lb_num .. lb_num + num_lb - 1 on virtual partition
 * vpart_num to the free space administration. For bitmap backed
 * partitions the blocks go to the freed space bitmap when present, else
 * to the unallocated space bitmap, and the logical volume integrity free
 * space count is raised. For virtual (VAT) partitions the single VAT
 * entry is stamped free (0xffffffff).
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;	/* only examined by the KASSERT below */

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember the lowest free entry to speed up the next search */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1060
1061 /* --------------------------------------------------------------------- */
1062
1063 int
1064 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
1065 uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
1066 {
1067 int ismetadata, alloc_type;
1068
1069 ismetadata = (udf_c_type == UDF_C_NODE);
1070 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
1071
1072 #ifdef DIAGNOSTIC
1073 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
1074 panic("udf_pre_allocate_space: bad c_type on VAT!\n");
1075 }
1076 #endif
1077
1078 /* reserve size for VAT allocated data */
1079 if (alloc_type == UDF_ALLOC_VAT) {
1080 mutex_enter(&ump->allocate_mutex);
1081 ump->uncomitted_lb += num_lb;
1082 mutex_exit(&ump->allocate_mutex);
1083 }
1084
1085 return udf_allocate_space(ump, ismetadata, alloc_type,
1086 num_lb, alloc_partp, lmapping, pmapping);
1087 }
1088
1089 /* --------------------------------------------------------------------- */
1090
/*
 * Allocate a buf on disc for direct write out. The space doesn't have to be
 * contiguous as the caller takes care of this.
 *
 * The allocated positions are written to lmapping[]/pmapping[] and
 * buf->b_blkno is pointed at the first allocated sector. For userdata and
 * FID bufs the new positions are also recorded in the owning node.
 */

void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	uint16_t vpart_num;
	int lb_size, blks, udf_c_type;
	int ismetadata, alloc_type;
	int num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size    = udf_rw32(ump->logical_vol->lb_size);
	num_lb     = (buf->b_bcount + lb_size -1) / lb_size;
	blks       = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	ismetadata = (udf_c_type == UDF_C_NODE);
	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;

#ifdef DIAGNOSTIC
	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
		panic("udf_late_allocate_buf: bad c_type on VAT!\n");
	}
#endif

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, its allready allocated */
		if (alloc_type != UDF_ALLOC_VAT)
			return;

		/* allocate sequential */
		alloc_type = UDF_ALLOC_SEQUENTIAL;
	}

	error = udf_allocate_space(ump, ismetadata, alloc_type,
			num_lb, &vpart_num, lmapping, pmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* commit our sector count; reserved in udf_pre_allocate_space */
	mutex_enter(&ump->allocate_mutex);
	if (num_lb > ump->uncomitted_lb) {
		ump->uncomitted_lb = 0;
	} else {
		ump->uncomitted_lb -= num_lb;
	}
	mutex_exit(&ump->allocate_mutex);

	/* b_blkno is expressed in DEV_BSIZE units */
	buf->b_blkno = (*pmapping) * blks;

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
			udf_node->outstanding_bufs--;
		splx(s);
	}
}
1167
1168 /* --------------------------------------------------------------------- */
1169
1170 /*
1171 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1172 * possible (anymore); a2 returns the rest piece.
1173 */
1174
1175 static int
1176 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1177 {
1178 uint32_t max_len, merge_len;
1179 uint32_t a1_len, a2_len;
1180 uint32_t a1_flags, a2_flags;
1181 uint32_t a1_lbnum, a2_lbnum;
1182 uint16_t a1_part, a2_part;
1183
1184 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1185
1186 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1187 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1188 a1_lbnum = udf_rw32(a1->loc.lb_num);
1189 a1_part = udf_rw16(a1->loc.part_num);
1190
1191 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1192 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1193 a2_lbnum = udf_rw32(a2->loc.lb_num);
1194 a2_part = udf_rw16(a2->loc.part_num);
1195
1196 /* defines same space */
1197 if (a1_flags != a2_flags)
1198 return 1;
1199
1200 if (a1_flags != UDF_EXT_FREE) {
1201 /* the same partition */
1202 if (a1_part != a2_part)
1203 return 1;
1204
1205 /* a2 is successor of a1 */
1206 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1207 return 1;
1208 }
1209
1210 /* merge as most from a2 if possible */
1211 merge_len = MIN(a2_len, max_len - a1_len);
1212 a1_len += merge_len;
1213 a2_len -= merge_len;
1214 a2_lbnum += merge_len/lb_size;
1215
1216 a1->len = udf_rw32(a1_len | a1_flags);
1217 a2->len = udf_rw32(a2_len | a2_flags);
1218 a2->loc.lb_num = udf_rw32(a2_lbnum);
1219
1220 if (a2_len > 0)
1221 return 1;
1222
1223 /* there is space over to merge */
1224 return 0;
1225 }
1226
1227 /* --------------------------------------------------------------------- */
1228
1229 static void
1230 udf_wipe_adslots(struct udf_node *udf_node)
1231 {
1232 struct file_entry *fe;
1233 struct extfile_entry *efe;
1234 struct alloc_ext_entry *ext;
1235 uint64_t inflen, objsize;
1236 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1237 uint8_t *data_pos;
1238 int extnr;
1239
1240 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1241
1242 fe = udf_node->fe;
1243 efe = udf_node->efe;
1244 if (fe) {
1245 inflen = udf_rw64(fe->inf_len);
1246 objsize = inflen;
1247 dscr_size = sizeof(struct file_entry) -1;
1248 l_ea = udf_rw32(fe->l_ea);
1249 l_ad = udf_rw32(fe->l_ad);
1250 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1251 } else {
1252 inflen = udf_rw64(efe->inf_len);
1253 objsize = udf_rw64(efe->obj_size);
1254 dscr_size = sizeof(struct extfile_entry) -1;
1255 l_ea = udf_rw32(efe->l_ea);
1256 l_ad = udf_rw32(efe->l_ad);
1257 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1258 }
1259 max_l_ad = lb_size - dscr_size - l_ea;
1260
1261 /* wipe fe/efe */
1262 memset(data_pos, 0, max_l_ad);
1263 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1264 if (fe) {
1265 fe->l_ad = udf_rw32(0);
1266 fe->logblks_rec = udf_rw64(0);
1267 fe->tag.desc_crc_len = udf_rw32(crclen);
1268 } else {
1269 efe->l_ad = udf_rw32(0);
1270 efe->logblks_rec = udf_rw64(0);
1271 efe->tag.desc_crc_len = udf_rw32(crclen);
1272 }
1273
1274 /* wipe all allocation extent entries */
1275 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1276 ext = udf_node->ext[extnr];
1277 dscr_size = sizeof(struct alloc_ext_entry) -1;
1278 max_l_ad = lb_size - dscr_size;
1279 memset(data_pos, 0, max_l_ad);
1280 ext->l_ad = udf_rw32(0);
1281
1282 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1283 ext->tag.desc_crc_len = udf_rw32(crclen);
1284 }
1285 }
1286
1287 /* --------------------------------------------------------------------- */
1288
1289 void
1290 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1291 int *eof) {
1292 struct file_entry *fe;
1293 struct extfile_entry *efe;
1294 struct alloc_ext_entry *ext;
1295 struct icb_tag *icbtag;
1296 struct short_ad *short_ad;
1297 struct long_ad *long_ad;
1298 uint32_t offset;
1299 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad;
1300 uint8_t *data_pos;
1301 int icbflags, addr_type, adlen, extnr;
1302
1303 /* determine what descriptor we are in */
1304 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1305
1306 fe = udf_node->fe;
1307 efe = udf_node->efe;
1308 if (fe) {
1309 icbtag = &fe->icbtag;
1310 dscr_size = sizeof(struct file_entry) -1;
1311 l_ea = udf_rw32(fe->l_ea);
1312 l_ad = udf_rw32(fe->l_ad);
1313 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1314 } else {
1315 icbtag = &efe->icbtag;
1316 dscr_size = sizeof(struct extfile_entry) -1;
1317 l_ea = udf_rw32(efe->l_ea);
1318 l_ad = udf_rw32(efe->l_ad);
1319 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1320 }
1321 max_l_ad = lb_size - dscr_size - l_ea;
1322
1323 icbflags = udf_rw16(icbtag->flags);
1324 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1325
1326 /* just in case we're called on an intern, its EOF */
1327 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1328 memset(icb, 0, sizeof(struct long_ad));
1329 *eof = 1;
1330 return;
1331 }
1332
1333 adlen = 0;
1334 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1335 adlen = sizeof(struct short_ad);
1336 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1337 adlen = sizeof(struct long_ad);
1338 }
1339
1340 /* if offset too big, we go to the allocation extensions */
1341 offset = slot * adlen;
1342 extnr = -1;
1343 while (offset >= max_l_ad) {
1344 extnr++;
1345 offset -= max_l_ad;
1346 ext = udf_node->ext[extnr];
1347 dscr_size = sizeof(struct alloc_ext_entry) -1;
1348 l_ad = udf_rw32(ext->l_ad);
1349 max_l_ad = lb_size - dscr_size;
1350 data_pos = (uint8_t *) ext + dscr_size;
1351 if (extnr > udf_node->num_extensions) {
1352 l_ad = 0; /* force EOF */
1353 break;
1354 }
1355 }
1356
1357 *eof = (offset >= l_ad) || (l_ad == 0);
1358 if (*eof) {
1359 memset(icb, 0, sizeof(struct long_ad));
1360 return;
1361 }
1362
1363 /* get the element */
1364 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1365 short_ad = (struct short_ad *) (data_pos + offset);
1366 icb->len = short_ad->len;
1367 icb->loc.part_num = udf_node->loc.loc.part_num;
1368 icb->loc.lb_num = short_ad->lb_num;
1369 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1370 long_ad = (struct long_ad *) (data_pos + offset);
1371 *icb = *long_ad;
1372 }
1373 }
1374
1375 /* --------------------------------------------------------------------- */
1376
1377 int
1378 udf_append_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb) {
1379 union dscrptr *dscr;
1380 struct file_entry *fe;
1381 struct extfile_entry *efe;
1382 struct alloc_ext_entry *ext;
1383 struct icb_tag *icbtag;
1384 struct short_ad *short_ad;
1385 struct long_ad *long_ad, o_icb;
1386 uint64_t logblks_rec, *logblks_rec_p;
1387 uint32_t offset, rest, len;
1388 uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
1389 uint8_t *data_pos;
1390 int icbflags, addr_type, adlen, extnr;
1391
1392 /* determine what descriptor we are in */
1393 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1394
1395 fe = udf_node->fe;
1396 efe = udf_node->efe;
1397 if (fe) {
1398 icbtag = &fe->icbtag;
1399 dscr = (union dscrptr *) fe;
1400 dscr_size = sizeof(struct file_entry) -1;
1401
1402 l_ea = udf_rw32(fe->l_ea);
1403 l_ad_p = &fe->l_ad;
1404 logblks_rec_p = &fe->logblks_rec;
1405 } else {
1406 icbtag = &efe->icbtag;
1407 dscr = (union dscrptr *) efe;
1408 dscr_size = sizeof(struct extfile_entry) -1;
1409
1410 l_ea = udf_rw32(efe->l_ea);
1411 l_ad_p = &efe->l_ad;
1412 logblks_rec_p = &efe->logblks_rec;
1413 }
1414 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
1415 max_l_ad = lb_size - dscr_size - l_ea;
1416
1417 icbflags = udf_rw16(icbtag->flags);
1418 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1419
1420 /* just in case we're called on an intern, its EOF */
1421 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1422 panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
1423 }
1424
1425 adlen = 0;
1426 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1427 adlen = sizeof(struct short_ad);
1428 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1429 adlen = sizeof(struct long_ad);
1430 }
1431
1432 /* clean up given long_ad */
1433 #ifdef DIAGNOSTIC
1434 if (UDF_EXT_FLAGS(udf_rw32(icb->len)) == UDF_EXT_FREE) {
1435 if ((udf_rw16(icb->loc.part_num) != 0) ||
1436 (udf_rw32(icb->loc.lb_num) != 0))
1437 printf("UDF: warning, cleaning long_ad marked free\n");
1438 icb->loc.part_num = udf_rw16(0);
1439 icb->loc.lb_num = udf_rw32(0);
1440 }
1441 #endif
1442
1443 /* if offset too big, we go to the allocation extensions */
1444 offset = slot * adlen;
1445 extnr = 0;
1446 while (offset > max_l_ad) {
1447 offset -= max_l_ad;
1448 ext = udf_node->ext[extnr];
1449 dscr = (union dscrptr *) ext;
1450 dscr_size = sizeof(struct alloc_ext_entry) -1;
1451
1452 KASSERT(ext != NULL);
1453 l_ad_p = &ext->l_ad;
1454 max_l_ad = lb_size - dscr_size;
1455 data_pos = (uint8_t *) dscr + dscr_size;
1456
1457 extnr++;
1458 }
1459 /* offset is offset within the current (E)FE/AED */
1460 l_ad = udf_rw32(*l_ad_p);
1461 crclen = udf_rw32(dscr->tag.desc_crc_len);
1462 logblks_rec = udf_rw64(*logblks_rec_p);
1463
1464 if (extnr > udf_node->num_extensions)
1465 return EFBIG; /* too fragmented */
1466
1467 /* overwriting old piece? */
1468 if (offset < l_ad) {
1469 /* overwrite entry; compensate for the old element */
1470 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1471 short_ad = (struct short_ad *) (data_pos + offset);
1472 o_icb.len = short_ad->len;
1473 o_icb.loc.part_num = udf_rw16(0); /* ignore */
1474 o_icb.loc.lb_num = short_ad->lb_num;
1475 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1476 long_ad = (struct long_ad *) (data_pos + offset);
1477 o_icb = *long_ad;
1478 } else {
1479 panic("Invalid address type in udf_append_adslot\n");
1480 }
1481
1482 len = udf_rw32(o_icb.len);
1483 if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
1484 /* adjust counts */
1485 len = UDF_EXT_LEN(len);
1486 logblks_rec -= (len + lb_size -1) / lb_size;
1487 }
1488 }
1489
1490 /* calculate rest space in this descriptor */
1491 rest = max_l_ad - offset;
1492 if (rest <= adlen) {
1493 /* create redirect and link new allocation extension */
1494 printf("udf_append_to_adslot: can't create allocation extention yet\n");
1495 return EFBIG;
1496 }
1497
1498 /* write out the element */
1499 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1500 short_ad = (struct short_ad *) (data_pos + offset);
1501 short_ad->len = icb->len;
1502 short_ad->lb_num = icb->loc.lb_num;
1503 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1504 long_ad = (struct long_ad *) (data_pos + offset);
1505 *long_ad = *icb;
1506 }
1507
1508 /* adjust logblks recorded count */
1509 if (UDF_EXT_FLAGS(icb->len) == UDF_EXT_ALLOCATED)
1510 logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
1511 *logblks_rec_p = udf_rw64(logblks_rec);
1512
1513 /* adjust l_ad and crclen when needed */
1514 if (offset >= l_ad) {
1515 l_ad += adlen;
1516 crclen += adlen;
1517 dscr->tag.desc_crc_len = udf_rw32(crclen);
1518 *l_ad_p = udf_rw32(l_ad);
1519 }
1520
1521 return 0;
1522 }
1523
1524 /* --------------------------------------------------------------------- */
1525
1526 /*
1527 * Adjust the node's allocation descriptors to reflect the new mapping; do
1528 * take note that we might glue to existing allocation descriptors.
1529 *
1530 * XXX Note there can only be one allocation being recorded/mount; maybe
1531 * explicit allocation in shedule thread?
1532 */
1533
1534 static void
1535 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
1536 uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
1537 {
1538 struct vnode *vp = buf->b_vp;
1539 struct udf_node *udf_node = VTOI(vp);
1540 struct file_entry *fe;
1541 struct extfile_entry *efe;
1542 struct icb_tag *icbtag;
1543 struct long_ad s_ad, c_ad;
1544 uint64_t inflen, from, till;
1545 uint64_t foffset, end_foffset, restart_foffset;
1546 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
1547 uint32_t num_lb, len, flags, lb_num;
1548 uint32_t run_start;
1549 uint32_t slot_offset, replace_len, replace;
1550 int addr_type, icbflags;
1551 int udf_c_type = buf->b_udf_c_type;
1552 int lb_size, run_length, eof;
1553 int slot, cpy_slot, cpy_slots, restart_slot;
1554 int error;
1555
1556 DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
1557
1558 /* sanity check ... should be panic ? */
1559 if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
1560 return;
1561
1562 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1563
1564 /* do the job */
1565 UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
1566 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
1567
1568 fe = udf_node->fe;
1569 efe = udf_node->efe;
1570 if (fe) {
1571 icbtag = &fe->icbtag;
1572 inflen = udf_rw64(fe->inf_len);
1573 } else {
1574 icbtag = &efe->icbtag;
1575 inflen = udf_rw64(efe->inf_len);
1576 }
1577
1578 /* do check if `till' is not past file information length */
1579 from = buf->b_lblkno * lb_size;
1580 till = MIN(inflen, from + buf->b_resid);
1581
1582 num_lb = (till - from + lb_size -1) / lb_size;
1583
1584 DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
1585
1586 icbflags = udf_rw16(icbtag->flags);
1587 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1588
1589 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1590 /* nothing to do */
1591 /* XXX clean up rest of node? just in case? */
1592 UDF_UNLOCK_NODE(udf_node, 0);
1593 return;
1594 }
1595
1596 slot = 0;
1597 cpy_slot = 0;
1598 foffset = 0;
1599
1600 /* 1) copy till first overlap piece to the rewrite buffer */
1601 for (;;) {
1602 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1603 if (eof) {
1604 DPRINTF(WRITE,
1605 ("Record allocation in node "
1606 "failed: encountered EOF\n"));
1607 UDF_UNLOCK_NODE(udf_node, 0);
1608 buf->b_error = EINVAL;
1609 return;
1610 }
1611 len = udf_rw32(s_ad.len);
1612 flags = UDF_EXT_FLAGS(len);
1613 len = UDF_EXT_LEN(len);
1614
1615 if (flags == UDF_EXT_REDIRECT) {
1616 slot++;
1617 continue;
1618 }
1619
1620 end_foffset = foffset + len;
1621 if (end_foffset > from)
1622 break; /* found */
1623
1624 node_ad_cpy[cpy_slot++] = s_ad;
1625
1626 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
1627 "-> stack\n",
1628 udf_rw16(s_ad.loc.part_num),
1629 udf_rw32(s_ad.loc.lb_num),
1630 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1631 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1632
1633 foffset = end_foffset;
1634 slot++;
1635 }
1636 restart_slot = slot;
1637 restart_foffset = foffset;
1638
1639 /* 2) trunc overlapping slot at overlap and copy it */
1640 slot_offset = from - foffset;
1641 if (slot_offset > 0) {
1642 DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
1643 slot_offset, flags >> 30, flags));
1644
1645 s_ad.len = udf_rw32(slot_offset | flags);
1646 node_ad_cpy[cpy_slot++] = s_ad;
1647
1648 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
1649 "-> stack\n",
1650 udf_rw16(s_ad.loc.part_num),
1651 udf_rw32(s_ad.loc.lb_num),
1652 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1653 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1654 }
1655 foffset += slot_offset;
1656
1657 /* 3) insert new mappings */
1658 memset(&s_ad, 0, sizeof(struct long_ad));
1659 lb_num = 0;
1660 for (lb_num = 0; lb_num < num_lb; lb_num++) {
1661 run_start = mapping[lb_num];
1662 run_length = 1;
1663 while (lb_num < num_lb-1) {
1664 if (mapping[lb_num+1] != mapping[lb_num]+1)
1665 if (mapping[lb_num+1] != mapping[lb_num])
1666 break;
1667 run_length++;
1668 lb_num++;
1669 }
1670 /* insert slot for this mapping */
1671 len = run_length * lb_size;
1672
1673 /* bounds checking */
1674 if (foffset + len > till)
1675 len = till - foffset;
1676 KASSERT(foffset + len <= inflen);
1677
1678 s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
1679 s_ad.loc.part_num = udf_rw16(vpart_num);
1680 s_ad.loc.lb_num = udf_rw32(run_start);
1681
1682 foffset += len;
1683
1684 /* paranoia */
1685 if (len == 0) {
1686 DPRINTF(WRITE,
1687 ("Record allocation in node "
1688 "failed: insert failed\n"));
1689 UDF_UNLOCK_NODE(udf_node, 0);
1690 buf->b_error = EINVAL;
1691 return;
1692 }
1693 node_ad_cpy[cpy_slot++] = s_ad;
1694
1695 DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
1696 "flags %d -> stack\n",
1697 udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
1698 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1699 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1700 }
1701
1702 /* 4) pop replaced length */
1703 slot = restart_slot;
1704 foffset = restart_foffset;
1705
1706 replace_len = till - foffset; /* total amount of bytes to pop */
1707 slot_offset = from - foffset; /* offset in first encounted slot */
1708 KASSERT((slot_offset % lb_size) == 0);
1709
1710 for (;;) {
1711 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1712 if (eof)
1713 break;
1714
1715 len = udf_rw32(s_ad.len);
1716 flags = UDF_EXT_FLAGS(len);
1717 len = UDF_EXT_LEN(len);
1718 lb_num = udf_rw32(s_ad.loc.lb_num);
1719
1720 if (flags == UDF_EXT_REDIRECT) {
1721 slot++;
1722 continue;
1723 }
1724
1725 DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
1726 "replace_len %d, "
1727 "vp %d, lb %d, len %d, flags %d\n",
1728 slot, slot_offset, replace_len,
1729 udf_rw16(s_ad.loc.part_num),
1730 udf_rw32(s_ad.loc.lb_num),
1731 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1732 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1733
1734 /* adjust for slot offset */
1735 if (slot_offset) {
1736 DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
1737 lb_num += slot_offset / lb_size;
1738 len -= slot_offset;
1739 foffset += slot_offset;
1740 replace_len -= slot_offset;
1741
1742 /* mark adjusted */
1743 slot_offset = 0;
1744 }
1745
1746 /* advance for (the rest of) this slot */
1747 replace = MIN(len, replace_len);
1748 DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
1749
1750 /* advance for this slot */
1751 if (replace) {
1752 num_lb = (replace + lb_size - 1) / lb_size;
1753 if (flags != UDF_EXT_FREE) {
1754 udf_free_allocated_space(ump, lb_num,
1755 udf_rw16(s_ad.loc.part_num), num_lb);
1756 }
1757 lb_num += num_lb;
1758 len -= replace;
1759 foffset += replace;
1760 replace_len -= replace;
1761 }
1762
1763 /* do we have a slot tail ? */
1764 if (len) {
1765 KASSERT(foffset % lb_size == 0);
1766
1767 /* we arrived at our point, push remainder */
1768 s_ad.len = udf_rw32(len | flags);
1769 s_ad.loc.lb_num = udf_rw32(lb_num);
1770 if (flags == UDF_EXT_FREE)
1771 s_ad.loc.lb_num = udf_rw32(0);
1772 node_ad_cpy[cpy_slot++] = s_ad;
1773 foffset += len;
1774 slot++;
1775
1776 DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
1777 "-> stack\n",
1778 udf_rw16(s_ad.loc.part_num),
1779 udf_rw32(s_ad.loc.lb_num),
1780 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1781 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1782 break;
1783 }
1784
1785 slot++;
1786 }
1787
1788 /* 5) copy remainder */
1789 for (;;) {
1790 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1791 if (eof)
1792 break;
1793
1794 len = udf_rw32(s_ad.len);
1795 flags = UDF_EXT_FLAGS(len);
1796 len = UDF_EXT_LEN(len);
1797
1798 if (flags == UDF_EXT_REDIRECT) {
1799 slot++;
1800 continue;
1801 }
1802
1803 node_ad_cpy[cpy_slot++] = s_ad;
1804
1805 DPRINTF(ALLOC, ("\t5: insert new mapping "
1806 "vp %d lb %d, len %d, flags %d "
1807 "-> stack\n",
1808 udf_rw16(s_ad.loc.part_num),
1809 udf_rw32(s_ad.loc.lb_num),
1810 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1811 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1812
1813 slot++;
1814 }
1815
1816 /* 6) reset node descriptors */
1817 udf_wipe_adslots(udf_node);
1818
1819 /* 7) copy back extents; merge when possible. Recounting on the fly */
1820 cpy_slots = cpy_slot;
1821
1822 c_ad = node_ad_cpy[0];
1823 slot = 0;
1824 DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
1825 "lb %d, len %d, flags %d\n",
1826 udf_rw16(c_ad.loc.part_num),
1827 udf_rw32(c_ad.loc.lb_num),
1828 UDF_EXT_LEN(udf_rw32(c_ad.len)),
1829 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
1830
1831 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
1832 s_ad = node_ad_cpy[cpy_slot];
1833
1834 DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
1835 "lb %d, len %d, flags %d\n",
1836 udf_rw16(s_ad.loc.part_num),
1837 udf_rw32(s_ad.loc.lb_num),
1838 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1839 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1840
1841 /* see if we can merge */
1842 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
1843 /* not mergable (anymore) */
1844 DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
1845 "len %d, flags %d\n",
1846 udf_rw16(c_ad.loc.part_num),
1847 udf_rw32(c_ad.loc.lb_num),
1848 UDF_EXT_LEN(udf_rw32(c_ad.len)),
1849 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
1850
1851 error = udf_append_adslot(udf_node, slot, &c_ad);
1852 if (error) {
1853 buf->b_error = error;
1854 goto out;
1855 }
1856 c_ad = s_ad;
1857 slot++;
1858 }
1859 }
1860
1861 /* 8) push rest slot (if any) */
1862 if (UDF_EXT_LEN(c_ad.len) > 0) {
1863 DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
1864 "len %d, flags %d\n",
1865 udf_rw16(c_ad.loc.part_num),
1866 udf_rw32(c_ad.loc.lb_num),
1867 UDF_EXT_LEN(udf_rw32(c_ad.len)),
1868 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
1869
1870 error = udf_append_adslot(udf_node, slot, &c_ad);
1871 if (error) {
1872 buf->b_error = error;
1873 goto out;
1874 }
1875 }
1876
1877 out:
1878 /* the node's descriptors should now be sane */
1879 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
1880 UDF_UNLOCK_NODE(udf_node, 0);
1881
1882 KASSERT(orig_inflen == new_inflen);
1883 KASSERT(new_lbrec >= orig_lbrec);
1884
1885 return;
1886 }
1887
1888 /* --------------------------------------------------------------------- */
1889
/*
 * Grow the node to `new_size' bytes.  Internally (embedded) allocated
 * nodes either grow in place or, when the data no longer fits in the
 * descriptor, are converted to extent based allocation with the embedded
 * data evacuated and written back through the vnode.  For extent based
 * allocation the last extent is first padded up to a logical block
 * boundary and then free (not yet backed) extents are appended in chunks
 * until the new size is reached.  Returns 0 or an errno value.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint8_t *data_pos, *evacuated_data;
	int icbflags, addr_type;
	int slot, cpy_slot;
	int eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* max extent length rounded down to whole logical blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif
	
			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0, 
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* NOTE(review): `error' from this read is not checked
			 * before the conversion continues; a failed read would
			 * write back stale/zeroed data -- confirm intended */

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc */
		/* XXX HOWTO selecting allocation method ? */
		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |=  UDF_ICB_LONG_ALLOC;	/* XXX or SHORT_ALLOC */
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* the evacuated data is now one free (unbacked) extent */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0, 
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2150
2151 /* --------------------------------------------------------------------- */
2152
2153 int
2154 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2155 {
2156 struct vnode *vp = udf_node->vnode;
2157 struct udf_mount *ump = udf_node->ump;
2158 struct file_entry *fe;
2159 struct extfile_entry *efe;
2160 struct icb_tag *icbtag;
2161 struct long_ad c_ad, s_ad, *node_ad_cpy;
2162 uint64_t size_diff, old_size, inflen, objsize;
2163 uint64_t foffset, end_foffset;
2164 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2165 uint32_t lb_size, dscr_size, crclen;
2166 uint32_t slot_offset;
2167 uint32_t len, flags, max_len;
2168 uint32_t num_lb, lb_num;
2169 uint32_t max_l_ad, l_ad, l_ea;
2170 uint16_t vpart_num;
2171 uint8_t *data_pos;
2172 int icbflags, addr_type;
2173 int slot, cpy_slot, cpy_slots;
2174 int eof, error;
2175
2176 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2177
2178 UDF_LOCK_NODE(udf_node, 0);
2179 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2180
2181 lb_size = udf_rw32(ump->logical_vol->lb_size);
2182 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2183
2184 /* do the work */
2185 fe = udf_node->fe;
2186 efe = udf_node->efe;
2187 if (fe) {
2188 icbtag = &fe->icbtag;
2189 inflen = udf_rw64(fe->inf_len);
2190 objsize = inflen;
2191 dscr_size = sizeof(struct file_entry) -1;
2192 l_ea = udf_rw32(fe->l_ea);
2193 l_ad = udf_rw32(fe->l_ad);
2194 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2195 } else {
2196 icbtag = &efe->icbtag;
2197 inflen = udf_rw64(efe->inf_len);
2198 objsize = udf_rw64(efe->obj_size);
2199 dscr_size = sizeof(struct extfile_entry) -1;
2200 l_ea = udf_rw32(efe->l_ea);
2201 l_ad = udf_rw32(efe->l_ad);
2202 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2203 }
2204 max_l_ad = lb_size - dscr_size - l_ea;
2205
2206 icbflags = udf_rw16(icbtag->flags);
2207 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2208
2209 old_size = inflen;
2210 size_diff = old_size - new_size;
2211
2212 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2213
2214 /* shrink the node to its new size */
2215 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2216 /* only reflect size change directly in the node */
2217 KASSERT(new_size <= max_l_ad);
2218 inflen -= size_diff;
2219 objsize -= size_diff;
2220 l_ad -= size_diff;
2221 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2222 if (fe) {
2223 fe->inf_len = udf_rw64(inflen);
2224 fe->l_ad = udf_rw32(l_ad);
2225 fe->tag.desc_crc_len = udf_rw32(crclen);
2226 } else {
2227 efe->inf_len = udf_rw64(inflen);
2228 efe->obj_size = udf_rw64(objsize);
2229 efe->l_ad = udf_rw32(l_ad);
2230 efe->tag.desc_crc_len = udf_rw32(crclen);
2231 }
2232 error = 0;
2233
2234 /* clear the space in the descriptor */
2235 KASSERT(old_size > new_size);
2236 memset(data_pos + new_size, 0, old_size - new_size);
2237
2238 /* TODO zero appened space in buffer! */
2239 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2240
2241 /* set new size for uvm */
2242 uvm_vnp_setsize(vp, new_size);
2243
2244 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2245 UDF_UNLOCK_NODE(udf_node, 0);
2246
2247 KASSERT(new_inflen == orig_inflen - size_diff);
2248 KASSERT(new_lbrec == orig_lbrec);
2249 KASSERT(new_lbrec == 0);
2250
2251 return 0;
2252 }
2253
2254 /* setup node cleanup extents copy space */
2255 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2256 M_UDFMNT, M_WAITOK);
2257 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2258
2259 /*
2260 * Shrink the node by releasing the allocations and truncate the last
2261 * allocation to the new size. If the new size fits into the
2262 * allocation descriptor itself, transform it into an
2263 * UDF_ICB_INTERN_ALLOC.
2264 */
2265 slot = 0;
2266 cpy_slot = 0;
2267 foffset = 0;
2268
2269 /* 1) copy till first overlap piece to the rewrite buffer */
2270 for (;;) {
2271 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2272 if (eof) {
2273 DPRINTF(WRITE,
2274 ("Shrink node failed: "
2275 "encountered EOF\n"));
2276 error = EINVAL;
2277 goto errorout; /* panic? */
2278 }
2279 len = udf_rw32(s_ad.len);
2280 flags = UDF_EXT_FLAGS(len);
2281 len = UDF_EXT_LEN(len);
2282
2283 if (flags == UDF_EXT_REDIRECT) {
2284 slot++;
2285 continue;
2286 }
2287
2288 end_foffset = foffset + len;
2289 if (end_foffset > new_size)
2290 break; /* found */
2291
2292 node_ad_cpy[cpy_slot++] = s_ad;
2293
2294 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2295 "-> stack\n",
2296 udf_rw16(s_ad.loc.part_num),
2297 udf_rw32(s_ad.loc.lb_num),
2298 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2299 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2300
2301 foffset = end_foffset;
2302 slot++;
2303 }
2304 slot_offset = new_size - foffset;
2305
2306 /* 2) trunc overlapping slot at overlap and copy it */
2307 if (slot_offset > 0) {
2308 lb_num = udf_rw32(s_ad.loc.lb_num);
2309 vpart_num = udf_rw16(s_ad.loc.part_num);
2310
2311 if (flags == UDF_EXT_ALLOCATED) {
2312 lb_num += (slot_offset + lb_size -1) / lb_size;
2313 num_lb = (len - slot_offset + lb_size - 1) / lb_size;
2314
2315 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2316 }
2317
2318 s_ad.len = udf_rw32(slot_offset | flags);
2319 node_ad_cpy[cpy_slot++] = s_ad;
2320 slot++;
2321
2322 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2323 "-> stack\n",
2324 udf_rw16(s_ad.loc.part_num),
2325 udf_rw32(s_ad.loc.lb_num),
2326 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2327 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2328 }
2329
2330 /* 3) delete remainder */
2331 for (;;) {
2332 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2333 if (eof)
2334 break;
2335
2336 len = udf_rw32(s_ad.len);
2337 flags = UDF_EXT_FLAGS(len);
2338 len = UDF_EXT_LEN(len);
2339
2340 if (flags == UDF_EXT_REDIRECT) {
2341 slot++;
2342 continue;
2343 }
2344
2345 DPRINTF(ALLOC, ("\t3: delete remainder "
2346 "vp %d lb %d, len %d, flags %d\n",
2347 udf_rw16(s_ad.loc.part_num),
2348 udf_rw32(s_ad.loc.lb_num),
2349 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2350 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2351
2352 if (flags == UDF_EXT_ALLOCATED) {
2353 lb_num = udf_rw32(s_ad.loc.lb_num);
2354 vpart_num = udf_rw16(s_ad.loc.part_num);
2355 num_lb = (len + lb_size - 1) / lb_size;
2356
2357 udf_free_allocated_space(ump, lb_num, vpart_num,
2358 num_lb);
2359 }
2360
2361 slot++;
2362 }
2363
2364 /* 4) if it will fit into the descriptor then convert */
2365 if (new_size < max_l_ad) {
2366 /*
2367 * resque/evacuate old piece by reading it in, and convert it
2368 * to internal alloc.
2369 */
2370 if (new_size == 0) {
2371 /* XXX/TODO only for zero sizing now */
2372 udf_wipe_adslots(udf_node);
2373
2374 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2375 icbflags |= UDF_ICB_INTERN_ALLOC;
2376 icbtag->flags = udf_rw16(icbflags);
2377
2378 inflen -= size_diff; KASSERT(inflen == 0);
2379 objsize -= size_diff;
2380 l_ad = new_size;
2381 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2382 if (fe) {
2383 fe->inf_len = udf_rw64(inflen);
2384 fe->l_ad = udf_rw32(l_ad);
2385 fe->tag.desc_crc_len = udf_rw32(crclen);
2386 } else {
2387 efe->inf_len = udf_rw64(inflen);
2388 efe->obj_size = udf_rw64(objsize);
2389 efe->l_ad = udf_rw32(l_ad);
2390 efe->tag.desc_crc_len = udf_rw32(crclen);
2391 }
2392 /* eventually copy in evacuated piece */
2393 /* set new size for uvm */
2394 uvm_vnp_setsize(vp, new_size);
2395
2396 free(node_ad_cpy, M_UDFMNT);
2397 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2398
2399 UDF_UNLOCK_NODE(udf_node, 0);
2400
2401 KASSERT(new_inflen == orig_inflen - size_diff);
2402 KASSERT(new_inflen == 0);
2403 KASSERT(new_lbrec == 0);
2404
2405 return 0;
2406 }
2407
2408 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2409 }
2410
2411 /* 5) reset node descriptors */
2412 udf_wipe_adslots(udf_node);
2413
2414 /* 6) copy back extents; merge when possible. Recounting on the fly */
2415 cpy_slots = cpy_slot;
2416
2417 c_ad = node_ad_cpy[0];
2418 slot = 0;
2419 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2420 s_ad = node_ad_cpy[cpy_slot];
2421
2422 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2423 "lb %d, len %d, flags %d\n",
2424 udf_rw16(s_ad.loc.part_num),
2425 udf_rw32(s_ad.loc.lb_num),
2426 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2427 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2428
2429 /* see if we can merge */
2430 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2431 /* not mergable (anymore) */
2432 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2433 "len %d, flags %d\n",
2434 udf_rw16(c_ad.loc.part_num),
2435 udf_rw32(c_ad.loc.lb_num),
2436 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2437 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2438
2439 error = udf_append_adslot(udf_node, slot, &c_ad);
2440 if (error)
2441 goto errorout; /* panic? */
2442 c_ad = s_ad;
2443 slot++;
2444 }
2445 }
2446
2447 /* 7) push rest slot (if any) */
2448 if (UDF_EXT_LEN(c_ad.len) > 0) {
2449 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2450 "len %d, flags %d\n",
2451 udf_rw16(c_ad.loc.part_num),
2452 udf_rw32(c_ad.loc.lb_num),
2453 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2454 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2455
2456 error = udf_append_adslot(udf_node, slot, &c_ad);
2457 if (error)
2458 goto errorout; /* panic? */
2459 ;
2460 }
2461
2462 inflen -= size_diff;
2463 objsize -= size_diff;
2464 if (fe) {
2465 fe->inf_len = udf_rw64(inflen);
2466 } else {
2467 efe->inf_len = udf_rw64(inflen);
2468 efe->obj_size = udf_rw64(objsize);
2469 }
2470 error = 0;
2471
2472 /* set new size for uvm */
2473 uvm_vnp_setsize(vp, new_size);
2474
2475 errorout:
2476 free(node_ad_cpy, M_UDFMNT);
2477
2478 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2479 UDF_UNLOCK_NODE(udf_node, 0);
2480
2481 KASSERT(new_inflen == orig_inflen - size_diff);
2482
2483 return error;
2484 }
2485
2486