udf_allocation.c revision 1.17 1 /* $NetBSD: udf_allocation.c,v 1.17 2008/08/06 13:41:12 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.17 2008/08/06 13:41:12 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
81 /*
82 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
83 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
85 * searching for the same or adjacent position again.
86 */
87
88 /* --------------------------------------------------------------------- */
89
90 #if 0
91 #if 1
/*
 * Debug helper: print the node's extent administration to the console.
 * Only prints when udf_verbose has the UDF_DEBUG_NODEDUMP bit set.
 * (Note: this whole section is compiled out by the enclosing #if 0.)
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data is embedded in the node itself; no extents */
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen = %"PRIu64"\n", inflen);
	printf("\t\t");

	/* walk all allocation descriptor slots until end-of-list */
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len   = udf_rw32(s_ad.len);
		/* high bits of the length word encode the extent type */
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags>>30);
		printf("] ");

		if (flags == UDF_EXT_REDIRECT) {
			/* slot redirects to a continuation allocation extent */
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
160 #else
161 #define udf_node_dump(a)
162 #endif
163
164
/*
 * Debug/paranoia helper: verify that `num_lb' logical blocks starting at
 * `lb_num' on virtual partition `vpart_num' are really marked allocated,
 * i.e. NOT set in the unallocated-space bitmap (a set bit means "free").
 * Violations are only printed; the hard KASSERT is commented out.
 * (Note: compiled out by the enclosing #if 0.)
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			   "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);	/* XXX currently unused */
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits; each set bit is a block still marked free */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}
234
235
/*
 * Debug variant of the node sanity check (compiled out by the enclosing
 * #if 0).  Walks the node's allocation descriptors and verifies that:
 *   - the unused tail of the descriptor is zeroed,
 *   - extents sum up to the recorded information length,
 *   - allocated extents sum up to the recorded logical block count and
 *     are marked busy in the space bitmaps,
 *   - only the last extent may span a partial logical block.
 *
 * udf_node       : node to verify
 * cnt_inflen     : out, accumulated information length
 * cnt_logblksrec : out, accumulated number of recorded logical blocks
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad = udf_rw32(fe->l_ad);
		l_ea = udf_rw32(fe->l_ea);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad = udf_rw32(efe->l_ad);
		l_ea = udf_rw32(efe->l_ea);
	}
	/* allocation descriptors start after the extended attributes area */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: l_ad holds the data itself, no extents */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		/* only the last extent may cover a partial logical block */
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* continuation (redirect) extents span one lb each */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
343 #else
344 static void
345 udf_node_sanity_check(struct udf_node *udf_node,
346 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
347 struct file_entry *fe;
348 struct extfile_entry *efe;
349 struct icb_tag *icbtag;
350 uint64_t inflen, logblksrec;
351 int dscr_size, lb_size;
352
353 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
354
355 fe = udf_node->fe;
356 efe = udf_node->efe;
357 if (fe) {
358 icbtag = &fe->icbtag;
359 inflen = udf_rw64(fe->inf_len);
360 dscr_size = sizeof(struct file_entry) -1;
361 logblksrec = udf_rw64(fe->logblks_rec);
362 } else {
363 icbtag = &efe->icbtag;
364 inflen = udf_rw64(efe->inf_len);
365 dscr_size = sizeof(struct extfile_entry) -1;
366 logblksrec = udf_rw64(efe->logblks_rec);
367 }
368 *cnt_logblksrec = logblksrec;
369 *cnt_inflen = inflen;
370 }
371 #endif
372
373 /* --------------------------------------------------------------------- */
374
375 int
376 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
377 uint32_t *lb_numres, uint32_t *extres)
378 {
379 struct part_desc *pdesc;
380 struct spare_map_entry *sme;
381 struct long_ad s_icb_loc;
382 uint64_t foffset, end_foffset;
383 uint32_t lb_size, len;
384 uint32_t lb_num, lb_rel, lb_packet;
385 uint32_t udf_rw32_lbmap, ext_offset;
386 uint16_t vpart;
387 int rel, part, error, eof, slot, flags;
388
389 assert(ump && icb_loc && lb_numres);
390
391 vpart = udf_rw16(icb_loc->loc.part_num);
392 lb_num = udf_rw32(icb_loc->loc.lb_num);
393 if (vpart > UDF_VTOP_RAWPART)
394 return EINVAL;
395
396 translate_again:
397 part = ump->vtop[vpart];
398 pdesc = ump->partitions[part];
399
400 switch (ump->vtop_tp[vpart]) {
401 case UDF_VTOP_TYPE_RAW :
402 /* 1:1 to the end of the device */
403 *lb_numres = lb_num;
404 *extres = INT_MAX;
405 return 0;
406 case UDF_VTOP_TYPE_PHYS :
407 /* transform into its disc logical block */
408 if (lb_num > udf_rw32(pdesc->part_len))
409 return EINVAL;
410 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
411
412 /* extent from here to the end of the partition */
413 *extres = udf_rw32(pdesc->part_len) - lb_num;
414 return 0;
415 case UDF_VTOP_TYPE_VIRT :
416 /* only maps one logical block, lookup in VAT */
417 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
418 return EINVAL;
419
420 /* lookup in virtual allocation table file */
421 mutex_enter(&ump->allocate_mutex);
422 error = udf_vat_read(ump->vat_node,
423 (uint8_t *) &udf_rw32_lbmap, 4,
424 ump->vat_offset + lb_num * 4);
425 mutex_exit(&ump->allocate_mutex);
426
427 if (error)
428 return error;
429
430 lb_num = udf_rw32(udf_rw32_lbmap);
431
432 /* transform into its disc logical block */
433 if (lb_num > udf_rw32(pdesc->part_len))
434 return EINVAL;
435 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
436
437 /* just one logical block */
438 *extres = 1;
439 return 0;
440 case UDF_VTOP_TYPE_SPARABLE :
441 /* check if the packet containing the lb_num is remapped */
442 lb_packet = lb_num / ump->sparable_packet_size;
443 lb_rel = lb_num % ump->sparable_packet_size;
444
445 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
446 sme = &ump->sparing_table->entries[rel];
447 if (lb_packet == udf_rw32(sme->org)) {
448 /* NOTE maps to absolute disc logical block! */
449 *lb_numres = udf_rw32(sme->map) + lb_rel;
450 *extres = ump->sparable_packet_size - lb_rel;
451 return 0;
452 }
453 }
454
455 /* transform into its disc logical block */
456 if (lb_num > udf_rw32(pdesc->part_len))
457 return EINVAL;
458 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
459
460 /* rest of block */
461 *extres = ump->sparable_packet_size - lb_rel;
462 return 0;
463 case UDF_VTOP_TYPE_META :
464 /* we have to look into the file's allocation descriptors */
465
466 /* use metadatafile allocation mutex */
467 lb_size = udf_rw32(ump->logical_vol->lb_size);
468
469 UDF_LOCK_NODE(ump->metadata_node, 0);
470
471 /* get first overlapping extent */
472 foffset = 0;
473 slot = 0;
474 for (;;) {
475 udf_get_adslot(ump->metadata_node,
476 slot, &s_icb_loc, &eof);
477 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
478 "len = %d, lb_num = %d, part = %d\n",
479 slot, eof,
480 UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
481 UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
482 udf_rw32(s_icb_loc.loc.lb_num),
483 udf_rw16(s_icb_loc.loc.part_num)));
484 if (eof) {
485 DPRINTF(TRANSLATE,
486 ("Meta partition translation "
487 "failed: can't seek location\n"));
488 UDF_UNLOCK_NODE(ump->metadata_node, 0);
489 return EINVAL;
490 }
491 len = udf_rw32(s_icb_loc.len);
492 flags = UDF_EXT_FLAGS(len);
493 len = UDF_EXT_LEN(len);
494
495 if (flags == UDF_EXT_REDIRECT) {
496 slot++;
497 continue;
498 }
499
500 end_foffset = foffset + len;
501
502 if (end_foffset > lb_num * lb_size)
503 break; /* found */
504 foffset = end_foffset;
505 slot++;
506 }
507 /* found overlapping slot */
508 ext_offset = lb_num * lb_size - foffset;
509
510 /* process extent offset */
511 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
512 vpart = udf_rw16(s_icb_loc.loc.part_num);
513 lb_num += (ext_offset + lb_size -1) / lb_size;
514 len -= ext_offset;
515 ext_offset = 0;
516
517 flags = UDF_EXT_FLAGS(s_icb_loc.len);
518
519 UDF_UNLOCK_NODE(ump->metadata_node, 0);
520 if (flags != UDF_EXT_ALLOCATED) {
521 DPRINTF(TRANSLATE, ("Metadata partition translation "
522 "failed: not allocated\n"));
523 return EINVAL;
524 }
525
526 /*
527 * vpart and lb_num are updated, translate again since we
528 * might be mapped on sparable media
529 */
530 goto translate_again;
531 default:
532 printf("UDF vtop translation scheme %d unimplemented yet\n",
533 ump->vtop_tp[vpart]);
534 }
535
536 return EINVAL;
537 }
538
539
540 /* XXX provisional primitive braindead version */
541 /* TODO use ext_res */
542 void
543 udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
544 uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
545 {
546 struct long_ad loc;
547 uint32_t lb_numres, ext_res;
548 int sector;
549
550 for (sector = 0; sector < sectors; sector++) {
551 memset(&loc, 0, sizeof(struct long_ad));
552 loc.loc.part_num = udf_rw16(vpart_num);
553 loc.loc.lb_num = udf_rw32(*lmapping);
554 udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
555 *pmapping = lb_numres;
556 lmapping++; pmapping++;
557 }
558 }
559
560
561 /* --------------------------------------------------------------------- */
562
563 /*
564 * Translate an extent (in logical_blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
566 */
567
/*
 * Translate `num_lb' logical blocks of this file, starting at file block
 * `from', into absolute disc logical block numbers stored in map[].
 * Blocks in free or allocated-but-not-used extents map to UDF_TRANS_ZERO;
 * files with embedded (internal) data get a single UDF_TRANS_INTERN
 * marker instead.
 *
 * Returns 0 on success, ENOENT on a NULL node, EINVAL when the requested
 * range can't be found, or an error from udf_translate_vtop().
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* data is embedded in the node; nothing to translate */
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirect slots only point to continuation extents; skip */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; offset of `from' within this extent */
	ext_offset = from * lb_size - foffset;

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		lb_num = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num += (ext_offset + lb_size -1) / lb_size;
		overlap = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while() is necessary since the extent
		 * udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* sparse blocks; read back as zeroes */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* only t_ad.loc is read by the translation */
				t_ad.loc.lb_num = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		/* redirect extents don't advance the file offset */
		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
735
736 /* --------------------------------------------------------------------- */
737
738 static int
739 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
740 {
741 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
742 uint8_t *blob;
743 int entry, chunk, found, error;
744
745 KASSERT(ump);
746 KASSERT(ump->logical_vol);
747
748 lb_size = udf_rw32(ump->logical_vol->lb_size);
749 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
750
751 /* TODO static allocation of search chunk */
752
753 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
754 found = 0;
755 error = 0;
756 entry = 0;
757 do {
758 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
759 if (chunk <= 0)
760 break;
761 /* load in chunk */
762 error = udf_vat_read(ump->vat_node, blob, chunk,
763 ump->vat_offset + lb_num * 4);
764
765 if (error)
766 break;
767
768 /* search this chunk */
769 for (entry=0; entry < chunk /4; entry++, lb_num++) {
770 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
771 lb_map = udf_rw32(udf_rw32_lbmap);
772 if (lb_map == 0xffffffff) {
773 found = 1;
774 break;
775 }
776 }
777 } while (!found);
778 if (error) {
779 printf("udf_search_free_vatloc: error reading in vat chunk "
780 "(lb %d, size %d)\n", lb_num, chunk);
781 }
782
783 if (!found) {
784 /* extend VAT */
785 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
786 lb_num = ump->vat_entries;
787 ump->vat_entries++;
788 }
789
790 /* mark entry with initialiser just in case */
791 lb_map = udf_rw32(0xfffffffe);
792 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
793 ump->vat_offset + lb_num *4);
794 ump->vat_last_free_lb = lb_num;
795
796 free(blob, M_UDFTEMP);
797 *lbnumres = lb_num;
798 return 0;
799 }
800
801
/*
 * Allocate up to *num_lb free logical blocks from `bitmap', storing their
 * block numbers in lmappos[]; *num_lb is decremented per block found, so
 * a non-zero value on return means the bitmap ran out of space.  A set
 * bit in the bitmap means the block is free.  Two roving start positions
 * are kept, one for metadata and one for data, to keep both allocation
 * streams apart; the search wraps around once (two passes).
 * NOTE(review): caller appears expected to hold the allocation mutex —
 * confirm against callers.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;		/* start byte-aligned */
	for (pass = 0; pass < 2; pass++) {
		/* second pass restarts at the beginning of the bitmap */
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos  = bitmap->bits + offset/8;
			bit   = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				/* byte has no free blocks; try next byte */
				offset += 8;
				continue;
			}
			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			/* clear the bit to mark the block allocated */
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember where to continue searching next time */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
850
851
/*
 * Mark `num_lb' logical blocks starting at `lb_num' as free in `bitmap'
 * by setting their bits (a set bit means "free").  Handles the leading
 * unaligned bits first, then whole bytes, then the trailing bits.  The
 * KASSERTs catch double frees (bit already set).
 */
static void
udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
{
	uint32_t offset;
	uint32_t bit, bitval;
	uint8_t *bpos;

	offset = lb_num;

	/* starter bits: free up to the next byte boundary */
	bpos = bitmap->bits + offset/8;
	bit = offset % 8;
	while ((bit != 0) && (num_lb > 0)) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
	if (num_lb == 0)
		return;

	/* whole bytes */
	KASSERT(bit == 0);
	bpos = bitmap->bits + offset / 8;
	while (num_lb >= 8) {
		/* all eight blocks must currently be marked allocated */
		KASSERT((*bpos == 0));
		DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
		*bpos = 255;
		offset += 8; num_lb -= 8;
		bpos++;
	}

	/* stop bits: remaining blocks in the final partial byte */
	KASSERT(num_lb < 8);
	bit = 0;
	while (num_lb > 0) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
}
900
901
/*
 * Allocate a sequence of `num_lb' sector numbers on virtual partition
 * `vpart_num' for data of type `udf_c_type'; the resulting logical block
 * numbers are stored in lmapping[].  The allocation scheme is chosen per
 * partition; note that only the sequential scheme guarantees the run is
 * contiguous, bitmap-based allocations may be scattered.
 * Returns 0 on success, ENOSPC when the partition is full, or an error
 * from the VAT search.
 */
static int
udf_allocate_space(struct udf_mount *ump, int udf_c_type,
	uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* XXX TODO check disc space */

	alloc_type = ump->vtop_alloc[vpart_num];
	is_node    = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error   = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error)
			*lmappos = lb_num;
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov  = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate at the track's next writable address */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R */
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64",", (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
1042
1043 /* --------------------------------------------------------------------- */
1044
/*
 * Return `num_lb' previously allocated logical blocks, starting at
 * virtual block `lb_num' on partition `vpart_num', to free space.
 * Depending on the partition type this sets bits in the freed or
 * unallocated space bitmap, invalidates the VAT entry, or clears bits in
 * the metadata bitmap; the logical volume integrity free-space count is
 * adjusted accordingly.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);	/* XXX currently unused */
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by marking it unused (0xffffffff) */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember lowest free entry to speed up the next search */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1132
1133 /* --------------------------------------------------------------------- */
1134
1135 int
1136 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type,
1137 uint32_t num_lb, uint16_t vpartnr, uint64_t *lmapping)
1138 {
1139 /* TODO properly maintain uncomitted_lb per partition */
1140
1141 /* reserve size for VAT allocated data */
1142 if (ump->vtop_alloc[vpartnr] == UDF_ALLOC_VAT) {
1143 mutex_enter(&ump->allocate_mutex);
1144 ump->uncomitted_lb += num_lb;
1145 mutex_exit(&ump->allocate_mutex);
1146 }
1147
1148 return udf_allocate_space(ump, udf_c_type, vpartnr, num_lb, lmapping);
1149 }
1150
1151 /* --------------------------------------------------------------------- */
1152
1153 /*
1154 * Allocate a buf on disc for direct write out. The space doesn't have to be
1155 * contiguous as the caller takes care of this.
1156 */
1157
1158 void
1159 udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
1160 uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
1161 {
1162 struct udf_node *udf_node = VTOI(buf->b_vp);
1163 int lb_size, blks, udf_c_type;
1164 int vpart_num, num_lb;
1165 int error, s;
1166
1167 /*
1168 * for each sector in the buf, allocate a sector on disc and record
1169 * its position in the provided mapping array.
1170 *
1171 * If its userdata or FIDs, record its location in its node.
1172 */
1173
1174 lb_size = udf_rw32(ump->logical_vol->lb_size);
1175 num_lb = (buf->b_bcount + lb_size -1) / lb_size;
1176 blks = lb_size / DEV_BSIZE;
1177 udf_c_type = buf->b_udf_c_type;
1178
1179 KASSERT(lb_size == ump->discinfo.sector_size);
1180
1181 /* select partition to record the buffer on */
1182 vpart_num = ump->data_part;
1183 if (udf_c_type == UDF_C_NODE)
1184 vpart_num = ump->node_part;
1185 if (udf_c_type == UDF_C_FIDS)
1186 vpart_num = ump->fids_part;
1187 *vpart_nump = vpart_num;
1188
1189 if (udf_c_type == UDF_C_NODE) {
1190 /* if not VAT, its allready allocated */
1191 if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
1192 return;
1193
1194 /* allocate on its backing sequential partition */
1195 vpart_num = ump->data_part;
1196 }
1197
1198 /* do allocation on the selected partition */
1199 error = udf_allocate_space(ump, udf_c_type,
1200 vpart_num, num_lb, lmapping);
1201 if (error) {
1202 /* ARGH! we've not done our accounting right! */
1203 panic("UDF disc allocation accounting gone wrong");
1204 }
1205
1206 /* commit our sector count */
1207 mutex_enter(&ump->allocate_mutex);
1208 if (num_lb > ump->uncomitted_lb) {
1209 ump->uncomitted_lb = 0;
1210 } else {
1211 ump->uncomitted_lb -= num_lb;
1212 }
1213 mutex_exit(&ump->allocate_mutex);
1214
1215 /* If its userdata or FIDs, record its allocation in its node. */
1216 if ((udf_c_type == UDF_C_USERDATA) ||
1217 (udf_c_type == UDF_C_FIDS) ||
1218 (udf_c_type == UDF_C_METADATA_SBM))
1219 {
1220 udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
1221 node_ad_cpy);
1222 /* decrement our outstanding bufs counter */
1223 s = splbio();
1224 udf_node->outstanding_bufs--;
1225 splx(s);
1226 }
1227 }
1228
1229 /* --------------------------------------------------------------------- */
1230
1231 /*
1232 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1233 * possible (anymore); a2 returns the rest piece.
1234 */
1235
1236 static int
1237 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1238 {
1239 uint32_t max_len, merge_len;
1240 uint32_t a1_len, a2_len;
1241 uint32_t a1_flags, a2_flags;
1242 uint32_t a1_lbnum, a2_lbnum;
1243 uint16_t a1_part, a2_part;
1244
1245 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1246
1247 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1248 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1249 a1_lbnum = udf_rw32(a1->loc.lb_num);
1250 a1_part = udf_rw16(a1->loc.part_num);
1251
1252 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1253 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1254 a2_lbnum = udf_rw32(a2->loc.lb_num);
1255 a2_part = udf_rw16(a2->loc.part_num);
1256
1257 /* defines same space */
1258 if (a1_flags != a2_flags)
1259 return 1;
1260
1261 if (a1_flags != UDF_EXT_FREE) {
1262 /* the same partition */
1263 if (a1_part != a2_part)
1264 return 1;
1265
1266 /* a2 is successor of a1 */
1267 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1268 return 1;
1269 }
1270
1271 /* merge as most from a2 if possible */
1272 merge_len = MIN(a2_len, max_len - a1_len);
1273 a1_len += merge_len;
1274 a2_len -= merge_len;
1275 a2_lbnum += merge_len/lb_size;
1276
1277 a1->len = udf_rw32(a1_len | a1_flags);
1278 a2->len = udf_rw32(a2_len | a2_flags);
1279 a2->loc.lb_num = udf_rw32(a2_lbnum);
1280
1281 if (a2_len > 0)
1282 return 1;
1283
1284 /* there is space over to merge */
1285 return 0;
1286 }
1287
1288 /* --------------------------------------------------------------------- */
1289
1290 static void
1291 udf_wipe_adslots(struct udf_node *udf_node)
1292 {
1293 struct file_entry *fe;
1294 struct extfile_entry *efe;
1295 struct alloc_ext_entry *ext;
1296 uint64_t inflen, objsize;
1297 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1298 uint8_t *data_pos;
1299 int extnr;
1300
1301 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1302
1303 fe = udf_node->fe;
1304 efe = udf_node->efe;
1305 if (fe) {
1306 inflen = udf_rw64(fe->inf_len);
1307 objsize = inflen;
1308 dscr_size = sizeof(struct file_entry) -1;
1309 l_ea = udf_rw32(fe->l_ea);
1310 l_ad = udf_rw32(fe->l_ad);
1311 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1312 } else {
1313 inflen = udf_rw64(efe->inf_len);
1314 objsize = udf_rw64(efe->obj_size);
1315 dscr_size = sizeof(struct extfile_entry) -1;
1316 l_ea = udf_rw32(efe->l_ea);
1317 l_ad = udf_rw32(efe->l_ad);
1318 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1319 }
1320 max_l_ad = lb_size - dscr_size - l_ea;
1321
1322 /* wipe fe/efe */
1323 memset(data_pos, 0, max_l_ad);
1324 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1325 if (fe) {
1326 fe->l_ad = udf_rw32(0);
1327 fe->logblks_rec = udf_rw64(0);
1328 fe->tag.desc_crc_len = udf_rw32(crclen);
1329 } else {
1330 efe->l_ad = udf_rw32(0);
1331 efe->logblks_rec = udf_rw64(0);
1332 efe->tag.desc_crc_len = udf_rw32(crclen);
1333 }
1334
1335 /* wipe all allocation extent entries */
1336 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1337 ext = udf_node->ext[extnr];
1338 dscr_size = sizeof(struct alloc_ext_entry) -1;
1339 data_pos = (uint8_t *) ext->data;
1340 max_l_ad = lb_size - dscr_size;
1341 memset(data_pos, 0, max_l_ad);
1342 ext->l_ad = udf_rw32(0);
1343
1344 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1345 ext->tag.desc_crc_len = udf_rw32(crclen);
1346 }
1347 udf_node->i_flags |= IN_NODE_REBUILD;
1348 }
1349
1350 /* --------------------------------------------------------------------- */
1351
/*
 * Fetch allocation descriptor number `slot' of the node and return it as
 * a long_ad in *icb; for short_ad based nodes the partition number is
 * taken from the node's own location.  Walks from the (e)fe into the
 * chained allocation extent entries by following UDF_EXT_REDIRECT
 * descriptors.  *eof is set when `slot' lies past the last recorded
 * descriptor, or when the node uses embedded (intern) allocation.
 */
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		/* descriptor area starts after the extended attributes */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	/* size of one allocation descriptor for this node's alloc type */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* no chained extent follows; slot is past the end */
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		/* make offset relative to the next extent's descriptor area */
		offset = offset - l_ad;
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element; synthesize a long_ad from a short_ad if needed */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len          = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num   = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}
1459
1460 /* --------------------------------------------------------------------- */
1461
/*
 * Write allocation descriptor *icb at slot number *slot of the node,
 * either overwriting an existing descriptor or appending one past the
 * current end.  When the current (e)fe/AED descriptor area is (nearly)
 * full, a new allocation extent entry (AED) is allocated, linked in with
 * a UDF_EXT_REDIRECT descriptor and *slot is bumped to compensate for
 * the inserted link.  The descriptor's l_ad, crc length and the node's
 * recorded-blocks count are kept up to date.  Returns 0 or an errno
 * value (from AED space allocation).
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr *dscr, *extdscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	vpart_num = udf_rw16(udf_node->loc.loc.part_num);

	/* determine what descriptor we are in */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr      = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea      = udf_rw32(fe->l_ea);
		/* keep pointers so updates hit the descriptor itself */
		l_ad_p    = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag  = &efe->icbtag;
		dscr      = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea      = udf_rw32(efe->l_ea);
		l_ad_p    = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	/* size of one allocation descriptor for this node's alloc type */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad since it can be a synthesized one */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num   = udf_rw32(0);
	}

	/* if offset too big, we go to the allocation extensions */
	l_ad   = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr  = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len          = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num   = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is addressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		/* make offset relative to this extent's descriptor area */
		offset = offset - l_ad;

		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad   = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad   = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len          = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num   = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts; the old extent's blocks go away */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/* have to append aed, see if we already have a spare one */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));

			/* allocate one logical block for the new AED */
			error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
					vpart_num, &lmapping);
			lb_num = lmapping;
			if (error)
				return error;

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
			ext = &extdscr->aee;

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size  = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw32(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr]     = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len    = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		crclen = udf_rw32(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len    = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed; only grows when appending */
	if (offset >= l_ad) {
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1700
1701 /* --------------------------------------------------------------------- */
1702
1703 static void
1704 udf_count_alloc_exts(struct udf_node *udf_node)
1705 {
1706 struct long_ad s_ad;
1707 uint32_t lb_num, len, flags;
1708 uint16_t vpart_num;
1709 int slot, eof;
1710 int num_extents, extnr;
1711 int lb_size;
1712
1713 if (udf_node->num_extensions == 0)
1714 return;
1715
1716 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1717 /* count number of allocation extents in use */
1718 num_extents = 0;
1719 slot = 0;
1720 for (;;) {
1721 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1722 if (eof)
1723 break;
1724 len = udf_rw32(s_ad.len);
1725 flags = UDF_EXT_FLAGS(len);
1726
1727 if (flags == UDF_EXT_REDIRECT)
1728 num_extents++;
1729
1730 slot++;
1731 }
1732
1733 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1734 num_extents));
1735
1736 /* XXX choice: we could delay freeing them on node writeout */
1737 /* free excess entries */
1738 extnr = num_extents;
1739 for (;extnr < udf_node->num_extensions; extnr++) {
1740 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1741 /* free dscriptor */
1742 s_ad = udf_node->ext_loc[extnr];
1743 udf_free_logvol_dscr(udf_node->ump, &s_ad,
1744 udf_node->ext[extnr]);
1745 udf_node->ext[extnr] = NULL;
1746
1747 /* free disc space */
1748 lb_num = udf_rw32(s_ad.loc.lb_num);
1749 vpart_num = udf_rw16(s_ad.loc.part_num);
1750 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1751
1752 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1753 }
1754
1755 /* set our new number of allocation extents */
1756 udf_node->num_extensions = num_extents;
1757 }
1758
1759
1760 /* --------------------------------------------------------------------- */
1761
1762 /*
1763 * Adjust the node's allocation descriptors to reflect the new mapping; do
1764 * take note that we might glue to existing allocation descriptors.
1765 *
1766 * XXX Note there can only be one allocation being recorded/mount; maybe
1767 * explicit allocation in shedule thread?
1768 */
1769
1770 static void
1771 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
1772 uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
1773 {
1774 struct vnode *vp = buf->b_vp;
1775 struct udf_node *udf_node = VTOI(vp);
1776 struct file_entry *fe;
1777 struct extfile_entry *efe;
1778 struct icb_tag *icbtag;
1779 struct long_ad s_ad, c_ad;
1780 uint64_t inflen, from, till;
1781 uint64_t foffset, end_foffset, restart_foffset;
1782 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
1783 uint32_t num_lb, len, flags, lb_num;
1784 uint32_t run_start;
1785 uint32_t slot_offset, replace_len, replace;
1786 int addr_type, icbflags;
1787 // int udf_c_type = buf->b_udf_c_type;
1788 int lb_size, run_length, eof;
1789 int slot, cpy_slot, cpy_slots, restart_slot;
1790 int error;
1791
1792 DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
1793
1794 #if 0
1795 /* XXX disable sanity check for now */
1796 /* sanity check ... should be panic ? */
1797 if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
1798 return;
1799 #endif
1800
1801 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1802
1803 /* do the job */
1804 UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
1805 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
1806
1807 fe = udf_node->fe;
1808 efe = udf_node->efe;
1809 if (fe) {
1810 icbtag = &fe->icbtag;
1811 inflen = udf_rw64(fe->inf_len);
1812 } else {
1813 icbtag = &efe->icbtag;
1814 inflen = udf_rw64(efe->inf_len);
1815 }
1816
1817 /* do check if `till' is not past file information length */
1818 from = buf->b_lblkno * lb_size;
1819 till = MIN(inflen, from + buf->b_resid);
1820
1821 num_lb = (till - from + lb_size -1) / lb_size;
1822
1823 DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
1824
1825 icbflags = udf_rw16(icbtag->flags);
1826 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1827
1828 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1829 /* nothing to do */
1830 /* XXX clean up rest of node? just in case? */
1831 UDF_UNLOCK_NODE(udf_node, 0);
1832 return;
1833 }
1834
1835 slot = 0;
1836 cpy_slot = 0;
1837 foffset = 0;
1838
1839 /* 1) copy till first overlap piece to the rewrite buffer */
1840 for (;;) {
1841 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1842 if (eof) {
1843 DPRINTF(WRITE,
1844 ("Record allocation in node "
1845 "failed: encountered EOF\n"));
1846 UDF_UNLOCK_NODE(udf_node, 0);
1847 buf->b_error = EINVAL;
1848 return;
1849 }
1850 len = udf_rw32(s_ad.len);
1851 flags = UDF_EXT_FLAGS(len);
1852 len = UDF_EXT_LEN(len);
1853
1854 if (flags == UDF_EXT_REDIRECT) {
1855 slot++;
1856 continue;
1857 }
1858
1859 end_foffset = foffset + len;
1860 if (end_foffset > from)
1861 break; /* found */
1862
1863 node_ad_cpy[cpy_slot++] = s_ad;
1864
1865 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
1866 "-> stack\n",
1867 udf_rw16(s_ad.loc.part_num),
1868 udf_rw32(s_ad.loc.lb_num),
1869 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1870 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1871
1872 foffset = end_foffset;
1873 slot++;
1874 }
1875 restart_slot = slot;
1876 restart_foffset = foffset;
1877
1878 /* 2) trunc overlapping slot at overlap and copy it */
1879 slot_offset = from - foffset;
1880 if (slot_offset > 0) {
1881 DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
1882 slot_offset, flags >> 30, flags));
1883
1884 s_ad.len = udf_rw32(slot_offset | flags);
1885 node_ad_cpy[cpy_slot++] = s_ad;
1886
1887 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
1888 "-> stack\n",
1889 udf_rw16(s_ad.loc.part_num),
1890 udf_rw32(s_ad.loc.lb_num),
1891 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1892 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1893 }
1894 foffset += slot_offset;
1895
1896 /* 3) insert new mappings */
1897 memset(&s_ad, 0, sizeof(struct long_ad));
1898 lb_num = 0;
1899 for (lb_num = 0; lb_num < num_lb; lb_num++) {
1900 run_start = mapping[lb_num];
1901 run_length = 1;
1902 while (lb_num < num_lb-1) {
1903 if (mapping[lb_num+1] != mapping[lb_num]+1)
1904 if (mapping[lb_num+1] != mapping[lb_num])
1905 break;
1906 run_length++;
1907 lb_num++;
1908 }
1909 /* insert slot for this mapping */
1910 len = run_length * lb_size;
1911
1912 /* bounds checking */
1913 if (foffset + len > till)
1914 len = till - foffset;
1915 KASSERT(foffset + len <= inflen);
1916
1917 s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
1918 s_ad.loc.part_num = udf_rw16(vpart_num);
1919 s_ad.loc.lb_num = udf_rw32(run_start);
1920
1921 foffset += len;
1922
1923 /* paranoia */
1924 if (len == 0) {
1925 DPRINTF(WRITE,
1926 ("Record allocation in node "
1927 "failed: insert failed\n"));
1928 UDF_UNLOCK_NODE(udf_node, 0);
1929 buf->b_error = EINVAL;
1930 return;
1931 }
1932 node_ad_cpy[cpy_slot++] = s_ad;
1933
1934 DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
1935 "flags %d -> stack\n",
1936 udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
1937 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1938 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1939 }
1940
1941 /* 4) pop replaced length */
1942 slot = restart_slot;
1943 foffset = restart_foffset;
1944
1945 replace_len = till - foffset; /* total amount of bytes to pop */
1946 slot_offset = from - foffset; /* offset in first encounted slot */
1947 KASSERT((slot_offset % lb_size) == 0);
1948
1949 for (;;) {
1950 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1951 if (eof)
1952 break;
1953
1954 len = udf_rw32(s_ad.len);
1955 flags = UDF_EXT_FLAGS(len);
1956 len = UDF_EXT_LEN(len);
1957 lb_num = udf_rw32(s_ad.loc.lb_num);
1958
1959 if (flags == UDF_EXT_REDIRECT) {
1960 slot++;
1961 continue;
1962 }
1963
1964 DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
1965 "replace_len %d, "
1966 "vp %d, lb %d, len %d, flags %d\n",
1967 slot, slot_offset, replace_len,
1968 udf_rw16(s_ad.loc.part_num),
1969 udf_rw32(s_ad.loc.lb_num),
1970 UDF_EXT_LEN(udf_rw32(s_ad.len)),
1971 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
1972
1973 /* adjust for slot offset */
1974 if (slot_offset) {
1975 DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
1976 lb_num += slot_offset / lb_size;
1977 len -= slot_offset;
1978 foffset += slot_offset;
1979 replace_len -= slot_offset;
1980
1981 /* mark adjusted */
1982 slot_offset = 0;
1983 }
1984
1985 /* advance for (the rest of) this slot */
1986 replace = MIN(len, replace_len);
1987 DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
1988
1989 /* advance for this slot */
1990 if (replace) {
1991 /* note: dont round DOWN on num_lb since we then
1992 * forget the last partial one */
1993 num_lb = (replace + lb_size - 1) / lb_size;
1994 if (flags != UDF_EXT_FREE) {
1995 udf_free_allocated_space(ump, lb_num,
1996 udf_rw16(s_ad.loc.part_num), num_lb);
1997 }
1998 lb_num += num_lb;
1999 len -= replace;
2000 foffset += replace;
2001 replace_len -= replace;
2002 }
2003
2004 /* do we have a slot tail ? */
2005 if (len) {
2006 KASSERT(foffset % lb_size == 0);
2007
2008 /* we arrived at our point, push remainder */
2009 s_ad.len = udf_rw32(len | flags);
2010 s_ad.loc.lb_num = udf_rw32(lb_num);
2011 if (flags == UDF_EXT_FREE)
2012 s_ad.loc.lb_num = udf_rw32(0);
2013 node_ad_cpy[cpy_slot++] = s_ad;
2014 foffset += len;
2015 slot++;
2016
2017 DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
2018 "-> stack\n",
2019 udf_rw16(s_ad.loc.part_num),
2020 udf_rw32(s_ad.loc.lb_num),
2021 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2022 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2023 break;
2024 }
2025
2026 slot++;
2027 }
2028
2029 /* 5) copy remainder */
2030 for (;;) {
2031 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2032 if (eof)
2033 break;
2034
2035 len = udf_rw32(s_ad.len);
2036 flags = UDF_EXT_FLAGS(len);
2037 len = UDF_EXT_LEN(len);
2038
2039 if (flags == UDF_EXT_REDIRECT) {
2040 slot++;
2041 continue;
2042 }
2043
2044 node_ad_cpy[cpy_slot++] = s_ad;
2045
2046 DPRINTF(ALLOC, ("\t5: insert new mapping "
2047 "vp %d lb %d, len %d, flags %d "
2048 "-> stack\n",
2049 udf_rw16(s_ad.loc.part_num),
2050 udf_rw32(s_ad.loc.lb_num),
2051 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2052 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2053
2054 slot++;
2055 }
2056
2057 /* 6) reset node descriptors */
2058 udf_wipe_adslots(udf_node);
2059
2060 /* 7) copy back extents; merge when possible. Recounting on the fly */
2061 cpy_slots = cpy_slot;
2062
2063 c_ad = node_ad_cpy[0];
2064 slot = 0;
2065 DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
2066 "lb %d, len %d, flags %d\n",
2067 udf_rw16(c_ad.loc.part_num),
2068 udf_rw32(c_ad.loc.lb_num),
2069 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2070 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2071
2072 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2073 s_ad = node_ad_cpy[cpy_slot];
2074
2075 DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
2076 "lb %d, len %d, flags %d\n",
2077 udf_rw16(s_ad.loc.part_num),
2078 udf_rw32(s_ad.loc.lb_num),
2079 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2080 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2081
2082 /* see if we can merge */
2083 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2084 /* not mergable (anymore) */
2085 DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
2086 "len %d, flags %d\n",
2087 udf_rw16(c_ad.loc.part_num),
2088 udf_rw32(c_ad.loc.lb_num),
2089 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2090 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2091
2092 error = udf_append_adslot(udf_node, &slot, &c_ad);
2093 if (error) {
2094 buf->b_error = error;
2095 goto out;
2096 }
2097 c_ad = s_ad;
2098 slot++;
2099 }
2100 }
2101
2102 /* 8) push rest slot (if any) */
2103 if (UDF_EXT_LEN(c_ad.len) > 0) {
2104 DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
2105 "len %d, flags %d\n",
2106 udf_rw16(c_ad.loc.part_num),
2107 udf_rw32(c_ad.loc.lb_num),
2108 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2109 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2110
2111 error = udf_append_adslot(udf_node, &slot, &c_ad);
2112 if (error) {
2113 buf->b_error = error;
2114 goto out;
2115 }
2116 }
2117
2118 out:
2119 udf_count_alloc_exts(udf_node);
2120
2121 /* the node's descriptors should now be sane */
2122 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2123 UDF_UNLOCK_NODE(udf_node, 0);
2124
2125 KASSERT(orig_inflen == new_inflen);
2126 KASSERT(new_lbrec >= orig_lbrec);
2127
2128 return;
2129 }
2130
2131 /* --------------------------------------------------------------------- */
2132
/*
 * Grow (extend) a UDF node to `new_size' bytes.
 *
 * If the node uses internal (embedded) allocation and the grown data still
 * fits inside the (extended) file entry descriptor, only the recorded
 * lengths and descriptor CRC length are updated.  Otherwise the embedded
 * data is evacuated through vn_rdwr(), the node is converted to short/long
 * allocation descriptors, and the added space is appended as unallocated
 * (UDF_EXT_FREE) extents in chunks of at most `max_len' bytes; the
 * evacuated data is then written back through vn_rdwr().
 *
 * Returns 0 on success or an errno value (from udf_append_adslot() or
 * vn_rdwr()).  Locks and unlocks the node internally.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;	/* c_ad: accumulator, s_ad: scratch */
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t icbflags, len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t my_part, dst_part;
	uint8_t *data_pos, *evacuated_data;
	int addr_type;
	int slot, cpy_slot;
	int isdir, eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* maximum extent length, rounded down to whole logical blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	/* the node carries either a file entry (fe) or an extended one (efe) */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
	}
	/* allocation descriptors / embedded data start after the EA space */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen += size_diff;
			objsize += size_diff;
			l_ad += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len = udf_rw64(inflen);
				fe->l_ad = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			/* internal alloc records no logical blocks */
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/*
			 * node is locked, so safe to exit mutex
			 * NOTE(review): the UDF node lock is dropped around
			 * vn_rdwr() here and retaken below; presumably the
			 * vnode lock (IO_NODELOCKED) keeps this safe — confirm.
			 */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0,
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc and select type */
		isdir = (vp->v_type == VDIR);
		my_part = udf_rw16(udf_node->loc.loc.part_num);
		dst_part = isdir? ump->fids_part : ump->data_part;
		/* short_ad suffices when data stays in the node's partition */
		addr_type = UDF_ICB_SHORT_ALLOC;
		if (dst_part != my_part)
			addr_type = UDF_ICB_LONG_ALLOC;

		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= addr_type;
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* old contents become one unallocated extent of old_size */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot = 0;
		cpy_slot = 0;
		foffset = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);

			/* redirect extents hold no file data: skip in foffset */
			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		/* grow the last block to its boundary, capped by size_diff */
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num = udf_rw32(0);

		/* udf_ads_merge() folds s_ad into c_ad; non-zero == no merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len = udf_rw64(inflen);
	} else {
		efe->inf_len = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0,
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	udf_count_alloc_exts(udf_node);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2402
2403 /* --------------------------------------------------------------------- */
2404
2405 int
2406 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2407 {
2408 struct vnode *vp = udf_node->vnode;
2409 struct udf_mount *ump = udf_node->ump;
2410 struct file_entry *fe;
2411 struct extfile_entry *efe;
2412 struct icb_tag *icbtag;
2413 struct long_ad c_ad, s_ad, *node_ad_cpy;
2414 uint64_t size_diff, old_size, inflen, objsize;
2415 uint64_t foffset, end_foffset;
2416 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2417 uint32_t lb_size, dscr_size, crclen;
2418 uint32_t slot_offset;
2419 uint32_t len, flags, max_len;
2420 uint32_t num_lb, lb_num;
2421 uint32_t max_l_ad, l_ad, l_ea;
2422 uint16_t vpart_num;
2423 uint8_t *data_pos;
2424 int icbflags, addr_type;
2425 int slot, cpy_slot, cpy_slots;
2426 int eof, error;
2427
2428 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2429
2430 UDF_LOCK_NODE(udf_node, 0);
2431 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2432
2433 lb_size = udf_rw32(ump->logical_vol->lb_size);
2434 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2435
2436 /* do the work */
2437 fe = udf_node->fe;
2438 efe = udf_node->efe;
2439 if (fe) {
2440 icbtag = &fe->icbtag;
2441 inflen = udf_rw64(fe->inf_len);
2442 objsize = inflen;
2443 dscr_size = sizeof(struct file_entry) -1;
2444 l_ea = udf_rw32(fe->l_ea);
2445 l_ad = udf_rw32(fe->l_ad);
2446 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2447 } else {
2448 icbtag = &efe->icbtag;
2449 inflen = udf_rw64(efe->inf_len);
2450 objsize = udf_rw64(efe->obj_size);
2451 dscr_size = sizeof(struct extfile_entry) -1;
2452 l_ea = udf_rw32(efe->l_ea);
2453 l_ad = udf_rw32(efe->l_ad);
2454 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2455 }
2456 max_l_ad = lb_size - dscr_size - l_ea;
2457
2458 icbflags = udf_rw16(icbtag->flags);
2459 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2460
2461 old_size = inflen;
2462 size_diff = old_size - new_size;
2463
2464 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2465
2466 /* shrink the node to its new size */
2467 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2468 /* only reflect size change directly in the node */
2469 KASSERT(new_size <= max_l_ad);
2470 inflen -= size_diff;
2471 objsize -= size_diff;
2472 l_ad -= size_diff;
2473 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2474 if (fe) {
2475 fe->inf_len = udf_rw64(inflen);
2476 fe->l_ad = udf_rw32(l_ad);
2477 fe->tag.desc_crc_len = udf_rw32(crclen);
2478 } else {
2479 efe->inf_len = udf_rw64(inflen);
2480 efe->obj_size = udf_rw64(objsize);
2481 efe->l_ad = udf_rw32(l_ad);
2482 efe->tag.desc_crc_len = udf_rw32(crclen);
2483 }
2484 error = 0;
2485
2486 /* clear the space in the descriptor */
2487 KASSERT(old_size > new_size);
2488 memset(data_pos + new_size, 0, old_size - new_size);
2489
2490 /* TODO zero appened space in buffer! */
2491 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2492
2493 /* set new size for uvm */
2494 uvm_vnp_setsize(vp, new_size);
2495
2496 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2497 UDF_UNLOCK_NODE(udf_node, 0);
2498
2499 KASSERT(new_inflen == orig_inflen - size_diff);
2500 KASSERT(new_lbrec == orig_lbrec);
2501 KASSERT(new_lbrec == 0);
2502
2503 return 0;
2504 }
2505
2506 /* setup node cleanup extents copy space */
2507 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2508 M_UDFMNT, M_WAITOK);
2509 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2510
2511 /*
2512 * Shrink the node by releasing the allocations and truncate the last
2513 * allocation to the new size. If the new size fits into the
2514 * allocation descriptor itself, transform it into an
2515 * UDF_ICB_INTERN_ALLOC.
2516 */
2517 slot = 0;
2518 cpy_slot = 0;
2519 foffset = 0;
2520
2521 /* 1) copy till first overlap piece to the rewrite buffer */
2522 for (;;) {
2523 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2524 if (eof) {
2525 DPRINTF(WRITE,
2526 ("Shrink node failed: "
2527 "encountered EOF\n"));
2528 error = EINVAL;
2529 goto errorout; /* panic? */
2530 }
2531 len = udf_rw32(s_ad.len);
2532 flags = UDF_EXT_FLAGS(len);
2533 len = UDF_EXT_LEN(len);
2534
2535 if (flags == UDF_EXT_REDIRECT) {
2536 slot++;
2537 continue;
2538 }
2539
2540 end_foffset = foffset + len;
2541 if (end_foffset > new_size)
2542 break; /* found */
2543
2544 node_ad_cpy[cpy_slot++] = s_ad;
2545
2546 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2547 "-> stack\n",
2548 udf_rw16(s_ad.loc.part_num),
2549 udf_rw32(s_ad.loc.lb_num),
2550 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2551 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2552
2553 foffset = end_foffset;
2554 slot++;
2555 }
2556 slot_offset = new_size - foffset;
2557
2558 /* 2) trunc overlapping slot at overlap and copy it */
2559 if (slot_offset > 0) {
2560 lb_num = udf_rw32(s_ad.loc.lb_num);
2561 vpart_num = udf_rw16(s_ad.loc.part_num);
2562
2563 if (flags == UDF_EXT_ALLOCATED) {
2564 /* note: round DOWN on num_lb */
2565 lb_num += (slot_offset + lb_size -1) / lb_size;
2566 num_lb = (len - slot_offset) / lb_size;
2567
2568 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2569 }
2570
2571 s_ad.len = udf_rw32(slot_offset | flags);
2572 node_ad_cpy[cpy_slot++] = s_ad;
2573 slot++;
2574
2575 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2576 "-> stack\n",
2577 udf_rw16(s_ad.loc.part_num),
2578 udf_rw32(s_ad.loc.lb_num),
2579 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2580 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2581 }
2582
2583 /* 3) delete remainder */
2584 for (;;) {
2585 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2586 if (eof)
2587 break;
2588
2589 len = udf_rw32(s_ad.len);
2590 flags = UDF_EXT_FLAGS(len);
2591 len = UDF_EXT_LEN(len);
2592
2593 if (flags == UDF_EXT_REDIRECT) {
2594 slot++;
2595 continue;
2596 }
2597
2598 DPRINTF(ALLOC, ("\t3: delete remainder "
2599 "vp %d lb %d, len %d, flags %d\n",
2600 udf_rw16(s_ad.loc.part_num),
2601 udf_rw32(s_ad.loc.lb_num),
2602 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2603 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2604
2605 if (flags == UDF_EXT_ALLOCATED) {
2606 lb_num = udf_rw32(s_ad.loc.lb_num);
2607 vpart_num = udf_rw16(s_ad.loc.part_num);
2608 num_lb = (len + lb_size - 1) / lb_size;
2609
2610 udf_free_allocated_space(ump, lb_num, vpart_num,
2611 num_lb);
2612 }
2613
2614 slot++;
2615 }
2616
2617 /* 4) if it will fit into the descriptor then convert */
2618 if (new_size < max_l_ad) {
2619 /*
2620 * resque/evacuate old piece by reading it in, and convert it
2621 * to internal alloc.
2622 */
2623 if (new_size == 0) {
2624 /* XXX/TODO only for zero sizing now */
2625 udf_wipe_adslots(udf_node);
2626
2627 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2628 icbflags |= UDF_ICB_INTERN_ALLOC;
2629 icbtag->flags = udf_rw16(icbflags);
2630
2631 inflen -= size_diff; KASSERT(inflen == 0);
2632 objsize -= size_diff;
2633 l_ad = new_size;
2634 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2635 if (fe) {
2636 fe->inf_len = udf_rw64(inflen);
2637 fe->l_ad = udf_rw32(l_ad);
2638 fe->tag.desc_crc_len = udf_rw32(crclen);
2639 } else {
2640 efe->inf_len = udf_rw64(inflen);
2641 efe->obj_size = udf_rw64(objsize);
2642 efe->l_ad = udf_rw32(l_ad);
2643 efe->tag.desc_crc_len = udf_rw32(crclen);
2644 }
2645 /* eventually copy in evacuated piece */
2646 /* set new size for uvm */
2647 uvm_vnp_setsize(vp, new_size);
2648
2649 free(node_ad_cpy, M_UDFMNT);
2650 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2651
2652 UDF_UNLOCK_NODE(udf_node, 0);
2653
2654 KASSERT(new_inflen == orig_inflen - size_diff);
2655 KASSERT(new_inflen == 0);
2656 KASSERT(new_lbrec == 0);
2657
2658 return 0;
2659 }
2660
2661 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2662 }
2663
2664 /* 5) reset node descriptors */
2665 udf_wipe_adslots(udf_node);
2666
2667 /* 6) copy back extents; merge when possible. Recounting on the fly */
2668 cpy_slots = cpy_slot;
2669
2670 c_ad = node_ad_cpy[0];
2671 slot = 0;
2672 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2673 s_ad = node_ad_cpy[cpy_slot];
2674
2675 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2676 "lb %d, len %d, flags %d\n",
2677 udf_rw16(s_ad.loc.part_num),
2678 udf_rw32(s_ad.loc.lb_num),
2679 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2680 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2681
2682 /* see if we can merge */
2683 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2684 /* not mergable (anymore) */
2685 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2686 "len %d, flags %d\n",
2687 udf_rw16(c_ad.loc.part_num),
2688 udf_rw32(c_ad.loc.lb_num),
2689 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2690 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2691
2692 error = udf_append_adslot(udf_node, &slot, &c_ad);
2693 if (error)
2694 goto errorout; /* panic? */
2695 c_ad = s_ad;
2696 slot++;
2697 }
2698 }
2699
2700 /* 7) push rest slot (if any) */
2701 if (UDF_EXT_LEN(c_ad.len) > 0) {
2702 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2703 "len %d, flags %d\n",
2704 udf_rw16(c_ad.loc.part_num),
2705 udf_rw32(c_ad.loc.lb_num),
2706 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2707 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2708
2709 error = udf_append_adslot(udf_node, &slot, &c_ad);
2710 if (error)
2711 goto errorout; /* panic? */
2712 ;
2713 }
2714
2715 inflen -= size_diff;
2716 objsize -= size_diff;
2717 if (fe) {
2718 fe->inf_len = udf_rw64(inflen);
2719 } else {
2720 efe->inf_len = udf_rw64(inflen);
2721 efe->obj_size = udf_rw64(objsize);
2722 }
2723 error = 0;
2724
2725 /* set new size for uvm */
2726 uvm_vnp_setsize(vp, new_size);
2727
2728 errorout:
2729 free(node_ad_cpy, M_UDFMNT);
2730
2731 udf_count_alloc_exts(udf_node);
2732
2733 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2734 UDF_UNLOCK_NODE(udf_node, 0);
2735
2736 KASSERT(new_inflen == orig_inflen - size_diff);
2737
2738 return error;
2739 }
2740
2741