/* $NetBSD: udf_allocation.c,v 1.18.4.1 2009/03/31 23:06:13 snj Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.18.4.1 2009/03/31 23:06:13 snj Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #include "udf.h"
67 #include "udf_subr.h"
68 #include "udf_bswap.h"
69
70
/* Get the private udf_node from a vnode; v_data holds the udf_node. */
#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
72
73 static void udf_record_allocation_in_node(struct udf_mount *ump,
74 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
75 struct long_ad *node_ad_cpy);
76
/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(n log(n)) to O(1) for most functionality
 * since actions are most likely sequential, so seeking doesn't need to
 * search for the same or an adjacent position again.
 */
83
84 /* --------------------------------------------------------------------- */
85
86 #if 0
87 #if 1
/*
 * Debug helper: dump the allocation state of a udf_node to the console.
 * A no-op unless UDF_DEBUG_NODEDUMP is set in udf_verbose.
 *
 * The node is backed by either a file_entry (fe) or an extended file
 * entry (efe); for internal (embedded) allocation only the information
 * length is printed, otherwise every allocation descriptor slot is
 * walked and printed until end-of-file.
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* exactly one of fe/efe backs the node */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data embedded in the node itself; no extents */
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen = %"PRIu64"\n", inflen);
	printf("\t\t");

	/* walk all allocation descriptor slots until end-of-file */
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		printf("[");
		/*
		 * NOTE(review): part_num is read from a uint16_t, so this
		 * test is always true; kept as-is for the printed layout.
		 */
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags>>30);
		printf("] ");

		if (flags == UDF_EXT_REDIRECT) {
			/* this slot redirects to a new allocation extent */
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
156 #else
157 #define udf_node_dump(a)
158 #endif
159
160
/*
 * Paranoia helper: assert that the logical blocks
 * [lb_num .. lb_num+num_lb> on virtual partition vpart_num are recorded
 * as allocated.  For bitmap backed partition types (physical/sparable)
 * a set bit in the unallocated-space bitmap means the block is free, so
 * every set bit encountered in the range is reported as "NOT marked
 * busy".  Other partition types are not checked yet (TODO).
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits, walking the range bit by bit */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				/* set bit == free, i.e. NOT allocated */
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}
230
231
/*
 * Heavy (debug) sanity check of a udf_node's on-disc administration.
 *
 * Recomputes the information length and the number of recorded logical
 * blocks by walking all allocation slots, asserts these match the
 * values stored in the (extended) file entry, verifies that the unused
 * tail of the allocation descriptor area is zeroed, and checks that
 * allocated extents are marked busy in the space bitmaps.
 *
 * The recomputed totals are returned in *cnt_inflen and
 * *cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* gather administration from the backing (extended) file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad = udf_rw32(fe->l_ad);
		l_ea = udf_rw32(fe->l_ea);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad = udf_rw32(efe->l_ad);
		l_ea = udf_rw32(efe->l_ea);
	}
	/* allocation descriptors start after the extended attributes */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: l_ad is the byte length of the data */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting over all allocation descriptor slots */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		/* only the last extent may be a partial logical block */
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* redirect extents are exactly one block long */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
339 #else
340 static void
341 udf_node_sanity_check(struct udf_node *udf_node,
342 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
343 struct file_entry *fe;
344 struct extfile_entry *efe;
345 struct icb_tag *icbtag;
346 uint64_t inflen, logblksrec;
347 int dscr_size, lb_size;
348
349 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
350
351 fe = udf_node->fe;
352 efe = udf_node->efe;
353 if (fe) {
354 icbtag = &fe->icbtag;
355 inflen = udf_rw64(fe->inf_len);
356 dscr_size = sizeof(struct file_entry) -1;
357 logblksrec = udf_rw64(fe->logblks_rec);
358 } else {
359 icbtag = &efe->icbtag;
360 inflen = udf_rw64(efe->inf_len);
361 dscr_size = sizeof(struct extfile_entry) -1;
362 logblksrec = udf_rw64(efe->logblks_rec);
363 }
364 *cnt_logblksrec = logblksrec;
365 *cnt_inflen = inflen;
366 }
367 #endif
368
369 /* --------------------------------------------------------------------- */
370
/*
 * Translate a virtual (partition, logical block) address in icb_loc to
 * its physical disc logical block number.
 *
 * On success 0 is returned with *lb_numres set to the disc logical
 * block number and *extres to the number of consecutive blocks this
 * translation stays valid for.  Returns EINVAL on invalid addresses or
 * an error code when reading the VAT fails.
 */
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
		   uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

	/* restarted after metadata partition resolved vpart/lb_num anew */
translate_again:
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file; 4 bytes/entry */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* not remapped; transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					"failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);

			/* redirect slots chain to a new extent block */
			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num = udf_rw32(s_icb_loc.loc.lb_num);
		vpart = udf_rw16(s_icb_loc.loc.part_num);
		lb_num += (ext_offset + lb_size -1) / lb_size;
		len -= ext_offset;
		ext_offset = 0;

		flags = UDF_EXT_FLAGS(s_icb_loc.len);

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}
534
535
536 /* XXX provisional primitive braindead version */
537 /* TODO use ext_res */
538 void
539 udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
540 uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
541 {
542 struct long_ad loc;
543 uint32_t lb_numres, ext_res;
544 int sector;
545
546 for (sector = 0; sector < sectors; sector++) {
547 memset(&loc, 0, sizeof(struct long_ad));
548 loc.loc.part_num = udf_rw16(vpart_num);
549 loc.loc.lb_num = udf_rw32(*lmapping);
550 udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
551 *pmapping = lb_numres;
552 lmapping++; pmapping++;
553 }
554 }
555
556
557 /* --------------------------------------------------------------------- */
558
/*
 * Translate an extent (in logical blocks) into logical block numbers; used
 * for read and write operations.  DOESN'T check extents.
 */
563
/*
 * Translate the file region starting at logical block `from' spanning
 * `num_lb' blocks into disc logical block numbers stored in map[].
 *
 * Special values written to map[]: UDF_TRANS_INTERN when the file data
 * is embedded in the node itself, and UDF_TRANS_ZERO for blocks backed
 * by free or allocated-but-not-used extents.  Returns 0 on success,
 * ENOENT on a NULL node, EINVAL when the range can't be located, or an
 * error from udf_translate_vtop().
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data embedded in the node; nothing to translate */
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirect slots chain to a new allocation extent block */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; offset of `from' within this extent */
	ext_offset = from * lb_size - foffset;

	/* emit mappings, walking extents until num_lb blocks are done */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		lb_num = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num += (ext_offset + lb_size -1) / lb_size;
		overlap = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * note that the while(){} is necessary since the extent
		 * udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* no backing store; map as zero blocks */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* translate via the partition mapping */
				t_ad.loc.lb_num = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
731
732 /* --------------------------------------------------------------------- */
733
/*
 * Search the VAT (Virtual Allocation Table) for a free entry, i.e. one
 * whose mapping reads 0xffffffff.  If none is found the VAT is extended
 * by one entry.  The chosen entry is pre-marked with the initialiser
 * value 0xfffffffe and its entry number is returned in *lbnumres.
 *
 * NOTE(review): a read error while scanning is only reported on the
 * console; the function then falls through to extending the VAT and
 * still returns 0 -- confirm this is intended.
 */
static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
	uint8_t *blob;
	int entry, chunk, found, error;

	KASSERT(ump);
	KASSERT(ump->logical_vol);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

	/* TODO static allocation of search chunk */

	/* resume searching at the last known free position */
	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
	found = 0;
	error = 0;
	entry = 0;
	do {
		/* at most one logical block of 4-byte entries at a time */
		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
		if (chunk <= 0)
			break;
		/* load in chunk */
		error = udf_vat_read(ump->vat_node, blob, chunk,
				ump->vat_offset + lb_num * 4);

		if (error)
			break;

		/* search this chunk */
		for (entry=0; entry < chunk /4; entry++, lb_num++) {
			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
			lb_map = udf_rw32(udf_rw32_lbmap);
			if (lb_map == 0xffffffff) {
				found = 1;
				break;
			}
		}
	} while (!found);
	if (error) {
		printf("udf_search_free_vatloc: error reading in vat chunk "
			"(lb %d, size %d)\n", lb_num, chunk);
	}

	if (!found) {
		/* extend VAT */
		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
		lb_num = ump->vat_entries;
		ump->vat_entries++;
	}

	/* mark entry with initialiser just in case */
	lb_map = udf_rw32(0xfffffffe);
	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
		ump->vat_offset + lb_num *4);
	ump->vat_last_free_lb = lb_num;

	free(blob, M_UDFTEMP);
	*lbnumres = lb_num;
	return 0;
}
796
797
/*
 * Allocate up to *num_lb logical blocks from a space bitmap, recording
 * the chosen block numbers in lmappos[].  A set bit marks a free block;
 * allocating clears it.  *num_lb is decremented for every block handed
 * out, so a non-zero value on return means the bitmap ran out of space.
 * The recorded blocks are ascending but not necessarily contiguous.
 *
 * Two search positions are kept in the bitmap, one for metadata and one
 * for data, held at least 1024 blocks apart by a heuristic; at most two
 * passes are made so the search can wrap to the start of the bitmap.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;	/* start byte aligned */
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;	/* wrap for the second pass */

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				/* byte fully allocated; try the next */
				offset += 8;
				continue;
			}

			/* check for ffs overshoot */
			if (offset + bit-1 >= bitmap->max_offset) {
				offset = bitmap->max_offset;
				break;
			}

			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember where we got for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
853
854
855 static void
856 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
857 {
858 uint32_t offset;
859 uint32_t bit, bitval;
860 uint8_t *bpos;
861
862 offset = lb_num;
863
864 /* starter bits */
865 bpos = bitmap->bits + offset/8;
866 bit = offset % 8;
867 while ((bit != 0) && (num_lb > 0)) {
868 bitval = (1 << bit);
869 KASSERT((*bpos & bitval) == 0);
870 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
871 offset, bpos, bit));
872 *bpos |= bitval;
873 offset++; num_lb--;
874 bit = (bit + 1) % 8;
875 }
876 if (num_lb == 0)
877 return;
878
879 /* whole bytes */
880 KASSERT(bit == 0);
881 bpos = bitmap->bits + offset / 8;
882 while (num_lb >= 8) {
883 KASSERT((*bpos == 0));
884 DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
885 *bpos = 255;
886 offset += 8; num_lb -= 8;
887 bpos++;
888 }
889
890 /* stop bits */
891 KASSERT(num_lb < 8);
892 bit = 0;
893 while (num_lb > 0) {
894 bitval = (1 << bit);
895 KASSERT((*bpos & bitval) == 0);
896 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
897 offset, bpos, bit));
898 *bpos |= bitval;
899 offset++; num_lb--;
900 bit = (bit + 1) % 8;
901 }
902 }
903
904
/*
 * Allocate num_lb logical blocks on virtual partition vpart_num and
 * record their logical block numbers in lmapping[].  The strategy is
 * chosen by the partition's vtop_alloc scheme: VAT entry search,
 * sequential track allocation, or space bitmap allocation (normal or
 * metadata).  Returns 0, ENOSPC, or an error from the VAT search; on
 * a partial bitmap allocation the allocated piece is freed again.
 *
 * NOTE(review): despite the historic "contiguous sequence" label, the
 * spacemap schemes may return non-adjacent blocks -- confirm callers
 * only rely on a plain list.
 */
static int
udf_allocate_space(struct udf_mount *ump, int udf_c_type,
	uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* XXX TODO check disc space */

	alloc_type = ump->vtop_alloc[vpart_num];
	is_node = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error)
			*lmappos = lb_num;
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate the next writable blocks in the track */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount kept in the LVID tables */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :	/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount kept in the LVID tables */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60 BluRay-R */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R */
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64",", (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
1045
1046 /* --------------------------------------------------------------------- */
1047
/*
 * Return logical blocks [lb_num .. lb_num+num_lb> on virtual partition
 * vpart_num to the free space administration.
 *
 * For bitmap backed partitions the blocks are set free in the freed
 * space bitmap when present, else in the unallocated space bitmap, and
 * the free block count in the logical volume integrity descriptor is
 * raised.  For virtual (VAT) partitions the VAT entry is reset to the
 * free marker 0xffffffff.  Metadata partitions free in the metadata
 * unallocated bitmap.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount kept in the LVID tables */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		/* 0xffffffff marks a free VAT entry */
		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount kept in the LVID tables */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1135
1136 /* --------------------------------------------------------------------- */
1137
1138 int
1139 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type,
1140 uint32_t num_lb, uint16_t vpartnr, uint64_t *lmapping)
1141 {
1142 /* TODO properly maintain uncomitted_lb per partition */
1143
1144 /* reserve size for VAT allocated data */
1145 if (ump->vtop_alloc[vpartnr] == UDF_ALLOC_VAT) {
1146 mutex_enter(&ump->allocate_mutex);
1147 ump->uncomitted_lb += num_lb;
1148 mutex_exit(&ump->allocate_mutex);
1149 }
1150
1151 return udf_allocate_space(ump, udf_c_type, vpartnr, num_lb, lmapping);
1152 }
1153
1154 /* --------------------------------------------------------------------- */
1155
1156 /*
1157 * Allocate a buf on disc for direct write out. The space doesn't have to be
1158 * contiguous as the caller takes care of this.
1159 */
1160
1161 void
1162 udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
1163 uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
1164 {
1165 struct udf_node *udf_node = VTOI(buf->b_vp);
1166 int lb_size, blks, udf_c_type;
1167 int vpart_num, num_lb;
1168 int error, s;
1169
1170 /*
1171 * for each sector in the buf, allocate a sector on disc and record
1172 * its position in the provided mapping array.
1173 *
1174 * If its userdata or FIDs, record its location in its node.
1175 */
1176
1177 lb_size = udf_rw32(ump->logical_vol->lb_size);
1178 num_lb = (buf->b_bcount + lb_size -1) / lb_size;
1179 blks = lb_size / DEV_BSIZE;
1180 udf_c_type = buf->b_udf_c_type;
1181
1182 KASSERT(lb_size == ump->discinfo.sector_size);
1183
1184 /* select partition to record the buffer on */
1185 vpart_num = ump->data_part;
1186 if (udf_c_type == UDF_C_NODE)
1187 vpart_num = ump->node_part;
1188 if (udf_c_type == UDF_C_FIDS)
1189 vpart_num = ump->fids_part;
1190 *vpart_nump = vpart_num;
1191
1192 if (udf_c_type == UDF_C_NODE) {
1193 /* if not VAT, its allready allocated */
1194 if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
1195 return;
1196
1197 /* allocate on its backing sequential partition */
1198 vpart_num = ump->data_part;
1199 }
1200
1201 /* do allocation on the selected partition */
1202 error = udf_allocate_space(ump, udf_c_type,
1203 vpart_num, num_lb, lmapping);
1204 if (error) {
1205 /* ARGH! we've not done our accounting right! */
1206 panic("UDF disc allocation accounting gone wrong");
1207 }
1208
1209 /* commit our sector count */
1210 mutex_enter(&ump->allocate_mutex);
1211 if (num_lb > ump->uncomitted_lb) {
1212 ump->uncomitted_lb = 0;
1213 } else {
1214 ump->uncomitted_lb -= num_lb;
1215 }
1216 mutex_exit(&ump->allocate_mutex);
1217
1218 /* If its userdata or FIDs, record its allocation in its node. */
1219 if ((udf_c_type == UDF_C_USERDATA) ||
1220 (udf_c_type == UDF_C_FIDS) ||
1221 (udf_c_type == UDF_C_METADATA_SBM))
1222 {
1223 udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
1224 node_ad_cpy);
1225 /* decrement our outstanding bufs counter */
1226 s = splbio();
1227 udf_node->outstanding_bufs--;
1228 splx(s);
1229 }
1230 }
1231
1232 /* --------------------------------------------------------------------- */
1233
1234 /*
1235 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1236 * possible (anymore); a2 returns the rest piece.
1237 */
1238
1239 static int
1240 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1241 {
1242 uint32_t max_len, merge_len;
1243 uint32_t a1_len, a2_len;
1244 uint32_t a1_flags, a2_flags;
1245 uint32_t a1_lbnum, a2_lbnum;
1246 uint16_t a1_part, a2_part;
1247
1248 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1249
1250 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1251 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1252 a1_lbnum = udf_rw32(a1->loc.lb_num);
1253 a1_part = udf_rw16(a1->loc.part_num);
1254
1255 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1256 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1257 a2_lbnum = udf_rw32(a2->loc.lb_num);
1258 a2_part = udf_rw16(a2->loc.part_num);
1259
1260 /* defines same space */
1261 if (a1_flags != a2_flags)
1262 return 1;
1263
1264 if (a1_flags != UDF_EXT_FREE) {
1265 /* the same partition */
1266 if (a1_part != a2_part)
1267 return 1;
1268
1269 /* a2 is successor of a1 */
1270 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1271 return 1;
1272 }
1273
1274 /* merge as most from a2 if possible */
1275 merge_len = MIN(a2_len, max_len - a1_len);
1276 a1_len += merge_len;
1277 a2_len -= merge_len;
1278 a2_lbnum += merge_len/lb_size;
1279
1280 a1->len = udf_rw32(a1_len | a1_flags);
1281 a2->len = udf_rw32(a2_len | a2_flags);
1282 a2->loc.lb_num = udf_rw32(a2_lbnum);
1283
1284 if (a2_len > 0)
1285 return 1;
1286
1287 /* there is space over to merge */
1288 return 0;
1289 }
1290
1291 /* --------------------------------------------------------------------- */
1292
1293 static void
1294 udf_wipe_adslots(struct udf_node *udf_node)
1295 {
1296 struct file_entry *fe;
1297 struct extfile_entry *efe;
1298 struct alloc_ext_entry *ext;
1299 uint64_t inflen, objsize;
1300 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1301 uint8_t *data_pos;
1302 int extnr;
1303
1304 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1305
1306 fe = udf_node->fe;
1307 efe = udf_node->efe;
1308 if (fe) {
1309 inflen = udf_rw64(fe->inf_len);
1310 objsize = inflen;
1311 dscr_size = sizeof(struct file_entry) -1;
1312 l_ea = udf_rw32(fe->l_ea);
1313 l_ad = udf_rw32(fe->l_ad);
1314 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1315 } else {
1316 inflen = udf_rw64(efe->inf_len);
1317 objsize = udf_rw64(efe->obj_size);
1318 dscr_size = sizeof(struct extfile_entry) -1;
1319 l_ea = udf_rw32(efe->l_ea);
1320 l_ad = udf_rw32(efe->l_ad);
1321 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1322 }
1323 max_l_ad = lb_size - dscr_size - l_ea;
1324
1325 /* wipe fe/efe */
1326 memset(data_pos, 0, max_l_ad);
1327 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1328 if (fe) {
1329 fe->l_ad = udf_rw32(0);
1330 fe->logblks_rec = udf_rw64(0);
1331 fe->tag.desc_crc_len = udf_rw32(crclen);
1332 } else {
1333 efe->l_ad = udf_rw32(0);
1334 efe->logblks_rec = udf_rw64(0);
1335 efe->tag.desc_crc_len = udf_rw32(crclen);
1336 }
1337
1338 /* wipe all allocation extent entries */
1339 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1340 ext = udf_node->ext[extnr];
1341 dscr_size = sizeof(struct alloc_ext_entry) -1;
1342 data_pos = (uint8_t *) ext->data;
1343 max_l_ad = lb_size - dscr_size;
1344 memset(data_pos, 0, max_l_ad);
1345 ext->l_ad = udf_rw32(0);
1346
1347 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1348 ext->tag.desc_crc_len = udf_rw32(crclen);
1349 }
1350 udf_node->i_flags |= IN_NODE_REBUILD;
1351 }
1352
1353 /* --------------------------------------------------------------------- */
1354
/*
 * Fetch allocation descriptor number `slot' of the node into `icb',
 * transparently following UDF_EXT_REDIRECT links into the node's allocation
 * extent descriptors. *eof is set when the slot lies past the last recorded
 * descriptor (or the node uses internal allocation).
 */
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	/* descriptor size depends on the node's allocation scheme */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* short_ads inherit the node's own partition number */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		/* rebase offset into the next allocation extent descriptor */
		offset = offset - l_ad;
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}
1462
1463 /* --------------------------------------------------------------------- */
1464
/*
 * Write allocation descriptor `icb' at slot *slot of the node, overwriting
 * an existing entry or appending one past the end. When the current
 * descriptor area is (nearly) full, a new allocation extent descriptor
 * (AED) is allocated and linked in with a redirect; *slot is then bumped by
 * one to account for the inserted link. Maintains l_ad, desc_crc_len and
 * logblks_rec of the descriptor written to. Returns 0 or an error when a
 * needed AED could not be allocated.
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr *dscr, *extdscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	vpart_num = udf_rw16(udf_node->loc.loc.part_num);

	/* determine what descriptor we are in */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea = udf_rw32(fe->l_ea);
		l_ad_p = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag = &efe->icbtag;
		dscr = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea = udf_rw32(efe->l_ea);
		l_ad_p = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	/* descriptor area follows the extended attributes */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	/* descriptor size depends on the node's allocation scheme */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad since it can be a synthesized one */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num = udf_rw32(0);
	}

	/* if offset too big, we go to the allocation extensions */
	l_ad = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* short_ads inherit the node's own partition number */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is adressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		offset = offset - l_ad;

		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts; the old blocks are let go of here */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/* have to append aed, see if we already have a spare one */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));

			/* allocate one logical block to hold the new AED */
			error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
					vpart_num, &lmapping);
			lb_num = lmapping;
			if (error)
				return error;

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
			ext = &extdscr->aee;

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw32(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr] = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		crclen = udf_rw32(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed */
	if (offset >= l_ad) {
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1703
1704 /* --------------------------------------------------------------------- */
1705
1706 static void
1707 udf_count_alloc_exts(struct udf_node *udf_node)
1708 {
1709 struct long_ad s_ad;
1710 uint32_t lb_num, len, flags;
1711 uint16_t vpart_num;
1712 int slot, eof;
1713 int num_extents, extnr;
1714 int lb_size;
1715
1716 if (udf_node->num_extensions == 0)
1717 return;
1718
1719 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1720 /* count number of allocation extents in use */
1721 num_extents = 0;
1722 slot = 0;
1723 for (;;) {
1724 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1725 if (eof)
1726 break;
1727 len = udf_rw32(s_ad.len);
1728 flags = UDF_EXT_FLAGS(len);
1729
1730 if (flags == UDF_EXT_REDIRECT)
1731 num_extents++;
1732
1733 slot++;
1734 }
1735
1736 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1737 num_extents));
1738
1739 /* XXX choice: we could delay freeing them on node writeout */
1740 /* free excess entries */
1741 extnr = num_extents;
1742 for (;extnr < udf_node->num_extensions; extnr++) {
1743 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1744 /* free dscriptor */
1745 s_ad = udf_node->ext_loc[extnr];
1746 udf_free_logvol_dscr(udf_node->ump, &s_ad,
1747 udf_node->ext[extnr]);
1748 udf_node->ext[extnr] = NULL;
1749
1750 /* free disc space */
1751 lb_num = udf_rw32(s_ad.loc.lb_num);
1752 vpart_num = udf_rw16(s_ad.loc.part_num);
1753 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1754
1755 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1756 }
1757
1758 /* set our new number of allocation extents */
1759 udf_node->num_extensions = num_extents;
1760 }
1761
1762
1763 /* --------------------------------------------------------------------- */
1764
1765 /*
1766 * Adjust the node's allocation descriptors to reflect the new mapping; do
1767 * take note that we might glue to existing allocation descriptors.
1768 *
1769 * XXX Note there can only be one allocation being recorded/mount; maybe
1770 * explicit allocation in shedule thread?
1771 */
1772
/*
 * Rewrite the node's allocation descriptors so the file range covered by
 * `buf' points at the freshly allocated blocks in `mapping' (one physical
 * block per logical block, on partition vpart_num). node_ad_cpy is a
 * caller-provided scratch array the new descriptor list is staged in.
 *
 * Works in phases: (1) copy descriptors before the overlap, (2) truncate
 * the first overlapping descriptor, (3) push descriptors for the new
 * mapping, (4) pop and free the replaced extents, (5) copy the remainder,
 * (6) wipe the node's descriptors and (7,8) write the staged list back,
 * merging adjacent pieces on the fly. On failure buf->b_error is set.
 */
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset, replace_len, replace;
	int addr_type, icbflags;
//	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));

#if 0
	/* XXX disable sanity check for now */
	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;
#endif

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* internally allocated nodes have no descriptors to rewrite */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot = 0;
	cpy_slot = 0;
	foffset = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		/* redirects don't consume file offset; skip them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap starts; phase 4 restarts from here */
	restart_slot = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start = mapping[lb_num];
		run_length = 1;
		/* coalesce consecutive (or repeated) physical blocks */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	slot = restart_slot;
	foffset = restart_foffset;

	replace_len = till - foffset;	/* total amount of bytes to pop */
	slot_offset = from - foffset;	/* offset in first encounted slot */
	KASSERT((slot_offset % lb_size) == 0);

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
				"replace_len %d, "
				"vp %d, lb %d, len %d, flags %d\n",
			slot, slot_offset, replace_len,
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* adjust for slot offset */
		if (slot_offset) {
			DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
			lb_num += slot_offset / lb_size;
			len -= slot_offset;
			foffset += slot_offset;
			replace_len -= slot_offset;

			/* mark adjusted */
			slot_offset = 0;
		}

		/* advance for (the rest of) this slot */
		replace = MIN(len, replace_len);
		DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));

		/* advance for this slot */
		if (replace) {
			/* note: dont round DOWN on num_lb since we then
			 * forget the last partial one */
			num_lb = (replace + lb_size - 1) / lb_size;
			if (flags != UDF_EXT_FREE) {
				/* give the replaced blocks back to the pool */
				udf_free_allocated_space(ump, lb_num,
					udf_rw16(s_ad.loc.part_num), num_lb);
			}
			lb_num += num_lb;
			len -= replace;
			foffset += replace;
			replace_len -= replace;
		}

		/* do we have a slot tail ? */
		if (len) {
			KASSERT(foffset % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			if (flags == UDF_EXT_FREE)
				s_ad.loc.lb_num = udf_rw32(0);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}

		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
				"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	udf_count_alloc_exts(udf_node);

	/* the node's descriptors should now be sane */
	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
2133
2134 /* --------------------------------------------------------------------- */
2135
/*
 * Grow the allocation of `udf_node' from its current information length up
 * to `new_size'.
 *
 * If the node uses internal (embedded) allocation and the grown data still
 * fits inside the descriptor, only the recorded lengths are updated.
 * Otherwise any embedded data is first evacuated to a temporary buffer, the
 * node is converted to short/long allocation descriptors, the size
 * difference is appended as UDF_EXT_FREE extents (in chunks of at most
 * `max_len') and the evacuated data is written back through the vnode.
 *
 * The node lock is taken on entry and released before returning.
 * Returns 0 on success or an error code from the adslot/VOP machinery.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t icbflags, len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t my_part, dst_part;
	uint8_t *data_pos, *evacuated_data;
	int addr_type;
	int slot, cpy_slot;
	int isdir, eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* biggest extent length rounded down to a whole number of blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
	}
	/* embedded data/allocation descriptors start after the EA space */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data is embedded in the descriptor itself */
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen += size_diff;
			objsize += size_diff;
			l_ad += size_diff;
			/* the CRC covers everything after the descriptor tag */
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len = udf_rw64(inflen);
				fe->l_ad = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* internal alloc records no logical blocks */
			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0,
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/*
			 * NOTE(review): `error' from this read is not
			 * inspected before the conversion continues and is
			 * later overwritten -- confirm this is intentional
			 * (a failed read would write back zeroed data).
			 */

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc and select type */
		isdir = (vp->v_type == VDIR);
		my_part = udf_rw16(udf_node->loc.loc.part_num);
		dst_part = isdir? ump->fids_part : ump->data_part;
		addr_type = UDF_ICB_SHORT_ALLOC;
		/* cross-partition extents need the bigger long_ad form */
		if (dst_part != my_part)
			addr_type = UDF_ICB_LONG_ALLOC;

		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= addr_type;
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* the old embedded data becomes one free (unallocated) extent */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot = 0;
		cpy_slot = 0;
		foffset = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			/* redirect extents don't contribute to the file offset */
			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		/* grow within the last, partially used block first */
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0,
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	/* recount the number of allocation extent descriptors in use */
	udf_count_alloc_exts(udf_node);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2405
2406 /* --------------------------------------------------------------------- */
2407
2408 int
2409 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2410 {
2411 struct vnode *vp = udf_node->vnode;
2412 struct udf_mount *ump = udf_node->ump;
2413 struct file_entry *fe;
2414 struct extfile_entry *efe;
2415 struct icb_tag *icbtag;
2416 struct long_ad c_ad, s_ad, *node_ad_cpy;
2417 uint64_t size_diff, old_size, inflen, objsize;
2418 uint64_t foffset, end_foffset;
2419 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2420 uint32_t lb_size, dscr_size, crclen;
2421 uint32_t slot_offset;
2422 uint32_t len, flags, max_len;
2423 uint32_t num_lb, lb_num;
2424 uint32_t max_l_ad, l_ad, l_ea;
2425 uint16_t vpart_num;
2426 uint8_t *data_pos;
2427 int icbflags, addr_type;
2428 int slot, cpy_slot, cpy_slots;
2429 int eof, error;
2430
2431 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2432
2433 UDF_LOCK_NODE(udf_node, 0);
2434 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2435
2436 lb_size = udf_rw32(ump->logical_vol->lb_size);
2437 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2438
2439 /* do the work */
2440 fe = udf_node->fe;
2441 efe = udf_node->efe;
2442 if (fe) {
2443 icbtag = &fe->icbtag;
2444 inflen = udf_rw64(fe->inf_len);
2445 objsize = inflen;
2446 dscr_size = sizeof(struct file_entry) -1;
2447 l_ea = udf_rw32(fe->l_ea);
2448 l_ad = udf_rw32(fe->l_ad);
2449 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2450 } else {
2451 icbtag = &efe->icbtag;
2452 inflen = udf_rw64(efe->inf_len);
2453 objsize = udf_rw64(efe->obj_size);
2454 dscr_size = sizeof(struct extfile_entry) -1;
2455 l_ea = udf_rw32(efe->l_ea);
2456 l_ad = udf_rw32(efe->l_ad);
2457 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2458 }
2459 max_l_ad = lb_size - dscr_size - l_ea;
2460
2461 icbflags = udf_rw16(icbtag->flags);
2462 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2463
2464 old_size = inflen;
2465 size_diff = old_size - new_size;
2466
2467 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2468
2469 /* shrink the node to its new size */
2470 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2471 /* only reflect size change directly in the node */
2472 KASSERT(new_size <= max_l_ad);
2473 inflen -= size_diff;
2474 objsize -= size_diff;
2475 l_ad -= size_diff;
2476 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2477 if (fe) {
2478 fe->inf_len = udf_rw64(inflen);
2479 fe->l_ad = udf_rw32(l_ad);
2480 fe->tag.desc_crc_len = udf_rw32(crclen);
2481 } else {
2482 efe->inf_len = udf_rw64(inflen);
2483 efe->obj_size = udf_rw64(objsize);
2484 efe->l_ad = udf_rw32(l_ad);
2485 efe->tag.desc_crc_len = udf_rw32(crclen);
2486 }
2487 error = 0;
2488
2489 /* clear the space in the descriptor */
2490 KASSERT(old_size > new_size);
2491 memset(data_pos + new_size, 0, old_size - new_size);
2492
2493 /* TODO zero appened space in buffer! */
2494 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2495
2496 /* set new size for uvm */
2497 uvm_vnp_setsize(vp, new_size);
2498
2499 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2500 UDF_UNLOCK_NODE(udf_node, 0);
2501
2502 KASSERT(new_inflen == orig_inflen - size_diff);
2503 KASSERT(new_lbrec == orig_lbrec);
2504 KASSERT(new_lbrec == 0);
2505
2506 return 0;
2507 }
2508
2509 /* setup node cleanup extents copy space */
2510 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2511 M_UDFMNT, M_WAITOK);
2512 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2513
2514 /*
2515 * Shrink the node by releasing the allocations and truncate the last
2516 * allocation to the new size. If the new size fits into the
2517 * allocation descriptor itself, transform it into an
2518 * UDF_ICB_INTERN_ALLOC.
2519 */
2520 slot = 0;
2521 cpy_slot = 0;
2522 foffset = 0;
2523
2524 /* 1) copy till first overlap piece to the rewrite buffer */
2525 for (;;) {
2526 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2527 if (eof) {
2528 DPRINTF(WRITE,
2529 ("Shrink node failed: "
2530 "encountered EOF\n"));
2531 error = EINVAL;
2532 goto errorout; /* panic? */
2533 }
2534 len = udf_rw32(s_ad.len);
2535 flags = UDF_EXT_FLAGS(len);
2536 len = UDF_EXT_LEN(len);
2537
2538 if (flags == UDF_EXT_REDIRECT) {
2539 slot++;
2540 continue;
2541 }
2542
2543 end_foffset = foffset + len;
2544 if (end_foffset > new_size)
2545 break; /* found */
2546
2547 node_ad_cpy[cpy_slot++] = s_ad;
2548
2549 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2550 "-> stack\n",
2551 udf_rw16(s_ad.loc.part_num),
2552 udf_rw32(s_ad.loc.lb_num),
2553 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2554 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2555
2556 foffset = end_foffset;
2557 slot++;
2558 }
2559 slot_offset = new_size - foffset;
2560
2561 /* 2) trunc overlapping slot at overlap and copy it */
2562 if (slot_offset > 0) {
2563 lb_num = udf_rw32(s_ad.loc.lb_num);
2564 vpart_num = udf_rw16(s_ad.loc.part_num);
2565
2566 if (flags == UDF_EXT_ALLOCATED) {
2567 /* note: round DOWN on num_lb */
2568 lb_num += (slot_offset + lb_size -1) / lb_size;
2569 num_lb = (len - slot_offset) / lb_size;
2570
2571 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2572 }
2573
2574 s_ad.len = udf_rw32(slot_offset | flags);
2575 node_ad_cpy[cpy_slot++] = s_ad;
2576 slot++;
2577
2578 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2579 "-> stack\n",
2580 udf_rw16(s_ad.loc.part_num),
2581 udf_rw32(s_ad.loc.lb_num),
2582 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2583 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2584 }
2585
2586 /* 3) delete remainder */
2587 for (;;) {
2588 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2589 if (eof)
2590 break;
2591
2592 len = udf_rw32(s_ad.len);
2593 flags = UDF_EXT_FLAGS(len);
2594 len = UDF_EXT_LEN(len);
2595
2596 if (flags == UDF_EXT_REDIRECT) {
2597 slot++;
2598 continue;
2599 }
2600
2601 DPRINTF(ALLOC, ("\t3: delete remainder "
2602 "vp %d lb %d, len %d, flags %d\n",
2603 udf_rw16(s_ad.loc.part_num),
2604 udf_rw32(s_ad.loc.lb_num),
2605 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2606 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2607
2608 if (flags == UDF_EXT_ALLOCATED) {
2609 lb_num = udf_rw32(s_ad.loc.lb_num);
2610 vpart_num = udf_rw16(s_ad.loc.part_num);
2611 num_lb = (len + lb_size - 1) / lb_size;
2612
2613 udf_free_allocated_space(ump, lb_num, vpart_num,
2614 num_lb);
2615 }
2616
2617 slot++;
2618 }
2619
2620 /* 4) if it will fit into the descriptor then convert */
2621 if (new_size < max_l_ad) {
2622 /*
2623 * resque/evacuate old piece by reading it in, and convert it
2624 * to internal alloc.
2625 */
2626 if (new_size == 0) {
2627 /* XXX/TODO only for zero sizing now */
2628 udf_wipe_adslots(udf_node);
2629
2630 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2631 icbflags |= UDF_ICB_INTERN_ALLOC;
2632 icbtag->flags = udf_rw16(icbflags);
2633
2634 inflen -= size_diff; KASSERT(inflen == 0);
2635 objsize -= size_diff;
2636 l_ad = new_size;
2637 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2638 if (fe) {
2639 fe->inf_len = udf_rw64(inflen);
2640 fe->l_ad = udf_rw32(l_ad);
2641 fe->tag.desc_crc_len = udf_rw32(crclen);
2642 } else {
2643 efe->inf_len = udf_rw64(inflen);
2644 efe->obj_size = udf_rw64(objsize);
2645 efe->l_ad = udf_rw32(l_ad);
2646 efe->tag.desc_crc_len = udf_rw32(crclen);
2647 }
2648 /* eventually copy in evacuated piece */
2649 /* set new size for uvm */
2650 uvm_vnp_setsize(vp, new_size);
2651
2652 free(node_ad_cpy, M_UDFMNT);
2653 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2654
2655 UDF_UNLOCK_NODE(udf_node, 0);
2656
2657 KASSERT(new_inflen == orig_inflen - size_diff);
2658 KASSERT(new_inflen == 0);
2659 KASSERT(new_lbrec == 0);
2660
2661 return 0;
2662 }
2663
2664 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2665 }
2666
2667 /* 5) reset node descriptors */
2668 udf_wipe_adslots(udf_node);
2669
2670 /* 6) copy back extents; merge when possible. Recounting on the fly */
2671 cpy_slots = cpy_slot;
2672
2673 c_ad = node_ad_cpy[0];
2674 slot = 0;
2675 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2676 s_ad = node_ad_cpy[cpy_slot];
2677
2678 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2679 "lb %d, len %d, flags %d\n",
2680 udf_rw16(s_ad.loc.part_num),
2681 udf_rw32(s_ad.loc.lb_num),
2682 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2683 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2684
2685 /* see if we can merge */
2686 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2687 /* not mergable (anymore) */
2688 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2689 "len %d, flags %d\n",
2690 udf_rw16(c_ad.loc.part_num),
2691 udf_rw32(c_ad.loc.lb_num),
2692 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2693 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2694
2695 error = udf_append_adslot(udf_node, &slot, &c_ad);
2696 if (error)
2697 goto errorout; /* panic? */
2698 c_ad = s_ad;
2699 slot++;
2700 }
2701 }
2702
2703 /* 7) push rest slot (if any) */
2704 if (UDF_EXT_LEN(c_ad.len) > 0) {
2705 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2706 "len %d, flags %d\n",
2707 udf_rw16(c_ad.loc.part_num),
2708 udf_rw32(c_ad.loc.lb_num),
2709 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2710 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2711
2712 error = udf_append_adslot(udf_node, &slot, &c_ad);
2713 if (error)
2714 goto errorout; /* panic? */
2715 ;
2716 }
2717
2718 inflen -= size_diff;
2719 objsize -= size_diff;
2720 if (fe) {
2721 fe->inf_len = udf_rw64(inflen);
2722 } else {
2723 efe->inf_len = udf_rw64(inflen);
2724 efe->obj_size = udf_rw64(objsize);
2725 }
2726 error = 0;
2727
2728 /* set new size for uvm */
2729 uvm_vnp_setsize(vp, new_size);
2730
2731 errorout:
2732 free(node_ad_cpy, M_UDFMNT);
2733
2734 udf_count_alloc_exts(udf_node);
2735
2736 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2737 UDF_UNLOCK_NODE(udf_node, 0);
2738
2739 KASSERT(new_inflen == orig_inflen - size_diff);
2740
2741 return error;
2742 }
2743
2744