/* $NetBSD: udf_allocation.c,v 1.1.2.4 2009/06/20 07:20:30 yamt Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.1.2.4 2009/06/20 07:20:30 yamt Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #include "udf.h"
67 #include "udf_subr.h"
68 #include "udf_bswap.h"
69
70
71 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
72
73 static void udf_record_allocation_in_node(struct udf_mount *ump,
74 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
75 struct long_ad *node_ad_cpy);
76
77 /*
78 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
79 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
80 * since actions are most likely sequencial and thus seeking doesn't need
81 * searching for the same or adjacent position again.
82 */
83
84 /* --------------------------------------------------------------------- */
85
86 #if 0
87 #if 1
/*
 * Debug helper: print all allocation descriptors of a udf_node to the
 * console.  Only emits output when UDF_DEBUG_NODEDUMP is set in udf_verbose.
 * (This function sits inside an `#if 0' region, so it is normally compiled
 * out entirely.)
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num;
	uint32_t flags;
	int part_num;
	int lb_size, eof, slot;

	if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
		return;

	/* NOTE(review): lb_size is computed but not used below */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is described by either a file entry or an extended one */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump %p :\n", udf_node);

	/* intern allocation: file data is embedded in the node itself */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\tInflen = %"PRIu64"\n", inflen);
	printf("\t\t");

	/* walk the allocation-descriptor slots until end-of-file */
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);	/* top two bits of len */
		len = UDF_EXT_LEN(len);

		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags>>30);
		printf("] ");

		/* a redirect extent chains to a new allocation extent */
		if (flags == UDF_EXT_REDIRECT) {
			printf("\n\textent END\n\tallocation extent\n\t\t");
		}

		slot++;
	}
	printf("\n\tl_ad END\n\n");
}
156 #else
157 #define udf_node_dump(a)
158 #endif
159
160
/*
 * Paranoia check: verify that num_lb logical blocks starting at lb_num on
 * virtual partition vpart_num are marked busy (bit cleared) in the backing
 * partition's unallocated-space bitmap.  Mismatches are only reported on
 * the console; the KASSERT on failure is currently disabled.
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits: a set bit means the block is still free */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			/* advance to the next byte once the bit wraps */
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}
230
231
/*
 * Debug variant of the node sanity check (compiled out by `#if 0'): walk
 * all allocation descriptors of the node, accumulate the information
 * length and recorded-logical-blocks counts, and cross-check them against
 * the values stored in the (extended) file entry.  Also verifies that
 * allocated extents are marked busy in the space bitmaps and that the
 * unused tail of the allocation-descriptor area is zero-filled.  The
 * accumulated totals are returned through *cnt_inflen / *cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* pick up descriptor fields from either fe or efe layout */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad = udf_rw32(fe->l_ad);
		l_ea = udf_rw32(fe->l_ea);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad = udf_rw32(efe->l_ad);
		l_ea = udf_rw32(efe->l_ea);
	}
	/* allocation descriptors start after the extended attributes */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	/* intern alloc: the file data is embedded, l_ad equals inflen */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		/* only the last extent may have a partial logical block */
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* redirect extents must span exactly one block */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
339 #else
340 static void
341 udf_node_sanity_check(struct udf_node *udf_node,
342 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
343 struct file_entry *fe;
344 struct extfile_entry *efe;
345 struct icb_tag *icbtag;
346 uint64_t inflen, logblksrec;
347 int dscr_size, lb_size;
348
349 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
350
351 fe = udf_node->fe;
352 efe = udf_node->efe;
353 if (fe) {
354 icbtag = &fe->icbtag;
355 inflen = udf_rw64(fe->inf_len);
356 dscr_size = sizeof(struct file_entry) -1;
357 logblksrec = udf_rw64(fe->logblks_rec);
358 } else {
359 icbtag = &efe->icbtag;
360 inflen = udf_rw64(efe->inf_len);
361 dscr_size = sizeof(struct extfile_entry) -1;
362 logblksrec = udf_rw64(efe->logblks_rec);
363 }
364 *cnt_logblksrec = logblksrec;
365 *cnt_inflen = inflen;
366 }
367 #endif
368
369 /* --------------------------------------------------------------------- */
370
/*
 * Translate a location given as an ICB address (virtual partition number +
 * logical block number) into an absolute disc logical block number.
 * On success *lb_numres holds the physical block and *extres the number of
 * blocks, starting there, for which the same linear translation is valid.
 * Returns EINVAL for out-of-range or unmapped input, or an error from
 * reading the VAT file.
 */
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
		   uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart  = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

	/* re-entered after the metadata partition rewrites vpart/lb_num */
translate_again:
	part  = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel    = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres    = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* not remapped: transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot    = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len   = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			/* redirect extents only chain, they occupy no data */
			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num   = udf_rw32(s_icb_loc.loc.lb_num);
		vpart    = udf_rw16(s_icb_loc.loc.part_num);
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}
531
532
533 /* XXX provisional primitive braindead version */
534 /* TODO use ext_res */
535 void
536 udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
537 uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
538 {
539 struct long_ad loc;
540 uint32_t lb_numres, ext_res;
541 int sector;
542
543 for (sector = 0; sector < sectors; sector++) {
544 memset(&loc, 0, sizeof(struct long_ad));
545 loc.loc.part_num = udf_rw16(vpart_num);
546 loc.loc.lb_num = udf_rw32(*lmapping);
547 udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
548 *pmapping = lb_numres;
549 lmapping++; pmapping++;
550 }
551 }
552
553
554 /* --------------------------------------------------------------------- */
555
/*
 * Translate an extent (in logical blocks) of a file into disc block
 * numbers; used for read and write operations.  Doesn't check extents.
 *
 * The translation of num_lb blocks, starting `from' blocks into the file,
 * is written to map[].  Intern-allocated (embedded) files yield the single
 * marker UDF_TRANS_INTERN; free/unused extents yield UDF_TRANS_ZERO
 * entries.  Returns 0 or an errno.
 */

int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* intern alloc: data embedded in the node, no blocks to map */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot    = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirect extents chain descriptors, they hold no data */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; offset of `from' within that extent */
	ext_offset = from * lb_size - foffset;

	/* fill the map, extent by extent, until num_lb blocks are done */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);

		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		overlap  = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * note that the while(){} is nessisary for the extent that
		 * the udf_translate_vtop() returns doens't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* sparse/unused blocks map to zero-fill */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* translate via vtop, possibly in pieces */
				t_ad.loc.lb_num   = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
728
729 /* --------------------------------------------------------------------- */
730
/*
 * Search the VAT file for a free entry (0xffffffff), extending the VAT by
 * one entry when none is found.  The chosen entry is pre-marked with the
 * initialiser value 0xfffffffe and its index is returned in *lbnumres.
 * Always returns 0.
 *
 * NOTE(review): a read error only aborts the search loop; the routine then
 * still claims the current lb_num, writes the initialiser and returns 0 —
 * confirm this best-effort behaviour is intended.
 */
static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
	uint8_t *blob;
	int entry, chunk, found, error;

	KASSERT(ump);
	KASSERT(ump->logical_vol);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

	/* TODO static allocation of search chunk */

	/* start at the lowest known free entry */
	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
	found  = 0;
	error  = 0;
	entry  = 0;
	do {
		/* one logical block worth of 32-bit VAT entries at a time */
		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
		if (chunk <= 0)
			break;
		/* load in chunk */
		error = udf_vat_read(ump->vat_node, blob, chunk,
				ump->vat_offset + lb_num * 4);

		if (error)
			break;

		/* search this chunk */
		for (entry=0; entry < chunk /4; entry++, lb_num++) {
			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
			lb_map = udf_rw32(udf_rw32_lbmap);
			if (lb_map == 0xffffffff) {
				found = 1;
				break;
			}
		}
	} while (!found);
	if (error) {
		printf("udf_search_free_vatloc: error reading in vat chunk "
			"(lb %d, size %d)\n", lb_num, chunk);
	}

	if (!found) {
		/* extend VAT */
		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
		lb_num = ump->vat_entries;
		ump->vat_entries++;
	}

	/* mark entry with initialiser just in case */
	lb_map = udf_rw32(0xfffffffe);
	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
		ump->vat_offset + lb_num *4);
	ump->vat_last_free_lb = lb_num;

	free(blob, M_UDFTEMP);
	*lbnumres = lb_num;
	return 0;
}
793
794
/*
 * Allocate up to *num_lb logical blocks from the free-space bitmap,
 * recording each allocated block number in lmappos[].  A set bit means
 * the block is free.  Data and metadata allocations each scan from their
 * own rolling position; a second pass restarts at offset 0 so the whole
 * bitmap is covered.  On return *num_lb holds the number of blocks that
 * could NOT be allocated.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t  diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	/* round down to a byte boundary */
	offset &= ~7;
	for (pass = 0; pass < 2; pass++) {
		/* second pass (or overflow) wraps to the start */
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos  = bitmap->bits + offset/8;
			bit   = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				/* byte fully allocated, skip it */
				offset += 8;
				continue;
			}

			/* check for ffs overshoot */
			if (offset + bit-1 >= bitmap->max_offset) {
				offset = bitmap->max_offset;
				break;
			}

			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			/* clear the bit == mark the block allocated */
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember where to continue scanning next time */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
850
851
852 static void
853 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
854 {
855 uint32_t offset;
856 uint32_t bit, bitval;
857 uint8_t *bpos;
858
859 offset = lb_num;
860
861 /* starter bits */
862 bpos = bitmap->bits + offset/8;
863 bit = offset % 8;
864 while ((bit != 0) && (num_lb > 0)) {
865 bitval = (1 << bit);
866 KASSERT((*bpos & bitval) == 0);
867 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
868 offset, bpos, bit));
869 *bpos |= bitval;
870 offset++; num_lb--;
871 bit = (bit + 1) % 8;
872 }
873 if (num_lb == 0)
874 return;
875
876 /* whole bytes */
877 KASSERT(bit == 0);
878 bpos = bitmap->bits + offset / 8;
879 while (num_lb >= 8) {
880 KASSERT((*bpos == 0));
881 DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
882 *bpos = 255;
883 offset += 8; num_lb -= 8;
884 bpos++;
885 }
886
887 /* stop bits */
888 KASSERT(num_lb < 8);
889 bit = 0;
890 while (num_lb > 0) {
891 bitval = (1 << bit);
892 KASSERT((*bpos & bitval) == 0);
893 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
894 offset, bpos, bit));
895 *bpos |= bitval;
896 offset++; num_lb--;
897 bit = (bit + 1) % 8;
898 }
899 }
900
901
/* allocate a contiguous sequence of sectornumbers */
/*
 * Allocate num_lb logical blocks on virtual partition vpart_num using the
 * allocation scheme backing that partition (VAT, sequential recording,
 * partition space bitmap or metadata bitmap).  The resulting logical block
 * numbers are written to lmapping[].  Returns 0 or ENOSPC.
 */
static int
udf_allocate_space(struct udf_mount *ump, int udf_c_type,
	uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* XXX TODO check disc space */

	alloc_type = ump->vtop_alloc[vpart_num];
	is_node    = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error)
			*lmappos = lb_num;
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov  = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate at the next writable address of the track */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity desc. */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity desc. */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R  */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R  */
		printf("ALERT: udf_allocate_space : allocation %d "
				"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64",", (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
1042
1043 /* --------------------------------------------------------------------- */
1044
/*
 * Free num_lb previously allocated logical blocks starting at lb_num on
 * virtual partition vpart_num.  Depending on the partition type this
 * either sets bits in the freed/unallocated space bitmap, clears the VAT
 * entry, or sets bits in the metadata bitmap; the free-block count in the
 * logical volume integrity descriptor is adjusted accordingly.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		/* NOTE(review): ptov is computed but not used below */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by writing the free marker back */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember lowest free entry to speed up the next search */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1132
1133 /* --------------------------------------------------------------------- */
1134
1135 int
1136 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type,
1137 uint32_t num_lb, uint16_t vpartnr, uint64_t *lmapping)
1138 {
1139 /* TODO properly maintain uncomitted_lb per partition */
1140
1141 /* reserve size for VAT allocated data */
1142 if (ump->vtop_alloc[vpartnr] == UDF_ALLOC_VAT) {
1143 mutex_enter(&ump->allocate_mutex);
1144 ump->uncomitted_lb += num_lb;
1145 mutex_exit(&ump->allocate_mutex);
1146 }
1147
1148 return udf_allocate_space(ump, udf_c_type, vpartnr, num_lb, lmapping);
1149 }
1150
1151 /* --------------------------------------------------------------------- */
1152
1153 /*
1154 * Allocate a buf on disc for direct write out. The space doesn't have to be
1155 * contiguous as the caller takes care of this.
1156 */
1157
1158 void
1159 udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
1160 uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
1161 {
1162 struct udf_node *udf_node = VTOI(buf->b_vp);
1163 int lb_size, blks, udf_c_type;
1164 int vpart_num, num_lb;
1165 int error, s;
1166
1167 /*
1168 * for each sector in the buf, allocate a sector on disc and record
1169 * its position in the provided mapping array.
1170 *
1171 * If its userdata or FIDs, record its location in its node.
1172 */
1173
1174 lb_size = udf_rw32(ump->logical_vol->lb_size);
1175 num_lb = (buf->b_bcount + lb_size -1) / lb_size;
1176 blks = lb_size / DEV_BSIZE;
1177 udf_c_type = buf->b_udf_c_type;
1178
1179 KASSERT(lb_size == ump->discinfo.sector_size);
1180
1181 /* select partition to record the buffer on */
1182 vpart_num = ump->data_part;
1183 if (udf_c_type == UDF_C_NODE)
1184 vpart_num = ump->node_part;
1185 if (udf_c_type == UDF_C_FIDS)
1186 vpart_num = ump->fids_part;
1187 *vpart_nump = vpart_num;
1188
1189 if (udf_c_type == UDF_C_NODE) {
1190 /* if not VAT, its allready allocated */
1191 if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
1192 return;
1193
1194 /* allocate on its backing sequential partition */
1195 vpart_num = ump->data_part;
1196 }
1197
1198 /* do allocation on the selected partition */
1199 error = udf_allocate_space(ump, udf_c_type,
1200 vpart_num, num_lb, lmapping);
1201 if (error) {
1202 /* ARGH! we've not done our accounting right! */
1203 panic("UDF disc allocation accounting gone wrong");
1204 }
1205
1206 /* commit our sector count */
1207 mutex_enter(&ump->allocate_mutex);
1208 if (num_lb > ump->uncomitted_lb) {
1209 ump->uncomitted_lb = 0;
1210 } else {
1211 ump->uncomitted_lb -= num_lb;
1212 }
1213 mutex_exit(&ump->allocate_mutex);
1214
1215 /* If its userdata or FIDs, record its allocation in its node. */
1216 if ((udf_c_type == UDF_C_USERDATA) ||
1217 (udf_c_type == UDF_C_FIDS) ||
1218 (udf_c_type == UDF_C_METADATA_SBM))
1219 {
1220 udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
1221 node_ad_cpy);
1222 /* decrement our outstanding bufs counter */
1223 s = splbio();
1224 udf_node->outstanding_bufs--;
1225 splx(s);
1226 }
1227 }
1228
1229 /* --------------------------------------------------------------------- */
1230
1231 /*
1232 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1233 * possible (anymore); a2 returns the rest piece.
1234 */
1235
1236 static int
1237 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1238 {
1239 uint32_t max_len, merge_len;
1240 uint32_t a1_len, a2_len;
1241 uint32_t a1_flags, a2_flags;
1242 uint32_t a1_lbnum, a2_lbnum;
1243 uint16_t a1_part, a2_part;
1244
1245 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1246
1247 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1248 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1249 a1_lbnum = udf_rw32(a1->loc.lb_num);
1250 a1_part = udf_rw16(a1->loc.part_num);
1251
1252 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1253 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1254 a2_lbnum = udf_rw32(a2->loc.lb_num);
1255 a2_part = udf_rw16(a2->loc.part_num);
1256
1257 /* defines same space */
1258 if (a1_flags != a2_flags)
1259 return 1;
1260
1261 if (a1_flags != UDF_EXT_FREE) {
1262 /* the same partition */
1263 if (a1_part != a2_part)
1264 return 1;
1265
1266 /* a2 is successor of a1 */
1267 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1268 return 1;
1269 }
1270
1271 /* merge as most from a2 if possible */
1272 merge_len = MIN(a2_len, max_len - a1_len);
1273 a1_len += merge_len;
1274 a2_len -= merge_len;
1275 a2_lbnum += merge_len/lb_size;
1276
1277 a1->len = udf_rw32(a1_len | a1_flags);
1278 a2->len = udf_rw32(a2_len | a2_flags);
1279 a2->loc.lb_num = udf_rw32(a2_lbnum);
1280
1281 if (a2_len > 0)
1282 return 1;
1283
1284 /* there is space over to merge */
1285 return 0;
1286 }
1287
1288 /* --------------------------------------------------------------------- */
1289
1290 static void
1291 udf_wipe_adslots(struct udf_node *udf_node)
1292 {
1293 struct file_entry *fe;
1294 struct extfile_entry *efe;
1295 struct alloc_ext_entry *ext;
1296 uint64_t inflen, objsize;
1297 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1298 uint8_t *data_pos;
1299 int extnr;
1300
1301 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1302
1303 fe = udf_node->fe;
1304 efe = udf_node->efe;
1305 if (fe) {
1306 inflen = udf_rw64(fe->inf_len);
1307 objsize = inflen;
1308 dscr_size = sizeof(struct file_entry) -1;
1309 l_ea = udf_rw32(fe->l_ea);
1310 l_ad = udf_rw32(fe->l_ad);
1311 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1312 } else {
1313 inflen = udf_rw64(efe->inf_len);
1314 objsize = udf_rw64(efe->obj_size);
1315 dscr_size = sizeof(struct extfile_entry) -1;
1316 l_ea = udf_rw32(efe->l_ea);
1317 l_ad = udf_rw32(efe->l_ad);
1318 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1319 }
1320 max_l_ad = lb_size - dscr_size - l_ea;
1321
1322 /* wipe fe/efe */
1323 memset(data_pos, 0, max_l_ad);
1324 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1325 if (fe) {
1326 fe->l_ad = udf_rw32(0);
1327 fe->logblks_rec = udf_rw64(0);
1328 fe->tag.desc_crc_len = udf_rw16(crclen);
1329 } else {
1330 efe->l_ad = udf_rw32(0);
1331 efe->logblks_rec = udf_rw64(0);
1332 efe->tag.desc_crc_len = udf_rw16(crclen);
1333 }
1334
1335 /* wipe all allocation extent entries */
1336 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1337 ext = udf_node->ext[extnr];
1338 dscr_size = sizeof(struct alloc_ext_entry) -1;
1339 data_pos = (uint8_t *) ext->data;
1340 max_l_ad = lb_size - dscr_size;
1341 memset(data_pos, 0, max_l_ad);
1342 ext->l_ad = udf_rw32(0);
1343
1344 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1345 ext->tag.desc_crc_len = udf_rw16(crclen);
1346 }
1347 udf_node->i_flags |= IN_NODE_REBUILD;
1348 }
1349
1350 /* --------------------------------------------------------------------- */
1351
/*
 * Fetch allocation descriptor number `slot' of the node into *icb and set
 * *eof when the slot lies past the last recorded descriptor.  Descriptors
 * are stored in the (e)fe itself and continue in chained allocation extent
 * descriptors that are reached through UDF_EXT_REDIRECT entries.  short_ads
 * are widened to long_ad form using the node's own partition number.
 */
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
    int *eof) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		/* descriptors start after the extended attribute area */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	/* size in bytes of one descriptor of this node's type */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* short_ad has no partition; use the node's own */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* no continuation; the slot does not exist */
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		/* offset becomes relative to the next extent's area */
		offset = offset - l_ad;
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}
1459
1460 /* --------------------------------------------------------------------- */
1461
/*
 * Write allocation descriptor *icb at slot number *slot of the node.  If
 * the current descriptor area is (nearly) full, a new allocation extent
 * descriptor (AED) is allocated and chained in with a redirect entry; in
 * that case *slot is advanced by one to compensate for the inserted link.
 * Returns 0 on success or an error when space for a new AED could not be
 * allocated.
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr *dscr, *extdscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	vpart_num = udf_rw16(udf_node->loc.loc.part_num);

	/* determine what descriptor we are in */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		/* keep pointers so updates land in the descriptor itself */
		l_ea = udf_rw32(fe->l_ea);
		l_ad_p = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag = &efe->icbtag;
		dscr = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea = udf_rw32(efe->l_ea);
		l_ad_p = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	/* size in bytes of one descriptor of this node's type */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad since it can be a synthesized one */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num = udf_rw32(0);
	}

	/* if offset too big, we go to the allocation extensions */
	l_ad = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is adressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		offset = offset - l_ad;

		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad = udf_rw32(*l_ad_p);
	crclen = udf_rw16(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0); /* ignore */
			o_icb.loc.lb_num = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts; the new extent is re-added below */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/* have to append aed, see if we already have a spare one */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));

			/* one logical block for the new AED itself */
			error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
					vpart_num, &lmapping);
			lb_num = lmapping;
			if (error)
				return error;

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
			ext = &extdscr->aee;

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw16(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr] = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw16(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		crclen = udf_rw16(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	len = udf_rw32(icb->len);
	flags = UDF_EXT_FLAGS(len);
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed */
	if (offset >= l_ad) {
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw16(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1701
1702 /* --------------------------------------------------------------------- */
1703
1704 static void
1705 udf_count_alloc_exts(struct udf_node *udf_node)
1706 {
1707 struct long_ad s_ad;
1708 uint32_t lb_num, len, flags;
1709 uint16_t vpart_num;
1710 int slot, eof;
1711 int num_extents, extnr;
1712 int lb_size;
1713
1714 if (udf_node->num_extensions == 0)
1715 return;
1716
1717 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1718 /* count number of allocation extents in use */
1719 num_extents = 0;
1720 slot = 0;
1721 for (;;) {
1722 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1723 if (eof)
1724 break;
1725 len = udf_rw32(s_ad.len);
1726 flags = UDF_EXT_FLAGS(len);
1727
1728 if (flags == UDF_EXT_REDIRECT)
1729 num_extents++;
1730
1731 slot++;
1732 }
1733
1734 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1735 num_extents));
1736
1737 /* XXX choice: we could delay freeing them on node writeout */
1738 /* free excess entries */
1739 extnr = num_extents;
1740 for (;extnr < udf_node->num_extensions; extnr++) {
1741 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1742 /* free dscriptor */
1743 s_ad = udf_node->ext_loc[extnr];
1744 udf_free_logvol_dscr(udf_node->ump, &s_ad,
1745 udf_node->ext[extnr]);
1746 udf_node->ext[extnr] = NULL;
1747
1748 /* free disc space */
1749 lb_num = udf_rw32(s_ad.loc.lb_num);
1750 vpart_num = udf_rw16(s_ad.loc.part_num);
1751 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1752
1753 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1754 }
1755
1756 /* set our new number of allocation extents */
1757 udf_node->num_extensions = num_extents;
1758 }
1759
1760
1761 /* --------------------------------------------------------------------- */
1762
1763 /*
1764 * Adjust the node's allocation descriptors to reflect the new mapping; do
1765 * take note that we might glue to existing allocation descriptors.
1766 *
 * XXX Note there can only be one allocation being recorded per mount; maybe
 * explicit allocation in the schedule thread?
1769 */
1770
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset, replace_len, replace;
	int addr_type, icbflags;
//	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));

#if 0
	/* XXX disable sanity check for now */
	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;
#endif

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot = 0;
	cpy_slot = 0;
	foffset = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		/* redirects are rebuilt later; don't copy them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap starts for the pop pass (step 4) */
	restart_slot = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		/* keep only the head of the slot that precedes `from' */
		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start = mapping[lb_num];
		run_length = 1;
		/* coalesce consecutive (or identical) mapped blocks */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	slot = restart_slot;
	foffset = restart_foffset;

	replace_len = till - foffset;	/* total amount of bytes to pop */
	slot_offset = from - foffset;	/* offset in first encounted slot */
	KASSERT((slot_offset % lb_size) == 0);

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
				"replace_len %d, "
				"vp %d, lb %d, len %d, flags %d\n",
			slot, slot_offset, replace_len,
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* adjust for slot offset */
		if (slot_offset) {
			DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
			lb_num += slot_offset / lb_size;
			len -= slot_offset;
			foffset += slot_offset;
			replace_len -= slot_offset;

			/* mark adjusted */
			slot_offset = 0;
		}

		/* advance for (the rest of) this slot */
		replace = MIN(len, replace_len);
		DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));

		/* advance for this slot */
		if (replace) {
			/* note: dont round DOWN on num_lb since we then
			 * forget the last partial one */
			num_lb = (replace + lb_size - 1) / lb_size;
			if (flags != UDF_EXT_FREE) {
				/* give the superseded blocks back */
				udf_free_allocated_space(ump, lb_num,
					udf_rw16(s_ad.loc.part_num), num_lb);
			}
			lb_num += num_lb;
			len -= replace;
			foffset += replace;
			replace_len -= replace;
		}

		/* do we have a slot tail ? */
		if (len) {
			KASSERT(foffset % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			if (flags == UDF_EXT_FREE)
				s_ad.loc.lb_num = udf_rw32(0);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}

		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
			"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	udf_count_alloc_exts(udf_node);

	/* the node's descriptors should now be sane */
	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
2131
2132 /* --------------------------------------------------------------------- */
2133
/*
 * udf_grow_node -- grow `udf_node' to `new_size' bytes.
 *
 * If the node uses internal (embedded) allocation and the new length still
 * fits inside the descriptor, only the recorded lengths are updated.
 * Otherwise the embedded data is evacuated through vn_rdwr(), the node is
 * converted to short/long allocation descriptors and the size difference is
 * covered by appending UDF_EXT_FREE (unallocated) extents; real backing
 * blocks are allocated later.  The node lock is held for the duration,
 * though it is dropped around the vn_rdwr() calls.
 *
 * Returns 0 on success or an error from udf_append_adslot()/vn_rdwr().
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t icbflags, len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t my_part, dst_part;
	uint8_t *data_pos, *evacuated_data;
	int addr_type;
	int slot, cpy_slot;
	int isdir, eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* maximum length one extent can describe, rounded down to lb_size */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	/* a node has either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
	}
	/* allocation descriptors (or embedded data) follow the EAs */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* for internal alloc l_ad is the embedded data length */
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw16(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw16(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif
	
			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			/* internal alloc records no logical blocks */
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0, 
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc and select type */
		isdir   = (vp->v_type == VDIR);
		my_part = udf_rw16(udf_node->loc.loc.part_num);
		/* directories go to the fids partition, files to data */
		dst_part = isdir? ump->fids_part : ump->data_part;
		addr_type = UDF_ICB_SHORT_ALLOC;
		/* cross-partition extents need the long form */
		if (dst_part != my_part)
			addr_type = UDF_ICB_LONG_ALLOC;

		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= addr_type;
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* describe the old contents as one free (unallocated) extent */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			/* redirect extents carry no file data */
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appended space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	/* c_ad accumulates; s_ad is the new piece tried for a merge */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0, 
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	udf_count_alloc_exts(udf_node);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2403
2404 /* --------------------------------------------------------------------- */
2405
2406 int
2407 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2408 {
2409 struct vnode *vp = udf_node->vnode;
2410 struct udf_mount *ump = udf_node->ump;
2411 struct file_entry *fe;
2412 struct extfile_entry *efe;
2413 struct icb_tag *icbtag;
2414 struct long_ad c_ad, s_ad, *node_ad_cpy;
2415 uint64_t size_diff, old_size, inflen, objsize;
2416 uint64_t foffset, end_foffset;
2417 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2418 uint32_t lb_size, dscr_size, crclen;
2419 uint32_t slot_offset;
2420 uint32_t len, flags, max_len;
2421 uint32_t num_lb, lb_num;
2422 uint32_t max_l_ad, l_ad, l_ea;
2423 uint16_t vpart_num;
2424 uint8_t *data_pos;
2425 int icbflags, addr_type;
2426 int slot, cpy_slot, cpy_slots;
2427 int eof, error;
2428
2429 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2430
2431 UDF_LOCK_NODE(udf_node, 0);
2432 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2433
2434 lb_size = udf_rw32(ump->logical_vol->lb_size);
2435 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2436
2437 /* do the work */
2438 fe = udf_node->fe;
2439 efe = udf_node->efe;
2440 if (fe) {
2441 icbtag = &fe->icbtag;
2442 inflen = udf_rw64(fe->inf_len);
2443 objsize = inflen;
2444 dscr_size = sizeof(struct file_entry) -1;
2445 l_ea = udf_rw32(fe->l_ea);
2446 l_ad = udf_rw32(fe->l_ad);
2447 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2448 } else {
2449 icbtag = &efe->icbtag;
2450 inflen = udf_rw64(efe->inf_len);
2451 objsize = udf_rw64(efe->obj_size);
2452 dscr_size = sizeof(struct extfile_entry) -1;
2453 l_ea = udf_rw32(efe->l_ea);
2454 l_ad = udf_rw32(efe->l_ad);
2455 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2456 }
2457 max_l_ad = lb_size - dscr_size - l_ea;
2458
2459 icbflags = udf_rw16(icbtag->flags);
2460 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2461
2462 old_size = inflen;
2463 size_diff = old_size - new_size;
2464
2465 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2466
2467 /* shrink the node to its new size */
2468 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2469 /* only reflect size change directly in the node */
2470 KASSERT(new_size <= max_l_ad);
2471 inflen -= size_diff;
2472 objsize -= size_diff;
2473 l_ad -= size_diff;
2474 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2475 if (fe) {
2476 fe->inf_len = udf_rw64(inflen);
2477 fe->l_ad = udf_rw32(l_ad);
2478 fe->tag.desc_crc_len = udf_rw16(crclen);
2479 } else {
2480 efe->inf_len = udf_rw64(inflen);
2481 efe->obj_size = udf_rw64(objsize);
2482 efe->l_ad = udf_rw32(l_ad);
2483 efe->tag.desc_crc_len = udf_rw16(crclen);
2484 }
2485 error = 0;
2486
2487 /* clear the space in the descriptor */
2488 KASSERT(old_size > new_size);
2489 memset(data_pos + new_size, 0, old_size - new_size);
2490
2491 /* TODO zero appened space in buffer! */
2492 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2493
2494 /* set new size for uvm */
2495 uvm_vnp_setsize(vp, new_size);
2496
2497 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2498 UDF_UNLOCK_NODE(udf_node, 0);
2499
2500 KASSERT(new_inflen == orig_inflen - size_diff);
2501 KASSERT(new_lbrec == orig_lbrec);
2502 KASSERT(new_lbrec == 0);
2503
2504 return 0;
2505 }
2506
2507 /* setup node cleanup extents copy space */
2508 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2509 M_UDFMNT, M_WAITOK);
2510 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2511
2512 /*
2513 * Shrink the node by releasing the allocations and truncate the last
2514 * allocation to the new size. If the new size fits into the
2515 * allocation descriptor itself, transform it into an
2516 * UDF_ICB_INTERN_ALLOC.
2517 */
2518 slot = 0;
2519 cpy_slot = 0;
2520 foffset = 0;
2521
2522 /* 1) copy till first overlap piece to the rewrite buffer */
2523 for (;;) {
2524 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2525 if (eof) {
2526 DPRINTF(WRITE,
2527 ("Shrink node failed: "
2528 "encountered EOF\n"));
2529 error = EINVAL;
2530 goto errorout; /* panic? */
2531 }
2532 len = udf_rw32(s_ad.len);
2533 flags = UDF_EXT_FLAGS(len);
2534 len = UDF_EXT_LEN(len);
2535
2536 if (flags == UDF_EXT_REDIRECT) {
2537 slot++;
2538 continue;
2539 }
2540
2541 end_foffset = foffset + len;
2542 if (end_foffset > new_size)
2543 break; /* found */
2544
2545 node_ad_cpy[cpy_slot++] = s_ad;
2546
2547 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2548 "-> stack\n",
2549 udf_rw16(s_ad.loc.part_num),
2550 udf_rw32(s_ad.loc.lb_num),
2551 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2552 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2553
2554 foffset = end_foffset;
2555 slot++;
2556 }
2557 slot_offset = new_size - foffset;
2558
2559 /* 2) trunc overlapping slot at overlap and copy it */
2560 if (slot_offset > 0) {
2561 lb_num = udf_rw32(s_ad.loc.lb_num);
2562 vpart_num = udf_rw16(s_ad.loc.part_num);
2563
2564 if (flags == UDF_EXT_ALLOCATED) {
2565 /* note: round DOWN on num_lb */
2566 lb_num += (slot_offset + lb_size -1) / lb_size;
2567 num_lb = (len - slot_offset) / lb_size;
2568
2569 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2570 }
2571
2572 s_ad.len = udf_rw32(slot_offset | flags);
2573 node_ad_cpy[cpy_slot++] = s_ad;
2574 slot++;
2575
2576 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2577 "-> stack\n",
2578 udf_rw16(s_ad.loc.part_num),
2579 udf_rw32(s_ad.loc.lb_num),
2580 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2581 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2582 }
2583
2584 /* 3) delete remainder */
2585 for (;;) {
2586 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2587 if (eof)
2588 break;
2589
2590 len = udf_rw32(s_ad.len);
2591 flags = UDF_EXT_FLAGS(len);
2592 len = UDF_EXT_LEN(len);
2593
2594 if (flags == UDF_EXT_REDIRECT) {
2595 slot++;
2596 continue;
2597 }
2598
2599 DPRINTF(ALLOC, ("\t3: delete remainder "
2600 "vp %d lb %d, len %d, flags %d\n",
2601 udf_rw16(s_ad.loc.part_num),
2602 udf_rw32(s_ad.loc.lb_num),
2603 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2604 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2605
2606 if (flags == UDF_EXT_ALLOCATED) {
2607 lb_num = udf_rw32(s_ad.loc.lb_num);
2608 vpart_num = udf_rw16(s_ad.loc.part_num);
2609 num_lb = (len + lb_size - 1) / lb_size;
2610
2611 udf_free_allocated_space(ump, lb_num, vpart_num,
2612 num_lb);
2613 }
2614
2615 slot++;
2616 }
2617
2618 /* 4) if it will fit into the descriptor then convert */
2619 if (new_size < max_l_ad) {
2620 /*
2621 * resque/evacuate old piece by reading it in, and convert it
2622 * to internal alloc.
2623 */
2624 if (new_size == 0) {
2625 /* XXX/TODO only for zero sizing now */
2626 udf_wipe_adslots(udf_node);
2627
2628 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2629 icbflags |= UDF_ICB_INTERN_ALLOC;
2630 icbtag->flags = udf_rw16(icbflags);
2631
2632 inflen -= size_diff; KASSERT(inflen == 0);
2633 objsize -= size_diff;
2634 l_ad = new_size;
2635 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2636 if (fe) {
2637 fe->inf_len = udf_rw64(inflen);
2638 fe->l_ad = udf_rw32(l_ad);
2639 fe->tag.desc_crc_len = udf_rw16(crclen);
2640 } else {
2641 efe->inf_len = udf_rw64(inflen);
2642 efe->obj_size = udf_rw64(objsize);
2643 efe->l_ad = udf_rw32(l_ad);
2644 efe->tag.desc_crc_len = udf_rw16(crclen);
2645 }
2646 /* eventually copy in evacuated piece */
2647 /* set new size for uvm */
2648 uvm_vnp_setsize(vp, new_size);
2649
2650 free(node_ad_cpy, M_UDFMNT);
2651 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2652
2653 UDF_UNLOCK_NODE(udf_node, 0);
2654
2655 KASSERT(new_inflen == orig_inflen - size_diff);
2656 KASSERT(new_inflen == 0);
2657 KASSERT(new_lbrec == 0);
2658
2659 return 0;
2660 }
2661
2662 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2663 }
2664
2665 /* 5) reset node descriptors */
2666 udf_wipe_adslots(udf_node);
2667
2668 /* 6) copy back extents; merge when possible. Recounting on the fly */
2669 cpy_slots = cpy_slot;
2670
2671 c_ad = node_ad_cpy[0];
2672 slot = 0;
2673 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2674 s_ad = node_ad_cpy[cpy_slot];
2675
2676 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2677 "lb %d, len %d, flags %d\n",
2678 udf_rw16(s_ad.loc.part_num),
2679 udf_rw32(s_ad.loc.lb_num),
2680 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2681 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2682
2683 /* see if we can merge */
2684 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2685 /* not mergable (anymore) */
2686 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2687 "len %d, flags %d\n",
2688 udf_rw16(c_ad.loc.part_num),
2689 udf_rw32(c_ad.loc.lb_num),
2690 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2691 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2692
2693 error = udf_append_adslot(udf_node, &slot, &c_ad);
2694 if (error)
2695 goto errorout; /* panic? */
2696 c_ad = s_ad;
2697 slot++;
2698 }
2699 }
2700
2701 /* 7) push rest slot (if any) */
2702 if (UDF_EXT_LEN(c_ad.len) > 0) {
2703 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2704 "len %d, flags %d\n",
2705 udf_rw16(c_ad.loc.part_num),
2706 udf_rw32(c_ad.loc.lb_num),
2707 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2708 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2709
2710 error = udf_append_adslot(udf_node, &slot, &c_ad);
2711 if (error)
2712 goto errorout; /* panic? */
2713 ;
2714 }
2715
2716 inflen -= size_diff;
2717 objsize -= size_diff;
2718 if (fe) {
2719 fe->inf_len = udf_rw64(inflen);
2720 } else {
2721 efe->inf_len = udf_rw64(inflen);
2722 efe->obj_size = udf_rw64(objsize);
2723 }
2724 error = 0;
2725
2726 /* set new size for uvm */
2727 uvm_vnp_setsize(vp, new_size);
2728
2729 errorout:
2730 free(node_ad_cpy, M_UDFMNT);
2731
2732 udf_count_alloc_exts(udf_node);
2733
2734 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2735 UDF_UNLOCK_NODE(udf_node, 0);
2736
2737 KASSERT(new_inflen == orig_inflen - size_diff);
2738
2739 return error;
2740 }
2741
2742