/* $NetBSD: udf_allocation.c,v 1.2.2.5 2008/09/28 10:40:51 mjf Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.2.2.5 2008/09/28 10:40:51 mjf Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #include "udf.h"
67 #include "udf_subr.h"
68 #include "udf_bswap.h"
69
70
71 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
72
73 static void udf_record_allocation_in_node(struct udf_mount *ump,
74 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
75 struct long_ad *node_ad_cpy);
76
/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
 * searching for the same or adjacent position again.
 */
83
84 /* --------------------------------------------------------------------- */
85
86 #if 0
87 #if 1
88 static void
89 udf_node_dump(struct udf_node *udf_node) {
90 struct file_entry *fe;
91 struct extfile_entry *efe;
92 struct icb_tag *icbtag;
93 struct long_ad s_ad;
94 uint64_t inflen;
95 uint32_t icbflags, addr_type;
96 uint32_t len, lb_num;
97 uint32_t flags;
98 int part_num;
99 int lb_size, eof, slot;
100
101 if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
102 return;
103
104 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
105
106 fe = udf_node->fe;
107 efe = udf_node->efe;
108 if (fe) {
109 icbtag = &fe->icbtag;
110 inflen = udf_rw64(fe->inf_len);
111 } else {
112 icbtag = &efe->icbtag;
113 inflen = udf_rw64(efe->inf_len);
114 }
115
116 icbflags = udf_rw16(icbtag->flags);
117 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
118
119 printf("udf_node_dump %p :\n", udf_node);
120
121 if (addr_type == UDF_ICB_INTERN_ALLOC) {
122 printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
123 return;
124 }
125
126 printf("\tInflen = %"PRIu64"\n", inflen);
127 printf("\t\t");
128
129 slot = 0;
130 for (;;) {
131 udf_get_adslot(udf_node, slot, &s_ad, &eof);
132 if (eof)
133 break;
134 part_num = udf_rw16(s_ad.loc.part_num);
135 lb_num = udf_rw32(s_ad.loc.lb_num);
136 len = udf_rw32(s_ad.len);
137 flags = UDF_EXT_FLAGS(len);
138 len = UDF_EXT_LEN(len);
139
140 printf("[");
141 if (part_num >= 0)
142 printf("part %d, ", part_num);
143 printf("lb_num %d, len %d", lb_num, len);
144 if (flags)
145 printf(", flags %d", flags>>30);
146 printf("] ");
147
148 if (flags == UDF_EXT_REDIRECT) {
149 printf("\n\textent END\n\tallocation extent\n\t\t");
150 }
151
152 slot++;
153 }
154 printf("\n\tl_ad END\n\n");
155 }
156 #else
157 #define udf_node_dump(a)
158 #endif
159
160
/*
 * Paranoia check: verify that the num_lb logical blocks starting at lb_num
 * in virtual partition vpart_num are marked as allocated, i.e. that their
 * bits are CLEAR in the partition's unallocated-space bitmap (a set bit
 * means the block is free).  Blocks found free are reported on the
 * console; the check currently does not panic.
 */
static void
udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
	uint32_t lb_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	uint32_t ptov;
	uint32_t bitval;
	uint8_t *bpos;
	int bit;
	int phys_part;
	int ok;

	DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);	/* NOTE(review): unused here */
		phys_part = ump->vtop[vpart_num];

		/* use unallocated bitmap */
		bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* check bits; a set bit means the block is NOT allocated */
		KASSERT(bitmap->bits);
		ok = 1;
		bpos = bitmap->bits + lb_num/8;
		bit  = lb_num % 8;
		while (num_lb > 0) {
			bitval = (1 << bit);
			DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
				lb_num, bpos, bit));
			KASSERT(bitmap->bits + lb_num/8 == bpos);
			if (*bpos & bitval) {
				printf("\tlb_num %d is NOT marked busy\n",
					lb_num);
				ok = 0;
			}
			lb_num++; num_lb--;
			bit = (bit + 1) % 8;
			/* advance to the next bitmap byte on bit wrap */
			if (bit == 0)
				bpos++;
		}
		if (!ok) {
			/* KASSERT(0); */
		}

		break;
	case UDF_VTOP_TYPE_VIRT :
		/* TODO check space */
		KASSERT(num_lb == 1);
		break;
	case UDF_VTOP_TYPE_META :
		/* TODO check space in the metadata bitmap */
	default:
		/* not implemented */
		break;
	}
}
230
231
/*
 * Debug version of the node sanity check (compiled out; the whole region
 * is under #if 0).  Walks all allocation descriptors of the node, checks
 * that the unused tail of the descriptor area is blank, verifies that
 * allocated extents are marked busy in the space bitmaps and recomputes
 * the information length and recorded logical block count.
 *
 * => *cnt_inflen     receives the recalculated information length
 * => *cnt_logblksrec receives the recalculated recorded block count
 * Both are KASSERTed to match the values recorded in the node itself.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
{
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type;
	uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
	uint16_t part_num;
	uint8_t *data_pos;
	int dscr_size, lb_size, flags, whole_lb;
	int i, slot, eof;

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));

	if (1)
		udf_node_dump(udf_node);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* fetch descriptor geometry from the (extended) file entry */
	fe  = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr       = (union dscrptr *) fe;
		icbtag     = &fe->icbtag;
		inflen     = udf_rw64(fe->inf_len);
		dscr_size  = sizeof(struct file_entry) -1;
		logblksrec = udf_rw64(fe->logblks_rec);
		l_ad       = udf_rw32(fe->l_ad);
		l_ea       = udf_rw32(fe->l_ea);
	} else {
		dscr       = (union dscrptr *) efe;
		icbtag     = &efe->icbtag;
		inflen     = udf_rw64(efe->inf_len);
		dscr_size  = sizeof(struct extfile_entry) -1;
		logblksrec = udf_rw64(efe->logblks_rec);
		l_ad       = udf_rw32(efe->l_ad);
		l_ea       = udf_rw32(efe->l_ea);
	}
	/* allocation descriptors start after the extended attributes area */
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad  = lb_size - dscr_size - l_ea;
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* check if tail is zero */
	DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
	for (i = l_ad; i < max_l_ad; i++) {
		if (data_pos[i] != 0)
			printf( "sanity_check: violation: node byte %d "
				"has value %d\n", i, data_pos[i]);
	}

	/* reset counters */
	*cnt_inflen     = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: l_ad is the byte length of the file data */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		return;
	}

	/* start counting */
	whole_lb = 1;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;
		/* only the last extent may end mid-block, so the previous
		 * extent must have filled whole logical blocks */
		KASSERT(whole_lb == 1);

		part_num = udf_rw16(s_ad.loc.part_num);
		lb_num = udf_rw32(s_ad.loc.lb_num);
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* redirect extents are exactly one logical block */
			KASSERT(len == lb_size);
		}
		/* check allocation */
		if (flags == UDF_EXT_ALLOCATED)
			udf_assert_allocated(udf_node->ump, part_num, lb_num,
				(len + lb_size - 1) / lb_size);

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);

		slot++;
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

//	KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
}
339 #else
340 static void
341 udf_node_sanity_check(struct udf_node *udf_node,
342 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
343 struct file_entry *fe;
344 struct extfile_entry *efe;
345 struct icb_tag *icbtag;
346 uint64_t inflen, logblksrec;
347 int dscr_size, lb_size;
348
349 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
350
351 fe = udf_node->fe;
352 efe = udf_node->efe;
353 if (fe) {
354 icbtag = &fe->icbtag;
355 inflen = udf_rw64(fe->inf_len);
356 dscr_size = sizeof(struct file_entry) -1;
357 logblksrec = udf_rw64(fe->logblks_rec);
358 } else {
359 icbtag = &efe->icbtag;
360 inflen = udf_rw64(efe->inf_len);
361 dscr_size = sizeof(struct extfile_entry) -1;
362 logblksrec = udf_rw64(efe->logblks_rec);
363 }
364 *cnt_logblksrec = logblksrec;
365 *cnt_inflen = inflen;
366 }
367 #endif
368
369 /* --------------------------------------------------------------------- */
370
/*
 * Translate a virtual address (virtual partition number plus logical
 * block number, passed as a long_ad) into a disc logical block number.
 *
 * => *lb_numres receives the translated disc logical block number
 * => *extres    receives the number of blocks, starting at *lb_numres,
 *               for which this translation remains valid
 * => returns 0 on success, EINVAL (or a udf_vat_read() error) on failure
 */
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
		   uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart  = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

	/* metadata partitions re-enter here after remapping vpart/lb_num */
translate_again:
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel    = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres    = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* not remapped; transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot    = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len   = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			/* redirect extents are not file data; skip them */
			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num = udf_rw32(s_icb_loc.loc.lb_num);
		vpart  = udf_rw16(s_icb_loc.loc.part_num);
		lb_num += (ext_offset + lb_size -1) / lb_size;
		len    -= ext_offset;
		ext_offset = 0;

		flags = UDF_EXT_FLAGS(s_icb_loc.len);

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}
534
535
536 /* XXX provisional primitive braindead version */
537 /* TODO use ext_res */
538 void
539 udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
540 uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
541 {
542 struct long_ad loc;
543 uint32_t lb_numres, ext_res;
544 int sector;
545
546 for (sector = 0; sector < sectors; sector++) {
547 memset(&loc, 0, sizeof(struct long_ad));
548 loc.loc.part_num = udf_rw16(vpart_num);
549 loc.loc.lb_num = udf_rw32(*lmapping);
550 udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
551 *pmapping = lb_numres;
552 lmapping++; pmapping++;
553 }
554 }
555
556
557 /* --------------------------------------------------------------------- */
558
/*
 * Translate an extent (in logical_blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */
563
/*
 * => from    first file logical block to translate
 * => num_lb  number of logical blocks to translate (must be > 0)
 * => map     array receiving one entry per block: a translated disc
 *            block number or UDF_TRANS_ZERO for unbacked blocks; for
 *            files with embedded data only map[0] is set, to
 *            UDF_TRANS_INTERN
 * => returns 0 on success, ENOENT/EINVAL or a translation error
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data embedded in the node itself; no extents */
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot    = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirect extents are not file data; skip them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; byte offset of `from' within the extent */
	ext_offset = from * lb_size - foffset;

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);

		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		overlap  = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * note that the while() is necessary since the extent
		 * udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* unbacked blocks map to zero-filled data */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* translate via the partition mapping */
				t_ad.loc.lb_num   = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
731
732 /* --------------------------------------------------------------------- */
733
/*
 * Search the VAT for a free entry (value 0xffffffff), scanning in
 * lb_size-sized chunks starting at the last known free position.  If no
 * free entry is found the VAT is extended by one entry.  The entry is
 * then claimed by writing the initialiser value 0xfffffffe into it and
 * its number is returned in *lbnumres.
 *
 * NOTE(review): on a udf_vat_read() error the failure is only printed;
 * the code still falls through to extending the VAT and returns 0.  The
 * udf_vat_write() result is ignored too — confirm both are intended.
 */
static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
	uint8_t *blob;
	int entry, chunk, found, error;

	KASSERT(ump);
	KASSERT(ump->logical_vol);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

	/* TODO static allocation of search chunk */

	/* start searching at the last known free entry */
	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
	found  = 0;
	error  = 0;
	entry  = 0;
	do {
		/* each VAT entry is 4 bytes wide */
		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
		if (chunk <= 0)
			break;
		/* load in chunk */
		error = udf_vat_read(ump->vat_node, blob, chunk,
				ump->vat_offset + lb_num * 4);

		if (error)
			break;

		/* search this chunk */
		for (entry=0; entry < chunk /4; entry++, lb_num++) {
			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
			lb_map = udf_rw32(udf_rw32_lbmap);
			if (lb_map == 0xffffffff) {
				found = 1;
				break;
			}
		}
	} while (!found);
	if (error) {
		printf("udf_search_free_vatloc: error reading in vat chunk "
			"(lb %d, size %d)\n", lb_num, chunk);
	}

	if (!found) {
		/* extend VAT */
		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
		lb_num = ump->vat_entries;
		ump->vat_entries++;
	}

	/* mark entry with initialiser just in case */
	lb_map = udf_rw32(0xfffffffe);
	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
		ump->vat_offset + lb_num *4);
	ump->vat_last_free_lb = lb_num;

	free(blob, M_UDFTEMP);
	*lbnumres = lb_num;
	return 0;
}
796
797
/*
 * Allocate up to *num_lb logical blocks from the given space bitmap; a
 * SET bit marks a free block.  Allocated block numbers are stored in
 * lmappos[] and their bits cleared.  On return *num_lb holds the number
 * of blocks that could NOT be allocated (0 on full success).  The blocks
 * returned are not guaranteed to be contiguous.
 *
 * Searching starts at the bitmap's remembered metadata or data position,
 * selected by `ismetadata'; a second pass wraps around to offset 0.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t  diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;		/* round down to a byte boundary */
	for (pass = 0; pass < 2; pass++) {
		/* second pass wraps around to the start of the bitmap */
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos  = bitmap->bits + offset/8;
			bit   = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				/* byte fully allocated; try next byte */
				offset += 8;
				continue;
			}
			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			/* clear the bit to mark the block allocated */
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember where we stopped for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
846
847
848 static void
849 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
850 {
851 uint32_t offset;
852 uint32_t bit, bitval;
853 uint8_t *bpos;
854
855 offset = lb_num;
856
857 /* starter bits */
858 bpos = bitmap->bits + offset/8;
859 bit = offset % 8;
860 while ((bit != 0) && (num_lb > 0)) {
861 bitval = (1 << bit);
862 KASSERT((*bpos & bitval) == 0);
863 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
864 offset, bpos, bit));
865 *bpos |= bitval;
866 offset++; num_lb--;
867 bit = (bit + 1) % 8;
868 }
869 if (num_lb == 0)
870 return;
871
872 /* whole bytes */
873 KASSERT(bit == 0);
874 bpos = bitmap->bits + offset / 8;
875 while (num_lb >= 8) {
876 KASSERT((*bpos == 0));
877 DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
878 *bpos = 255;
879 offset += 8; num_lb -= 8;
880 bpos++;
881 }
882
883 /* stop bits */
884 KASSERT(num_lb < 8);
885 bit = 0;
886 while (num_lb > 0) {
887 bitval = (1 << bit);
888 KASSERT((*bpos & bitval) == 0);
889 DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
890 offset, bpos, bit));
891 *bpos |= bitval;
892 offset++; num_lb--;
893 bit = (bit + 1) % 8;
894 }
895 }
896
897
/* allocate a contiguous sequence of sectornumbers */
/*
 * Allocate num_lb logical blocks on virtual partition vpart_num and
 * store their logical block numbers in lmapping[].  The strategy depends
 * on the partition's allocation scheme (VAT, sequential, space bitmap,
 * metadata bitmap).  Returns 0 on success or an error (e.g. ENOSPC).
 */
static int
udf_allocate_space(struct udf_mount *ump, int udf_c_type,
	uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* XXX TODO check disc space */

	alloc_type = ump->vtop_alloc[vpart_num];
	is_node    = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error)
			*lmappos = lb_num;
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov  = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate at the next writable address of the track */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity desc. */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount in the logvol integrity desc. */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R  */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R  */
		printf("ALERT: udf_allocate_space : allocation %d "
				"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64",", (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
1038
1039 /* --------------------------------------------------------------------- */
1040
/*
 * Return num_lb previously allocated logical blocks starting at lb_num
 * on virtual partition vpart_num to the free space administration.  For
 * bitmap-backed partitions the blocks are marked free and the logical
 * volume's free-space count is adjusted; for VAT partitions the VAT
 * entry is reset to 0xffffffff (free).
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by resetting it to `unused' */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logvol integrity descriptor */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
1128
1129 /* --------------------------------------------------------------------- */
1130
/*
 * Reserve and allocate space prior to writing.  For VAT-backed
 * partitions the blocks are first accounted as uncommitted; the actual
 * allocation is delegated to udf_allocate_space().
 * Returns 0 on success or an error from udf_allocate_space().
 */
int
udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type,
	uint32_t num_lb, uint16_t vpartnr, uint64_t *lmapping)
{
	/* TODO properly maintain uncomitted_lb per partition */

	/* reserve size for VAT allocated data */
	if (ump->vtop_alloc[vpartnr] == UDF_ALLOC_VAT) {
		mutex_enter(&ump->allocate_mutex);
		ump->uncomitted_lb += num_lb;
		mutex_exit(&ump->allocate_mutex);
	}

	return udf_allocate_space(ump, udf_c_type, vpartnr, num_lb, lmapping);
}
1146
1147 /* --------------------------------------------------------------------- */
1148
1149 /*
1150 * Allocate a buf on disc for direct write out. The space doesn't have to be
1151 * contiguous as the caller takes care of this.
1152 */
1153
1154 void
1155 udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
1156 uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
1157 {
1158 struct udf_node *udf_node = VTOI(buf->b_vp);
1159 int lb_size, blks, udf_c_type;
1160 int vpart_num, num_lb;
1161 int error, s;
1162
1163 /*
1164 * for each sector in the buf, allocate a sector on disc and record
1165 * its position in the provided mapping array.
1166 *
1167 * If its userdata or FIDs, record its location in its node.
1168 */
1169
1170 lb_size = udf_rw32(ump->logical_vol->lb_size);
1171 num_lb = (buf->b_bcount + lb_size -1) / lb_size;
1172 blks = lb_size / DEV_BSIZE;
1173 udf_c_type = buf->b_udf_c_type;
1174
1175 KASSERT(lb_size == ump->discinfo.sector_size);
1176
1177 /* select partition to record the buffer on */
1178 vpart_num = ump->data_part;
1179 if (udf_c_type == UDF_C_NODE)
1180 vpart_num = ump->node_part;
1181 if (udf_c_type == UDF_C_FIDS)
1182 vpart_num = ump->fids_part;
1183 *vpart_nump = vpart_num;
1184
1185 if (udf_c_type == UDF_C_NODE) {
1186 /* if not VAT, its allready allocated */
1187 if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
1188 return;
1189
1190 /* allocate on its backing sequential partition */
1191 vpart_num = ump->data_part;
1192 }
1193
1194 /* do allocation on the selected partition */
1195 error = udf_allocate_space(ump, udf_c_type,
1196 vpart_num, num_lb, lmapping);
1197 if (error) {
1198 /* ARGH! we've not done our accounting right! */
1199 panic("UDF disc allocation accounting gone wrong");
1200 }
1201
1202 /* commit our sector count */
1203 mutex_enter(&ump->allocate_mutex);
1204 if (num_lb > ump->uncomitted_lb) {
1205 ump->uncomitted_lb = 0;
1206 } else {
1207 ump->uncomitted_lb -= num_lb;
1208 }
1209 mutex_exit(&ump->allocate_mutex);
1210
1211 /* If its userdata or FIDs, record its allocation in its node. */
1212 if ((udf_c_type == UDF_C_USERDATA) ||
1213 (udf_c_type == UDF_C_FIDS) ||
1214 (udf_c_type == UDF_C_METADATA_SBM))
1215 {
1216 udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
1217 node_ad_cpy);
1218 /* decrement our outstanding bufs counter */
1219 s = splbio();
1220 udf_node->outstanding_bufs--;
1221 splx(s);
1222 }
1223 }
1224
1225 /* --------------------------------------------------------------------- */
1226
1227 /*
1228 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1229 * possible (anymore); a2 returns the rest piece.
1230 */
1231
1232 static int
1233 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1234 {
1235 uint32_t max_len, merge_len;
1236 uint32_t a1_len, a2_len;
1237 uint32_t a1_flags, a2_flags;
1238 uint32_t a1_lbnum, a2_lbnum;
1239 uint16_t a1_part, a2_part;
1240
1241 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1242
1243 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1244 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1245 a1_lbnum = udf_rw32(a1->loc.lb_num);
1246 a1_part = udf_rw16(a1->loc.part_num);
1247
1248 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1249 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1250 a2_lbnum = udf_rw32(a2->loc.lb_num);
1251 a2_part = udf_rw16(a2->loc.part_num);
1252
1253 /* defines same space */
1254 if (a1_flags != a2_flags)
1255 return 1;
1256
1257 if (a1_flags != UDF_EXT_FREE) {
1258 /* the same partition */
1259 if (a1_part != a2_part)
1260 return 1;
1261
1262 /* a2 is successor of a1 */
1263 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1264 return 1;
1265 }
1266
1267 /* merge as most from a2 if possible */
1268 merge_len = MIN(a2_len, max_len - a1_len);
1269 a1_len += merge_len;
1270 a2_len -= merge_len;
1271 a2_lbnum += merge_len/lb_size;
1272
1273 a1->len = udf_rw32(a1_len | a1_flags);
1274 a2->len = udf_rw32(a2_len | a2_flags);
1275 a2->loc.lb_num = udf_rw32(a2_lbnum);
1276
1277 if (a2_len > 0)
1278 return 1;
1279
1280 /* there is space over to merge */
1281 return 0;
1282 }
1283
1284 /* --------------------------------------------------------------------- */
1285
1286 static void
1287 udf_wipe_adslots(struct udf_node *udf_node)
1288 {
1289 struct file_entry *fe;
1290 struct extfile_entry *efe;
1291 struct alloc_ext_entry *ext;
1292 uint64_t inflen, objsize;
1293 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1294 uint8_t *data_pos;
1295 int extnr;
1296
1297 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1298
1299 fe = udf_node->fe;
1300 efe = udf_node->efe;
1301 if (fe) {
1302 inflen = udf_rw64(fe->inf_len);
1303 objsize = inflen;
1304 dscr_size = sizeof(struct file_entry) -1;
1305 l_ea = udf_rw32(fe->l_ea);
1306 l_ad = udf_rw32(fe->l_ad);
1307 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1308 } else {
1309 inflen = udf_rw64(efe->inf_len);
1310 objsize = udf_rw64(efe->obj_size);
1311 dscr_size = sizeof(struct extfile_entry) -1;
1312 l_ea = udf_rw32(efe->l_ea);
1313 l_ad = udf_rw32(efe->l_ad);
1314 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1315 }
1316 max_l_ad = lb_size - dscr_size - l_ea;
1317
1318 /* wipe fe/efe */
1319 memset(data_pos, 0, max_l_ad);
1320 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1321 if (fe) {
1322 fe->l_ad = udf_rw32(0);
1323 fe->logblks_rec = udf_rw64(0);
1324 fe->tag.desc_crc_len = udf_rw32(crclen);
1325 } else {
1326 efe->l_ad = udf_rw32(0);
1327 efe->logblks_rec = udf_rw64(0);
1328 efe->tag.desc_crc_len = udf_rw32(crclen);
1329 }
1330
1331 /* wipe all allocation extent entries */
1332 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1333 ext = udf_node->ext[extnr];
1334 dscr_size = sizeof(struct alloc_ext_entry) -1;
1335 data_pos = (uint8_t *) ext->data;
1336 max_l_ad = lb_size - dscr_size;
1337 memset(data_pos, 0, max_l_ad);
1338 ext->l_ad = udf_rw32(0);
1339
1340 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1341 ext->tag.desc_crc_len = udf_rw32(crclen);
1342 }
1343 udf_node->i_flags |= IN_NODE_REBUILD;
1344 }
1345
1346 /* --------------------------------------------------------------------- */
1347
/*
 * Read the allocation descriptor at index `slot' into *icb, transparently
 * following redirect descriptors into chained allocation extent entries.
 * Short ads are widened to a long_ad using the node's own partition
 * number. *eof is set when the slot lies past the recorded descriptors.
 */
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		/* allocation descriptors start after the extended attrs */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	/* descriptor length depends on the address type in use */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* synthesize a long_ad from the short_ad */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* no more chained extents; slot is past the end */
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		/* offset becomes relative to the next extent entry */
		offset = offset - l_ad;
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element, widening short ads to a long_ad */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}
1455
1456 /* --------------------------------------------------------------------- */
1457
/*
 * Write allocation descriptor *icb at index *slot in the node, appending
 * a new allocation extent entry (AED) when the current descriptor area is
 * (nearly) full. Updates l_ad, desc_crc_len and logblks_rec bookkeeping;
 * *slot is incremented when a redirect had to be inserted so the caller's
 * slot numbering stays consistent. Returns 0 on success or an errno value
 * when allocating space for a new AED fails.
 */
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr *dscr, *extdscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	vpart_num = udf_rw16(udf_node->loc.loc.part_num);

	/* determine what descriptor we are in */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea = udf_rw32(fe->l_ea);
		l_ad_p = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag = &efe->icbtag;
		dscr = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea = udf_rw32(efe->l_ea);
		l_ad_p = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	/* allocation descriptors start after the extended attributes */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	/* descriptor length depends on the address type in use */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad since it can be a synthesized one */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num = udf_rw32(0);
	}

	/* if offset too big, we go to the allocation extensions */
	l_ad = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			/* synthesize a long_ad from the short_ad */
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is adressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		offset = offset - l_ad;

		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts; the old blocks are replaced */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/*
		 * Not enough room left for both this descriptor and a
		 * trailing redirect: have to append an aed; see if we
		 * already have a spare one.
		 */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));

			/* allocate one logical block for the new aed */
			error = udf_pre_allocate_space(ump, UDF_C_NODE, 1,
					vpart_num, &lmapping);
			lb_num = lmapping;
			if (error)
				return error;

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
			ext = &extdscr->aee;

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw32(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr] = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		crclen = udf_rw32(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust callees slot count for link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
			"len %d, flags %d\n", data_pos + offset,
			icb->loc.part_num, icb->loc.lb_num,
			UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed, i.e. on a true append */
	if (offset >= l_ad) {
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1696
1697 /* --------------------------------------------------------------------- */
1698
1699 static void
1700 udf_count_alloc_exts(struct udf_node *udf_node)
1701 {
1702 struct long_ad s_ad;
1703 uint32_t lb_num, len, flags;
1704 uint16_t vpart_num;
1705 int slot, eof;
1706 int num_extents, extnr;
1707 int lb_size;
1708
1709 if (udf_node->num_extensions == 0)
1710 return;
1711
1712 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1713 /* count number of allocation extents in use */
1714 num_extents = 0;
1715 slot = 0;
1716 for (;;) {
1717 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1718 if (eof)
1719 break;
1720 len = udf_rw32(s_ad.len);
1721 flags = UDF_EXT_FLAGS(len);
1722
1723 if (flags == UDF_EXT_REDIRECT)
1724 num_extents++;
1725
1726 slot++;
1727 }
1728
1729 DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
1730 num_extents));
1731
1732 /* XXX choice: we could delay freeing them on node writeout */
1733 /* free excess entries */
1734 extnr = num_extents;
1735 for (;extnr < udf_node->num_extensions; extnr++) {
1736 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1737 /* free dscriptor */
1738 s_ad = udf_node->ext_loc[extnr];
1739 udf_free_logvol_dscr(udf_node->ump, &s_ad,
1740 udf_node->ext[extnr]);
1741 udf_node->ext[extnr] = NULL;
1742
1743 /* free disc space */
1744 lb_num = udf_rw32(s_ad.loc.lb_num);
1745 vpart_num = udf_rw16(s_ad.loc.part_num);
1746 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1747
1748 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1749 }
1750
1751 /* set our new number of allocation extents */
1752 udf_node->num_extensions = num_extents;
1753 }
1754
1755
1756 /* --------------------------------------------------------------------- */
1757
1758 /*
1759 * Adjust the node's allocation descriptors to reflect the new mapping; do
1760 * take note that we might glue to existing allocation descriptors.
1761 *
1762 * XXX Note there can only be one allocation being recorded/mount; maybe
1763 * explicit allocation in shedule thread?
1764 */
1765
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset, replace_len, replace;
	int addr_type, icbflags;
//	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	/*
	 * Rewrite the node's allocation descriptors so that the byte range
	 * covered by `buf' points at the newly allocated blocks given in
	 * `mapping'. The descriptors are staged in node_ad_cpy (the
	 * caller-supplied scratch array), the old set is wiped and the new
	 * set is written back merged where possible.
	 */

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));

#if 0
	/* XXX disable sanity check for now */
	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;
#endif

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* byte range [from, till) covered by the buffer in the file */
	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* embedded data has no allocation descriptors to rewrite */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot = 0;
	cpy_slot = 0;
	foffset = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		/* redirects are administrative, skip them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap starts for step 4 */
	restart_slot = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		/* keep only the head of the partially overlapped extent */
		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start = mapping[lb_num];
		run_length = 1;
		/* extend the run over consecutive (or repeated) mappings */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking; last extent may not reach a block edge */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	slot = restart_slot;
	foffset = restart_foffset;

	replace_len = till - foffset;	/* total amount of bytes to pop */
	slot_offset = from - foffset;	/* offset in first encounted slot */
	KASSERT((slot_offset % lb_size) == 0);

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
				"replace_len %d, "
				"vp %d, lb %d, len %d, flags %d\n",
			slot, slot_offset, replace_len,
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* adjust for slot offset; the head was kept in step 2 */
		if (slot_offset) {
			DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
			lb_num += slot_offset / lb_size;
			len -= slot_offset;
			foffset += slot_offset;
			replace_len -= slot_offset;

			/* mark adjusted */
			slot_offset = 0;
		}

		/* advance for (the rest of) this slot */
		replace = MIN(len, replace_len);
		DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));

		/* advance for this slot; release the superseded blocks */
		if (replace) {
			/* note: dont round DOWN on num_lb since we then
			 * forget the last partial one */
			num_lb = (replace + lb_size - 1) / lb_size;
			if (flags != UDF_EXT_FREE) {
				udf_free_allocated_space(ump, lb_num,
					udf_rw16(s_ad.loc.part_num), num_lb);
			}
			lb_num += num_lb;
			len -= replace;
			foffset += replace;
			replace_len -= replace;
		}

		/* do we have a slot tail ? */
		if (len) {
			KASSERT(foffset % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			/* free extents carry no meaningful location */
			if (flags == UDF_EXT_FREE)
				s_ad.loc.lb_num = udf_rw32(0);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}

		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
				"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	/* drop allocation extent entries no longer referenced */
	udf_count_alloc_exts(udf_node);

	/* the node's descriptors should now be sane */
	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
2126
2127 /* --------------------------------------------------------------------- */
2128
/*
 * Grow (extend) the allocation of `udf_node' to `new_size' bytes.
 *
 * Strategy:
 *   1. If the node uses internal (embedded) allocation and the grown data
 *      still fits inside the (ext)file entry descriptor, only the recorded
 *      lengths and CRC length are updated.
 *   2. Otherwise, any embedded data is evacuated into a temporary buffer,
 *      the node is converted to short/long allocation descriptors, and the
 *      extra space is appended as UDF_EXT_FREE (unallocated-but-recorded)
 *      extents, chunked to the maximum extent length.  The evacuated data
 *      is then written back through the normal vnode write path.
 *
 * NOTE(review): assumes new_size >= current inf_len (size_diff would wrap
 * otherwise) -- TODO confirm against callers.
 *
 * Locks/unlocks the node internally; returns 0 on success or an errno
 * value if appending an allocation slot fails.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t icbflags, len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint16_t my_part, dst_part;
	uint8_t *data_pos, *evacuated_data;
	int addr_type;
	int slot, cpy_slot;
	int isdir, eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));

	UDF_LOCK_NODE(udf_node, 0);
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* maximum extent length, rounded down to a whole number of blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	/* the node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
	}
	/* allocation descriptors (or embedded data) follow the extended attrs */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			/* CRC covers everything after the descriptor tag */
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif
	
			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* internal allocation records no logical blocks */
			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0, 
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc and select type */
		isdir    = (vp->v_type == VDIR);
		my_part  = udf_rw16(udf_node->loc.loc.part_num);
		dst_part = isdir? ump->fids_part : ump->data_part;
		/* long_ads are only needed when crossing partitions */
		addr_type = UDF_ICB_SHORT_ALLOC;
		if (dst_part != my_part)
			addr_type = UDF_ICB_LONG_ALLOC;

		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= addr_type;
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* start over with one free extent covering the old contents */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			/* redirect slots don't contribute to the file offset */
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, &slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, &slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0, 
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);

	udf_count_alloc_exts(udf_node);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	UDF_UNLOCK_NODE(udf_node, 0);

	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2398
2399 /* --------------------------------------------------------------------- */
2400
2401 int
2402 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2403 {
2404 struct vnode *vp = udf_node->vnode;
2405 struct udf_mount *ump = udf_node->ump;
2406 struct file_entry *fe;
2407 struct extfile_entry *efe;
2408 struct icb_tag *icbtag;
2409 struct long_ad c_ad, s_ad, *node_ad_cpy;
2410 uint64_t size_diff, old_size, inflen, objsize;
2411 uint64_t foffset, end_foffset;
2412 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2413 uint32_t lb_size, dscr_size, crclen;
2414 uint32_t slot_offset;
2415 uint32_t len, flags, max_len;
2416 uint32_t num_lb, lb_num;
2417 uint32_t max_l_ad, l_ad, l_ea;
2418 uint16_t vpart_num;
2419 uint8_t *data_pos;
2420 int icbflags, addr_type;
2421 int slot, cpy_slot, cpy_slots;
2422 int eof, error;
2423
2424 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2425
2426 UDF_LOCK_NODE(udf_node, 0);
2427 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2428
2429 lb_size = udf_rw32(ump->logical_vol->lb_size);
2430 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2431
2432 /* do the work */
2433 fe = udf_node->fe;
2434 efe = udf_node->efe;
2435 if (fe) {
2436 icbtag = &fe->icbtag;
2437 inflen = udf_rw64(fe->inf_len);
2438 objsize = inflen;
2439 dscr_size = sizeof(struct file_entry) -1;
2440 l_ea = udf_rw32(fe->l_ea);
2441 l_ad = udf_rw32(fe->l_ad);
2442 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2443 } else {
2444 icbtag = &efe->icbtag;
2445 inflen = udf_rw64(efe->inf_len);
2446 objsize = udf_rw64(efe->obj_size);
2447 dscr_size = sizeof(struct extfile_entry) -1;
2448 l_ea = udf_rw32(efe->l_ea);
2449 l_ad = udf_rw32(efe->l_ad);
2450 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2451 }
2452 max_l_ad = lb_size - dscr_size - l_ea;
2453
2454 icbflags = udf_rw16(icbtag->flags);
2455 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2456
2457 old_size = inflen;
2458 size_diff = old_size - new_size;
2459
2460 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2461
2462 /* shrink the node to its new size */
2463 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2464 /* only reflect size change directly in the node */
2465 KASSERT(new_size <= max_l_ad);
2466 inflen -= size_diff;
2467 objsize -= size_diff;
2468 l_ad -= size_diff;
2469 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2470 if (fe) {
2471 fe->inf_len = udf_rw64(inflen);
2472 fe->l_ad = udf_rw32(l_ad);
2473 fe->tag.desc_crc_len = udf_rw32(crclen);
2474 } else {
2475 efe->inf_len = udf_rw64(inflen);
2476 efe->obj_size = udf_rw64(objsize);
2477 efe->l_ad = udf_rw32(l_ad);
2478 efe->tag.desc_crc_len = udf_rw32(crclen);
2479 }
2480 error = 0;
2481
2482 /* clear the space in the descriptor */
2483 KASSERT(old_size > new_size);
2484 memset(data_pos + new_size, 0, old_size - new_size);
2485
2486 /* TODO zero appened space in buffer! */
2487 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2488
2489 /* set new size for uvm */
2490 uvm_vnp_setsize(vp, new_size);
2491
2492 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2493 UDF_UNLOCK_NODE(udf_node, 0);
2494
2495 KASSERT(new_inflen == orig_inflen - size_diff);
2496 KASSERT(new_lbrec == orig_lbrec);
2497 KASSERT(new_lbrec == 0);
2498
2499 return 0;
2500 }
2501
2502 /* setup node cleanup extents copy space */
2503 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2504 M_UDFMNT, M_WAITOK);
2505 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2506
2507 /*
2508 * Shrink the node by releasing the allocations and truncate the last
2509 * allocation to the new size. If the new size fits into the
2510 * allocation descriptor itself, transform it into an
2511 * UDF_ICB_INTERN_ALLOC.
2512 */
2513 slot = 0;
2514 cpy_slot = 0;
2515 foffset = 0;
2516
2517 /* 1) copy till first overlap piece to the rewrite buffer */
2518 for (;;) {
2519 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2520 if (eof) {
2521 DPRINTF(WRITE,
2522 ("Shrink node failed: "
2523 "encountered EOF\n"));
2524 error = EINVAL;
2525 goto errorout; /* panic? */
2526 }
2527 len = udf_rw32(s_ad.len);
2528 flags = UDF_EXT_FLAGS(len);
2529 len = UDF_EXT_LEN(len);
2530
2531 if (flags == UDF_EXT_REDIRECT) {
2532 slot++;
2533 continue;
2534 }
2535
2536 end_foffset = foffset + len;
2537 if (end_foffset > new_size)
2538 break; /* found */
2539
2540 node_ad_cpy[cpy_slot++] = s_ad;
2541
2542 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2543 "-> stack\n",
2544 udf_rw16(s_ad.loc.part_num),
2545 udf_rw32(s_ad.loc.lb_num),
2546 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2547 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2548
2549 foffset = end_foffset;
2550 slot++;
2551 }
2552 slot_offset = new_size - foffset;
2553
2554 /* 2) trunc overlapping slot at overlap and copy it */
2555 if (slot_offset > 0) {
2556 lb_num = udf_rw32(s_ad.loc.lb_num);
2557 vpart_num = udf_rw16(s_ad.loc.part_num);
2558
2559 if (flags == UDF_EXT_ALLOCATED) {
2560 /* note: round DOWN on num_lb */
2561 lb_num += (slot_offset + lb_size -1) / lb_size;
2562 num_lb = (len - slot_offset) / lb_size;
2563
2564 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2565 }
2566
2567 s_ad.len = udf_rw32(slot_offset | flags);
2568 node_ad_cpy[cpy_slot++] = s_ad;
2569 slot++;
2570
2571 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2572 "-> stack\n",
2573 udf_rw16(s_ad.loc.part_num),
2574 udf_rw32(s_ad.loc.lb_num),
2575 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2576 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2577 }
2578
2579 /* 3) delete remainder */
2580 for (;;) {
2581 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2582 if (eof)
2583 break;
2584
2585 len = udf_rw32(s_ad.len);
2586 flags = UDF_EXT_FLAGS(len);
2587 len = UDF_EXT_LEN(len);
2588
2589 if (flags == UDF_EXT_REDIRECT) {
2590 slot++;
2591 continue;
2592 }
2593
2594 DPRINTF(ALLOC, ("\t3: delete remainder "
2595 "vp %d lb %d, len %d, flags %d\n",
2596 udf_rw16(s_ad.loc.part_num),
2597 udf_rw32(s_ad.loc.lb_num),
2598 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2599 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2600
2601 if (flags == UDF_EXT_ALLOCATED) {
2602 lb_num = udf_rw32(s_ad.loc.lb_num);
2603 vpart_num = udf_rw16(s_ad.loc.part_num);
2604 num_lb = (len + lb_size - 1) / lb_size;
2605
2606 udf_free_allocated_space(ump, lb_num, vpart_num,
2607 num_lb);
2608 }
2609
2610 slot++;
2611 }
2612
2613 /* 4) if it will fit into the descriptor then convert */
2614 if (new_size < max_l_ad) {
2615 /*
2616 * resque/evacuate old piece by reading it in, and convert it
2617 * to internal alloc.
2618 */
2619 if (new_size == 0) {
2620 /* XXX/TODO only for zero sizing now */
2621 udf_wipe_adslots(udf_node);
2622
2623 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2624 icbflags |= UDF_ICB_INTERN_ALLOC;
2625 icbtag->flags = udf_rw16(icbflags);
2626
2627 inflen -= size_diff; KASSERT(inflen == 0);
2628 objsize -= size_diff;
2629 l_ad = new_size;
2630 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2631 if (fe) {
2632 fe->inf_len = udf_rw64(inflen);
2633 fe->l_ad = udf_rw32(l_ad);
2634 fe->tag.desc_crc_len = udf_rw32(crclen);
2635 } else {
2636 efe->inf_len = udf_rw64(inflen);
2637 efe->obj_size = udf_rw64(objsize);
2638 efe->l_ad = udf_rw32(l_ad);
2639 efe->tag.desc_crc_len = udf_rw32(crclen);
2640 }
2641 /* eventually copy in evacuated piece */
2642 /* set new size for uvm */
2643 uvm_vnp_setsize(vp, new_size);
2644
2645 free(node_ad_cpy, M_UDFMNT);
2646 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2647
2648 UDF_UNLOCK_NODE(udf_node, 0);
2649
2650 KASSERT(new_inflen == orig_inflen - size_diff);
2651 KASSERT(new_inflen == 0);
2652 KASSERT(new_lbrec == 0);
2653
2654 return 0;
2655 }
2656
2657 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2658 }
2659
2660 /* 5) reset node descriptors */
2661 udf_wipe_adslots(udf_node);
2662
2663 /* 6) copy back extents; merge when possible. Recounting on the fly */
2664 cpy_slots = cpy_slot;
2665
2666 c_ad = node_ad_cpy[0];
2667 slot = 0;
2668 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2669 s_ad = node_ad_cpy[cpy_slot];
2670
2671 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2672 "lb %d, len %d, flags %d\n",
2673 udf_rw16(s_ad.loc.part_num),
2674 udf_rw32(s_ad.loc.lb_num),
2675 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2676 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2677
2678 /* see if we can merge */
2679 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2680 /* not mergable (anymore) */
2681 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2682 "len %d, flags %d\n",
2683 udf_rw16(c_ad.loc.part_num),
2684 udf_rw32(c_ad.loc.lb_num),
2685 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2686 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2687
2688 error = udf_append_adslot(udf_node, &slot, &c_ad);
2689 if (error)
2690 goto errorout; /* panic? */
2691 c_ad = s_ad;
2692 slot++;
2693 }
2694 }
2695
2696 /* 7) push rest slot (if any) */
2697 if (UDF_EXT_LEN(c_ad.len) > 0) {
2698 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2699 "len %d, flags %d\n",
2700 udf_rw16(c_ad.loc.part_num),
2701 udf_rw32(c_ad.loc.lb_num),
2702 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2703 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2704
2705 error = udf_append_adslot(udf_node, &slot, &c_ad);
2706 if (error)
2707 goto errorout; /* panic? */
2708 ;
2709 }
2710
2711 inflen -= size_diff;
2712 objsize -= size_diff;
2713 if (fe) {
2714 fe->inf_len = udf_rw64(inflen);
2715 } else {
2716 efe->inf_len = udf_rw64(inflen);
2717 efe->obj_size = udf_rw64(objsize);
2718 }
2719 error = 0;
2720
2721 /* set new size for uvm */
2722 uvm_vnp_setsize(vp, new_size);
2723
2724 errorout:
2725 free(node_ad_cpy, M_UDFMNT);
2726
2727 udf_count_alloc_exts(udf_node);
2728
2729 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2730 UDF_UNLOCK_NODE(udf_node, 0);
2731
2732 KASSERT(new_inflen == orig_inflen - size_diff);
2733
2734 return error;
2735 }
2736
2737