/* $NetBSD: udf_allocation.c,v 1.18.4.2.4.1 2010/04/21 00:28:14 matt Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.18.4.2.4.1 2010/04/21 00:28:14 matt Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

/* TODO strip */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

static void udf_record_allocation_in_node(struct udf_mount *ump,
		struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
		struct long_ad *node_ad_cpy);
/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
 * searching for the same or adjacent position again.
 */
82
83 /* --------------------------------------------------------------------- */
84
85 #if 0
86 #if 1
87 static void
88 udf_node_dump(struct udf_node *udf_node) {
89 struct file_entry *fe;
90 struct extfile_entry *efe;
91 struct icb_tag *icbtag;
92 struct long_ad s_ad;
93 uint64_t inflen;
94 uint32_t icbflags, addr_type;
95 uint32_t len, lb_num;
96 uint32_t flags;
97 int part_num;
98 int lb_size, eof, slot;
99
100 if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
101 return;
102
103 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
104
105 fe = udf_node->fe;
106 efe = udf_node->efe;
107 if (fe) {
108 icbtag = &fe->icbtag;
109 inflen = udf_rw64(fe->inf_len);
110 } else {
111 icbtag = &efe->icbtag;
112 inflen = udf_rw64(efe->inf_len);
113 }
114
115 icbflags = udf_rw16(icbtag->flags);
116 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
117
118 printf("udf_node_dump %p :\n", udf_node);
119
120 if (addr_type == UDF_ICB_INTERN_ALLOC) {
121 printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
122 return;
123 }
124
125 printf("\tInflen = %"PRIu64"\n", inflen);
126 printf("\t\t");
127
128 slot = 0;
129 for (;;) {
130 udf_get_adslot(udf_node, slot, &s_ad, &eof);
131 if (eof)
132 break;
133 part_num = udf_rw16(s_ad.loc.part_num);
134 lb_num = udf_rw32(s_ad.loc.lb_num);
135 len = udf_rw32(s_ad.len);
136 flags = UDF_EXT_FLAGS(len);
137 len = UDF_EXT_LEN(len);
138
139 printf("[");
140 if (part_num >= 0)
141 printf("part %d, ", part_num);
142 printf("lb_num %d, len %d", lb_num, len);
143 if (flags)
144 printf(", flags %d", flags>>30);
145 printf("] ");
146
147 if (flags == UDF_EXT_REDIRECT) {
148 printf("\n\textent END\n\tallocation extent\n\t\t");
149 }
150
151 slot++;
152 }
153 printf("\n\tl_ad END\n\n");
154 }
155 #else
156 #define udf_node_dump(a)
157 #endif
158
159
160 static void
161 udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
162 uint32_t lb_num, uint32_t num_lb)
163 {
164 struct udf_bitmap *bitmap;
165 struct part_desc *pdesc;
166 uint32_t ptov;
167 uint32_t bitval;
168 uint8_t *bpos;
169 int bit;
170 int phys_part;
171 int ok;
172
173 DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
174 "part %d + %d sect\n", lb_num, vpart_num, num_lb));
175
176 /* get partition backing up this vpart_num */
177 pdesc = ump->partitions[ump->vtop[vpart_num]];
178
179 switch (ump->vtop_tp[vpart_num]) {
180 case UDF_VTOP_TYPE_PHYS :
181 case UDF_VTOP_TYPE_SPARABLE :
182 /* free space to freed or unallocated space bitmap */
183 ptov = udf_rw32(pdesc->start_loc);
184 phys_part = ump->vtop[vpart_num];
185
186 /* use unallocated bitmap */
187 bitmap = &ump->part_unalloc_bits[phys_part];
188
189 /* if no bitmaps are defined, bail out */
190 if (bitmap->bits == NULL)
191 break;
192
193 /* check bits */
194 KASSERT(bitmap->bits);
195 ok = 1;
196 bpos = bitmap->bits + lb_num/8;
197 bit = lb_num % 8;
198 while (num_lb > 0) {
199 bitval = (1 << bit);
200 DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
201 lb_num, bpos, bit));
202 KASSERT(bitmap->bits + lb_num/8 == bpos);
203 if (*bpos & bitval) {
204 printf("\tlb_num %d is NOT marked busy\n",
205 lb_num);
206 ok = 0;
207 }
208 lb_num++; num_lb--;
209 bit = (bit + 1) % 8;
210 if (bit == 0)
211 bpos++;
212 }
213 if (!ok) {
214 /* KASSERT(0); */
215 }
216
217 break;
218 case UDF_VTOP_TYPE_VIRT :
219 /* TODO check space */
220 KASSERT(num_lb == 1);
221 break;
222 case UDF_VTOP_TYPE_META :
223 /* TODO check space in the metadata bitmap */
224 default:
225 /* not implemented */
226 break;
227 }
228 }
229
230
231 static void
232 udf_node_sanity_check(struct udf_node *udf_node,
233 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
234 {
235 union dscrptr *dscr;
236 struct file_entry *fe;
237 struct extfile_entry *efe;
238 struct icb_tag *icbtag;
239 struct long_ad s_ad;
240 uint64_t inflen, logblksrec;
241 uint32_t icbflags, addr_type;
242 uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
243 uint16_t part_num;
244 uint8_t *data_pos;
245 int dscr_size, lb_size, flags, whole_lb;
246 int i, slot, eof;
247
248 // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
249
250 if (1)
251 udf_node_dump(udf_node);
252
253 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
254
255 fe = udf_node->fe;
256 efe = udf_node->efe;
257 if (fe) {
258 dscr = (union dscrptr *) fe;
259 icbtag = &fe->icbtag;
260 inflen = udf_rw64(fe->inf_len);
261 dscr_size = sizeof(struct file_entry) -1;
262 logblksrec = udf_rw64(fe->logblks_rec);
263 l_ad = udf_rw32(fe->l_ad);
264 l_ea = udf_rw32(fe->l_ea);
265 } else {
266 dscr = (union dscrptr *) efe;
267 icbtag = &efe->icbtag;
268 inflen = udf_rw64(efe->inf_len);
269 dscr_size = sizeof(struct extfile_entry) -1;
270 logblksrec = udf_rw64(efe->logblks_rec);
271 l_ad = udf_rw32(efe->l_ad);
272 l_ea = udf_rw32(efe->l_ea);
273 }
274 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
275 max_l_ad = lb_size - dscr_size - l_ea;
276 icbflags = udf_rw16(icbtag->flags);
277 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
278
279 /* check if tail is zero */
280 DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
281 for (i = l_ad; i < max_l_ad; i++) {
282 if (data_pos[i] != 0)
283 printf( "sanity_check: violation: node byte %d "
284 "has value %d\n", i, data_pos[i]);
285 }
286
287 /* reset counters */
288 *cnt_inflen = 0;
289 *cnt_logblksrec = 0;
290
291 if (addr_type == UDF_ICB_INTERN_ALLOC) {
292 KASSERT(l_ad <= max_l_ad);
293 KASSERT(l_ad == inflen);
294 *cnt_inflen = inflen;
295 return;
296 }
297
298 /* start counting */
299 whole_lb = 1;
300 slot = 0;
301 for (;;) {
302 udf_get_adslot(udf_node, slot, &s_ad, &eof);
303 if (eof)
304 break;
305 KASSERT(whole_lb == 1);
306
307 part_num = udf_rw16(s_ad.loc.part_num);
308 lb_num = udf_rw32(s_ad.loc.lb_num);
309 len = udf_rw32(s_ad.len);
310 flags = UDF_EXT_FLAGS(len);
311 len = UDF_EXT_LEN(len);
312
313 if (flags != UDF_EXT_REDIRECT) {
314 *cnt_inflen += len;
315 if (flags == UDF_EXT_ALLOCATED) {
316 *cnt_logblksrec += (len + lb_size -1) / lb_size;
317 }
318 } else {
319 KASSERT(len == lb_size);
320 }
321 /* check allocation */
322 if (flags == UDF_EXT_ALLOCATED)
323 udf_assert_allocated(udf_node->ump, part_num, lb_num,
324 (len + lb_size - 1) / lb_size);
325
326 /* check whole lb */
327 whole_lb = ((len % lb_size) == 0);
328
329 slot++;
330 }
331 /* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */
332
333 KASSERT(*cnt_inflen == inflen);
334 KASSERT(*cnt_logblksrec == logblksrec);
335
336 // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
337 }
338 #else
339 static void
340 udf_node_sanity_check(struct udf_node *udf_node,
341 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
342 struct file_entry *fe;
343 struct extfile_entry *efe;
344 struct icb_tag *icbtag;
345 uint64_t inflen, logblksrec;
346 int dscr_size, lb_size;
347
348 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
349
350 fe = udf_node->fe;
351 efe = udf_node->efe;
352 if (fe) {
353 icbtag = &fe->icbtag;
354 inflen = udf_rw64(fe->inf_len);
355 dscr_size = sizeof(struct file_entry) -1;
356 logblksrec = udf_rw64(fe->logblks_rec);
357 } else {
358 icbtag = &efe->icbtag;
359 inflen = udf_rw64(efe->inf_len);
360 dscr_size = sizeof(struct extfile_entry) -1;
361 logblksrec = udf_rw64(efe->logblks_rec);
362 }
363 *cnt_logblksrec = logblksrec;
364 *cnt_inflen = inflen;
365 }
366 #endif
367
368 /* --------------------------------------------------------------------- */
369
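/*
 * Free-space accounting. The logical volume integrity descriptor (lvid)
 * carries per-partition tables: the first num_part words hold the free
 * block count per partition, the next num_part words hold each partition's
 * size. A value of (uint32_t) -1 means "not applicable" for that partition.
 */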
void
udf_calc_freespace(struct udf_mount *ump, uint64_t *sizeblks, uint64_t *freeblks)
{
	struct logvol_int_desc *lvid;
	uint32_t *pos1, *pos2;
	int vpart, num_vpart;

	lvid = ump->logvol_integrity;
	*freeblks = *sizeblks = 0;

	/*
	 * Sequential media report free space directly (CD/DVD/BD-R); for
	 * other media we need the logical volume integrity.
	 *
	 * We sum all free space up here regardless of type.
	 */

	KASSERT(lvid);
	num_vpart = udf_rw32(lvid->num_part);

	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* use track info directly summing if there are 2 open */
		/* XXX assumption at most two tracks open */
		*freeblks = ump->data_track.free_blocks;
		if (ump->data_track.tracknr != ump->metadata_track.tracknr)
			*freeblks += ump->metadata_track.free_blocks;
		*sizeblks = ump->discinfo.last_possible_lba;
	} else {
		/* free and used space for mountpoint based on logvol integrity */
		for (vpart = 0; vpart < num_vpart; vpart++) {
			pos1 = &lvid->tables[0] + vpart;
			pos2 = &lvid->tables[0] + num_vpart + vpart;
			if (udf_rw32(*pos1) != (uint32_t) -1) {
				*freeblks += udf_rw32(*pos1);
				*sizeblks += udf_rw32(*pos2);
			}
		}
	}
	/* adjust for accounted uncommitted blocks */
	for (vpart = 0; vpart < num_vpart; vpart++)
		*freeblks -= ump->uncommitted_lbs[vpart];

	if (*freeblks > UDF_DISC_SLACK) {
		*freeblks -= UDF_DISC_SLACK;
	} else {
		*freeblks = 0;
	}
}


static void
udf_calc_vpart_freespace(struct udf_mount *ump, uint16_t vpart_num, uint64_t *freeblks)
{
	struct logvol_int_desc *lvid;
	uint32_t *pos1;

	lvid = ump->logvol_integrity;
	*freeblks = 0;

	/*
	 * Sequential media report free space directly (CD/DVD/BD-R); for
	 * other media we need the logical volume integrity.
	 *
	 * We sum all free space up here regardless of type.
	 */

	KASSERT(lvid);
	if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
		/* XXX assumption at most two tracks open */
		if (vpart_num == ump->data_part) {
			*freeblks = ump->data_track.free_blocks;
		} else {
			*freeblks = ump->metadata_track.free_blocks;
		}
	} else {
		/* free and used space for mountpoint based on logvol integrity */
		pos1 = &lvid->tables[0] + vpart_num;
		if (udf_rw32(*pos1) != (uint32_t) -1)
			*freeblks += udf_rw32(*pos1);
	}

	/* adjust for accounted uncommitted blocks */
	*freeblks -= ump->uncommitted_lbs[vpart_num];
}

/* --------------------------------------------------------------------- */

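/*
 * Translate a (virtual partition, logical block) address into a physical
 * disc sector. The mapping depends on the partition type: RAW maps 1:1,
 * PHYS and SPARABLE add the partition's start offset (with sparable
 * packets possibly remapped through the sparing table), VIRT indirects
 * through the VAT and META through the metadata file's allocation
 * descriptors.
 */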
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
	uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

translate_again:
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);

			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num = udf_rw32(s_icb_loc.loc.lb_num);
		vpart = udf_rw16(s_icb_loc.loc.part_num);
		lb_num += (ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
				"failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}


/* XXX provisional primitive braindead version */
/* TODO use ext_res */
void
udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
	uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
{
	struct long_ad loc;
	uint32_t lb_numres, ext_res;
	int sector;

	for (sector = 0; sector < sectors; sector++) {
		memset(&loc, 0, sizeof(struct long_ad));
		loc.loc.part_num = udf_rw16(vpart_num);
		loc.loc.lb_num = udf_rw32(*lmapping);
		udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
		*pmapping = lb_numres;
		lmapping++; pmapping++;
	}
}


/* --------------------------------------------------------------------- */

/*
 * Translate an extent (in logical blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */

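/*
 * Besides real disc addresses, the resulting map can contain the special
 * values UDF_TRANS_INTERN (file data embedded in the node itself) and
 * UDF_TRANS_ZERO (free or allocated-but-unused extents that read back as
 * zeroes).
 */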
int
udf_translate_file_extent(struct udf_node *udf_node,
	uint32_t from, uint32_t num_lb,
	uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot */
	ext_offset = from * lb_size - foffset;

	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		lb_num = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num += (ext_offset + lb_size -1) / lb_size;
		overlap = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while loop is necessary since the extent
		 * that udf_translate_vtop() returns doesn't have to span
		 * the whole extent we're processing here.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				t_ad.loc.lb_num = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
					&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}

/* --------------------------------------------------------------------- */

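/*
 * VAT (Virtual Allocation Table) handling, used on sequentially written
 * media (UDF_VTOP_TYPE_VIRT). A VAT entry of 0xffffffff marks a free
 * slot; 0xfffffffe is written below as a placeholder until the entry gets
 * its real mapping.
 */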
static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
	uint8_t *blob;
	int entry, chunk, found, error;

	KASSERT(ump);
	KASSERT(ump->logical_vol);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

	/* TODO static allocation of search chunk */

	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
	found = 0;
	error = 0;
	entry = 0;
	do {
		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
		if (chunk <= 0)
			break;
		/* load in chunk */
		error = udf_vat_read(ump->vat_node, blob, chunk,
			ump->vat_offset + lb_num * 4);

		if (error)
			break;

		/* search this chunk */
		for (entry=0; entry < chunk /4; entry++, lb_num++) {
			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
			lb_map = udf_rw32(udf_rw32_lbmap);
			if (lb_map == 0xffffffff) {
				found = 1;
				break;
			}
		}
	} while (!found);
	if (error) {
		printf("udf_search_free_vatloc: error reading in vat chunk "
			"(lb %d, size %d)\n", lb_num, chunk);
	}

	if (!found) {
		/* extend VAT */
		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
		lb_num = ump->vat_entries;
		ump->vat_entries++;
	}

	/* mark entry with initialiser just in case */
	lb_map = udf_rw32(0xfffffffe);
	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
		ump->vat_offset + lb_num *4);
	ump->vat_last_free_lb = lb_num;

	free(blob, M_UDFTEMP);
	*lbnumres = lb_num;
	return 0;
}


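/*
 * Bitmap allocation. In these bitmaps a set bit marks a free logical
 * block; allocating clears bits and freeing sets them again. Two passes
 * are made over the search window so the search can wrap around to the
 * start of the bitmap, and data allocations are heuristically kept at
 * least 1024 blocks ahead of metadata allocations to keep the two
 * pointers from interleaving.
 */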
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t *num_lb, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first set bit; a set bit marks a free block */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* returns 0 or 1..8 */
			if (bit == 0) {
				offset += 8;
				continue;
			}

			/* check for ffs overshoot */
			if (offset + bit-1 >= bitmap->max_offset) {
				offset = bitmap->max_offset;
				break;
			}

			DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
				offset + bit -1, bpos, bit-1));
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}


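/*
 * Free a run of logical blocks by setting their bits again. The run is
 * handled in three steps: the leading bits up to the first byte boundary,
 * then whole bytes, then the trailing bits.
 */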
static void
udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
{
	uint32_t offset;
	uint32_t bit, bitval;
	uint8_t *bpos;

	offset = lb_num;

	/* starter bits */
	bpos = bitmap->bits + offset/8;
	bit = offset % 8;
	while ((bit != 0) && (num_lb > 0)) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
	if (num_lb == 0)
		return;

	/* whole bytes */
	KASSERT(bit == 0);
	bpos = bitmap->bits + offset / 8;
	while (num_lb >= 8) {
		KASSERT((*bpos == 0));
		DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
		*bpos = 255;
		offset += 8; num_lb -= 8;
		bpos++;
	}

	/* stop bits */
	KASSERT(num_lb < 8);
	bit = 0;
	while (num_lb > 0) {
		bitval = (1 << bit);
		KASSERT((*bpos & bitval) == 0);
		DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
			offset, bpos, bit));
		*bpos |= bitval;
		offset++; num_lb--;
		bit = (bit + 1) % 8;
	}
}

/* --------------------------------------------------------------------- */

/*
 * We check for overall disc space with a margin to prevent critical
 * conditions. If disc space is low we try to force a sync() to improve our
 * estimates. When confronted with a meta-data partition size shortage we
 * have to check whether it can be extended, and extend it when needed.
 *
 * A second strategy we could use when disc space is getting low on a disc
 * formatted with a meta-data partition is to see if there are sparse areas
 * in the meta-data partition and free blocks there for extra data.
 */

void
udf_do_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	uint16_t vpart_num, uint32_t num_lb)
{
	ump->uncommitted_lbs[vpart_num] += num_lb;
	if (udf_node)
		udf_node->uncommitted_lbs += num_lb;
}


void
udf_do_unreserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	uint16_t vpart_num, uint32_t num_lb)
{
	ump->uncommitted_lbs[vpart_num] -= num_lb;
	if (ump->uncommitted_lbs[vpart_num] < 0) {
		DPRINTF(RESERVE, ("UDF: underflow on partition reservation, "
			"part %d: %d\n", vpart_num,
			ump->uncommitted_lbs[vpart_num]));
		ump->uncommitted_lbs[vpart_num] = 0;
	}
	if (udf_node) {
		udf_node->uncommitted_lbs -= num_lb;
		if (udf_node->uncommitted_lbs < 0) {
			DPRINTF(RESERVE, ("UDF: underflow of node "
				"reservation : %d\n",
				udf_node->uncommitted_lbs));
			udf_node->uncommitted_lbs = 0;
		}
	}
}


int
udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail)
{
	uint64_t freeblks;
	uint64_t slack;
	int i, error;

	slack = 0;
	if (can_fail)
		slack = UDF_DISC_SLACK;

	error = 0;
	mutex_enter(&ump->allocate_mutex);

	/* check if there is enough space available */
	for (i = 0; i < 16; i++) {	/* XXX arbitrary number */
		udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
		if (num_lb + slack < freeblks)
			break;
		/* issue SYNC */
		DPRINTF(RESERVE, ("udf_reserve_space: issuing sync\n"));
		mutex_exit(&ump->allocate_mutex);
		udf_do_sync(ump, FSCRED, 0);
		mutex_enter(&mntvnode_lock);
		/* 1/4 second wait */
		cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
			hz/4);
		mutex_exit(&mntvnode_lock);
		mutex_enter(&ump->allocate_mutex);
	}

	/* check if there is enough space available now */
	udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
	if (num_lb + slack >= freeblks) {
		DPRINTF(RESERVE, ("udf_reserve_space: try to juggle partitions\n"));
		/* TODO juggle with data and metadata partitions if possible */
	}

	/* check if there is enough space available now */
	udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
	if (num_lb + slack <= freeblks) {
		udf_do_reserve_space(ump, udf_node, vpart_num, num_lb);
	} else {
		DPRINTF(RESERVE, ("udf_reserve_space: out of disc space\n"));
		error = ENOSPC;
	}

	mutex_exit(&ump->allocate_mutex);
	return error;
}


void
udf_cleanup_reservation(struct udf_node *udf_node)
{
	struct udf_mount *ump = udf_node->ump;
	int vpart_num;

	mutex_enter(&ump->allocate_mutex);

	/* compensate for overlapping blocks */
	DPRINTF(RESERVE, ("UDF: overlapped %d blocks in count\n", udf_node->uncommitted_lbs));

	vpart_num = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
	udf_do_unreserve_space(ump, udf_node, vpart_num, udf_node->uncommitted_lbs);

	DPRINTF(RESERVE, ("\ttotal now %d\n", ump->uncommitted_lbs[vpart_num]));

	/* sanity */
	if (ump->uncommitted_lbs[vpart_num] < 0)
		ump->uncommitted_lbs[vpart_num] = 0;

	mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Allocate an extent of given length on a given virtual partition. It
 * doesn't have to be one contiguous stretch.
 */

int
udf_allocate_space(struct udf_mount *ump, struct udf_node *udf_node,
	int udf_c_type, uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_type, error;
	int is_node;

	DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d)\n",
		udf_c_type, vpart_num, num_lb));
	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	alloc_type = ump->vtop_alloc[vpart_num];
	is_node = (udf_c_type == UDF_C_NODE);

	lmappos = lmapping;
	error = 0;
	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;

			/* reserve on the backing sequential partition since
			 * that partition is credited back later */
			udf_do_reserve_space(ump, udf_node,
				ump->vtop[vpart_num], num_lb);
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* get partition backing up this vpart_num */
		pdesc = ump->partitions[ump->vtop[vpart_num]];

		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* get our track descriptors */
		if (vpart_num == ump->node_part) {
			alloc_track = &ump->metadata_track;
			other_track = &ump->data_track;
		} else {
			alloc_track = &ump->data_track;
			other_track = &ump->metadata_track;
		}

		/* allocate */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}

		/* keep other track up-to-date */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		/* try to allocate on unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[vpart_num];
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :		/* UDF 2.50, 2.60 BluRay-RE */
		/* allocate on metadata unallocated bits */
		alloc_num_lb = num_lb;
		bitmap = &ump->metadata_unalloc_bits;
		udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;

		/* have we allocated all? */
		if (alloc_num_lb) {
			/* YIKES! TODO we need to extend the metadata partition */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + vpart_num;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METASEQUENTIAL :		/* UDF 2.60       BluRay-R  */
	case UDF_ALLOC_RELAXEDSEQUENTIAL :	/* UDF 2.50/~meta BluRay-R  */
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

	if (!error) {
		/* credit our partition since we have committed the space */
		udf_do_unreserve_space(ump, udf_node, vpart_num, num_lb);
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		printf("udf_allocate_space, allocated logical lba :\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("%s %"PRIu64, (lb_num > 0)?",":"",
				*lmappos++);
		}
		printf("\n");
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}

/* --------------------------------------------------------------------- */

void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
		"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	/* no use freeing zero length */
	if (num_lb == 0)
		return;

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out; XXX OK? */
		if (bitmap->bits == NULL)
			break;

		/* free bits if it's defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
		bitmap = &ump->metadata_unalloc_bits;
		KASSERT(bitmap->bits);

		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}

/* --------------------------------------------------------------------- */

/*
 * Allocate a buf on disc for direct write out. The space doesn't have to be
 * contiguous as the caller takes care of this.
 */

void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	int lb_size, blks, udf_c_type;
	int vpart_num, num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If it's userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb = (buf->b_bcount + lb_size -1) / lb_size;
	blks = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	/* select partition to record the buffer on */
	vpart_num = *vpart_nump = udf_get_record_vpart(ump, udf_c_type);

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, it's already allocated */
		if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
			return;

		/* allocate on its backing sequential partition */
		vpart_num = ump->data_part;
	}

	/* do allocation on the selected partition */
	error = udf_allocate_space(ump, udf_node, udf_c_type,
		vpart_num, num_lb, lmapping);
	if (error) {
		/*
		 * ARGH! we haven't done our accounting right! it should
		 * always succeed.
		 */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* If it's userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) ||
	    (udf_c_type == UDF_C_FIDS) ||
	    (udf_c_type == UDF_C_METADATA_SBM))
	{
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
		udf_node->outstanding_bufs--;
		splx(s);
	}
}

/* --------------------------------------------------------------------- */

/*
 * Try to merge the new piece a2 into a1. Returns nonzero when merging is
 * not (or no longer) possible; any unmerged remainder is returned in a2.
 */

static int
udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
{
	uint32_t max_len, merge_len;
	uint32_t a1_len, a2_len;
	uint32_t a1_flags, a2_flags;
	uint32_t a1_lbnum, a2_lbnum;
	uint16_t a1_part, a2_part;

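	/*
	 * Extent lengths carry their flags in the top two bits, so an
	 * extent is at most UDF_EXT_MAXLEN bytes; round that down to a
	 * whole number of logical blocks for the merge limit.
	 */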
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
	a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
	a1_lbnum = udf_rw32(a1->loc.lb_num);
	a1_part = udf_rw16(a1->loc.part_num);

	a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
	a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
	a2_lbnum = udf_rw32(a2->loc.lb_num);
	a2_part = udf_rw16(a2->loc.part_num);

	/* defines same space */
	if (a1_flags != a2_flags)
		return 1;

	if (a1_flags != UDF_EXT_FREE) {
		/* the same partition */
		if (a1_part != a2_part)
			return 1;

		/* a2 is successor of a1 */
		if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
			return 1;
	}

	/* merge as much from a2 as possible */
	merge_len = MIN(a2_len, max_len - a1_len);
	a1_len += merge_len;
	a2_len -= merge_len;
	a2_lbnum += merge_len/lb_size;

	a1->len = udf_rw32(a1_len | a1_flags);
	a2->len = udf_rw32(a2_len | a2_flags);
	a2->loc.lb_num = udf_rw32(a2_lbnum);

	if (a2_len > 0)
		return 1;

	/* a2 was merged in completely */
	return 0;
}

/* --------------------------------------------------------------------- */

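/*
 * Wipe all allocation descriptors from the node: clear the descriptor
 * area in the (extended) file entry and in every allocation extent,
 * resetting l_ad, logblks_rec and the CRC lengths so the allocation can
 * be rebuilt from scratch.
 */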
static void
udf_wipe_adslots(struct udf_node *udf_node)
{
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	uint64_t inflen, objsize;
	uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
	uint8_t *data_pos;
	int extnr;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	max_l_ad = lb_size - dscr_size - l_ea;

	/* wipe fe/efe */
	memset(data_pos, 0, max_l_ad);
	crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
	if (fe) {
		fe->l_ad = udf_rw32(0);
		fe->logblks_rec = udf_rw64(0);
		fe->tag.desc_crc_len = udf_rw16(crclen);
	} else {
		efe->l_ad = udf_rw32(0);
		efe->logblks_rec = udf_rw64(0);
		efe->tag.desc_crc_len = udf_rw16(crclen);
	}

	/* wipe all allocation extent entries */
	for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		data_pos = (uint8_t *) ext->data;
		max_l_ad = lb_size - dscr_size;
		memset(data_pos, 0, max_l_ad);
		ext->l_ad = udf_rw32(0);

		crclen = dscr_size - UDF_DESC_TAG_LENGTH;
		ext->tag.desc_crc_len = udf_rw16(crclen);
	}
	udf_node->i_flags |= IN_NODE_REBUILD;
}

/* --------------------------------------------------------------------- */

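/*
 * Fetch allocation descriptor `slot' from the node. Descriptors live
 * first in the (extended) file entry itself; when a descriptor area ends
 * in an UDF_EXT_REDIRECT extent the walk continues in the next allocation
 * extent descriptor. Short ads are widened to long ads using the node's
 * own partition number. *eof is set when the slot is past the last entry.
 */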
void
udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
	int *eof) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, l_icb;
	uint32_t offset;
	uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, it's EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		memset(icb, 0, sizeof(struct long_ad));
		*eof = 1;
		return;
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			l_ad = 0;	/* force EOF */
			break;
		}

		/* advance to next extent */
		extnr++;
		if (extnr >= udf_node->num_extensions) {
			l_ad = 0;	/* force EOF */
			break;
		}
		offset = offset - l_ad;
		ext = udf_node->ext[extnr];
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		l_ad = udf_rw32(ext->l_ad);
		data_pos = (uint8_t *) ext + dscr_size;
	}

	/* XXX l_ad == 0 should be enough to check */
	*eof = (offset >= l_ad) || (l_ad == 0);
	if (*eof) {
		DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
			"l_ad %d\n", extnr, offset, l_ad));
		memset(icb, 0, sizeof(struct long_ad));
		return;
	}

	/* get the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		icb->len = short_ad->len;
		icb->loc.part_num = udf_node->loc.loc.part_num;
		icb->loc.lb_num = short_ad->lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*icb = *long_ad;
	}
	DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
		"flags %d\n", icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
}

/* --------------------------------------------------------------------- */

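/*
 * Write allocation descriptor `*slot' in the node, either overwriting an
 * existing entry or appending one past the last. When the current
 * descriptor area is (nearly) full, a new allocation extent descriptor
 * (AED) is allocated and a redirect entry is written in its place, after
 * which *slot is bumped to compensate for the inserted link.
 */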
int
udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
	struct udf_mount *ump = udf_node->ump;
	union dscrptr *dscr, *extdscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb, l_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint64_t lmapping;
	uint32_t offset, rest, len, lb_num;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint32_t flags;
	uint16_t vpart_num;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	vpart_num = udf_rw16(udf_node->loc.loc.part_num);

	/* determine what descriptor we are in */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea = udf_rw32(fe->l_ea);
		l_ad_p = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag = &efe->icbtag;
		dscr = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea = udf_rw32(efe->l_ea);
		l_ad_p = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* appending to an intern allocation makes no sense; it's a bug */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* clean up given long_ad since it can be a synthesized one */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	if (flags == UDF_EXT_FREE) {
		icb->loc.part_num = udf_rw16(0);
		icb->loc.lb_num = udf_rw32(0);
	}

	/* if offset too big, we go to the allocation extensions */
	l_ad = udf_rw32(*l_ad_p);
	offset = (*slot) * adlen;
	extnr = -1;
	while (offset >= l_ad) {
		/* check if our last entry is a redirect */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
			l_icb.len = short_ad->len;
			l_icb.loc.part_num = udf_node->loc.loc.part_num;
			l_icb.loc.lb_num = short_ad->lb_num;
		} else {
			KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
			long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
			l_icb = *long_ad;
		}
		flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
		if (flags != UDF_EXT_REDIRECT) {
			/* only one past the last one is addressable */
			break;
		}

		/* advance to next extent */
		extnr++;
		KASSERT(extnr < udf_node->num_extensions);
		offset = offset - l_ad;

		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		data_pos = (uint8_t *) ext + dscr_size;
	}
	DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
		extnr, offset, udf_rw32(*l_ad_p)));
	KASSERT(l_ad == udf_rw32(*l_ad_p));

	/* offset is offset within the current (E)FE/AED */
	l_ad = udf_rw32(*l_ad_p);
	crclen = udf_rw16(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* check if we're not appending a redirection */
	flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
	KASSERT(flags != UDF_EXT_REDIRECT);

	/* round down available space */
	rest = adlen * ((max_l_ad - offset) / adlen);
	if (rest <= adlen) {
		/* have to append aed, see if we already have a spare one */
		extnr++;
		ext = udf_node->ext[extnr];
		l_icb = udf_node->ext_loc[extnr];
		if (ext == NULL) {
			DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));

			error = udf_reserve_space(ump, NULL, UDF_C_NODE,
				vpart_num, 1, /* can fail */ false);
			if (error) {
				printf("UDF: couldn't reserve space for AED!\n");
				return error;
			}
			error = udf_allocate_space(ump, NULL, UDF_C_NODE,
				vpart_num, 1, &lmapping);
			lb_num = lmapping;
			if (error)
				panic("UDF: couldn't allocate AED!\n");

			/* initialise pointer to location */
			memset(&l_icb, 0, sizeof(struct long_ad));
			l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
			l_icb.loc.lb_num = udf_rw32(lb_num);
			l_icb.loc.part_num = udf_rw16(vpart_num);

			/* create new aed descriptor */
			udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
			ext = &extdscr->aee;

			udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
			dscr_size = sizeof(struct alloc_ext_entry) -1;
			max_l_ad = lb_size - dscr_size;
			memset(ext->data, 0, max_l_ad);
			ext->l_ad = udf_rw32(0);
			ext->tag.desc_crc_len =
				udf_rw16(dscr_size - UDF_DESC_TAG_LENGTH);

			/* declare aed */
			udf_node->num_extensions++;
			udf_node->ext_loc[extnr] = l_icb;
			udf_node->ext[extnr] = ext;
		}
		/* add redirect and adjust l_ad and crclen for old descr */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			short_ad->len = l_icb.len;
			short_ad->lb_num = l_icb.loc.lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			*long_ad = l_icb;
		}
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw16(crclen);
		*l_ad_p = udf_rw32(l_ad);

		/* advance to the new extension */
		KASSERT(ext != NULL);
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		l_ad_p = &ext->l_ad;
		l_ad = udf_rw32(*l_ad_p);
		crclen = udf_rw16(dscr->tag.desc_crc_len);
		offset = 0;

		/* adjust the caller's slot count for the link insert */
		*slot += 1;
	}

	/* write out the element */
	DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
		"len %d, flags %d\n", data_pos + offset,
		icb->loc.part_num, icb->loc.lb_num,
		UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	len = udf_rw32(icb->len);
	flags = UDF_EXT_FLAGS(len);
	if (flags == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed */
	if (offset >= l_ad) {
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw16(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}

/* --------------------------------------------------------------------- */

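/*
 * Count the allocation extents actually referenced from the node and free
 * any trailing ones that are no longer reachable, returning both their
 * descriptors and their disc space.
 */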
1904 static void
1905 udf_count_alloc_exts(struct udf_node *udf_node)
1906 {
1907 struct long_ad s_ad;
1908 uint32_t lb_num, len, flags;
1909 uint16_t vpart_num;
1910 int slot, eof;
1911 int num_extents, extnr;
1912 int lb_size;
1913
1914 if (udf_node->num_extensions == 0)
1915 return;
1916
1917 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1918 /* count number of allocation extents in use */
1919 num_extents = 0;
1920 slot = 0;
1921 for (;;) {
1922 udf_get_adslot(udf_node, slot, &s_ad, &eof);
1923 if (eof)
1924 break;
1925 len = udf_rw32(s_ad.len);
1926 flags = UDF_EXT_FLAGS(len);
1927
1928 if (flags == UDF_EXT_REDIRECT)
1929 num_extents++;
1930
1931 slot++;
1932 }
1933
1934 DPRINTF(ALLOC, ("udf_count_alloc_exts counted %d live extents\n",
1935 num_extents));
1936
1937 /* XXX choice: we could delay freeing them on node writeout */
1938 /* free excess entries */
1939 extnr = num_extents;
1940 for (;extnr < udf_node->num_extensions; extnr++) {
1941 DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
1942 /* free descriptor */
1943 s_ad = udf_node->ext_loc[extnr];
1944 udf_free_logvol_dscr(udf_node->ump, &s_ad,
1945 udf_node->ext[extnr]);
1946 udf_node->ext[extnr] = NULL;
1947
1948 /* free disc space */
1949 lb_num = udf_rw32(s_ad.loc.lb_num);
1950 vpart_num = udf_rw16(s_ad.loc.part_num);
1951 udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
1952
1953 memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
1954 }
1955
1956 /* set our new number of allocation extents */
1957 udf_node->num_extensions = num_extents;
1958 }
1959
1960
1961 /* --------------------------------------------------------------------- */
1962
1963 /*
1964 * Adjust the node's allocation descriptors to reflect the new mapping;
1965 * note that we might glue onto existing allocation descriptors.
1966 *
1967 * XXX Note that only one allocation can be recorded per mount at a
1968 * time; maybe do explicit allocation in the schedule thread?
1969 */
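/*
* Outline of the numbered steps below: extents in front of the written
* range are copied to a scratch array (1), the slot overlapping the
* start of the range is truncated (2), extents describing the new
* mapping are inserted (3), the replaced extents are popped and their
* backing space freed (4), the tail is copied (5), the node's
* descriptors are wiped (6) and the scratch array is written back,
* merging adjacent extents where possible (7, 8).
*/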
1970
1971 static void
1972 udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
1973 uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
1974 {
1975 struct vnode *vp = buf->b_vp;
1976 struct udf_node *udf_node = VTOI(vp);
1977 struct file_entry *fe;
1978 struct extfile_entry *efe;
1979 struct icb_tag *icbtag;
1980 struct long_ad s_ad, c_ad;
1981 uint64_t inflen, from, till;
1982 uint64_t foffset, end_foffset, restart_foffset;
1983 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
1984 uint32_t num_lb, len, flags, lb_num;
1985 uint32_t run_start;
1986 uint32_t slot_offset, replace_len, replace;
1987 int addr_type, icbflags;
1988 // int udf_c_type = buf->b_udf_c_type;
1989 int lb_size, run_length, eof;
1990 int slot, cpy_slot, cpy_slots, restart_slot;
1991 int error;
1992
1993 DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
1994
1995 #if 0
1996 /* XXX disable sanity check for now */
1997 /* sanity check ... should be panic ? */
1998 if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
1999 return;
2000 #endif
2001
2002 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2003
2004 /* do the job */
2005 UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
2006 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2007
2008 fe = udf_node->fe;
2009 efe = udf_node->efe;
2010 if (fe) {
2011 icbtag = &fe->icbtag;
2012 inflen = udf_rw64(fe->inf_len);
2013 } else {
2014 icbtag = &efe->icbtag;
2015 inflen = udf_rw64(efe->inf_len);
2016 }
2017
2018 /* make sure `till' does not extend past the file's information length */
2019 from = buf->b_lblkno * lb_size;
2020 till = MIN(inflen, from + buf->b_resid);
2021
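/* rounded up, so a trailing partial block still gets a mapping slot */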
2022 num_lb = (till - from + lb_size -1) / lb_size;
2023
2024 DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
2025
2026 icbflags = udf_rw16(icbtag->flags);
2027 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2028
2029 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2030 /* nothing to do */
2031 /* XXX clean up rest of node? just in case? */
2032 UDF_UNLOCK_NODE(udf_node, 0);
2033 return;
2034 }
2035
2036 slot = 0;
2037 cpy_slot = 0;
2038 foffset = 0;
2039
2040 /* 1) copy till first overlap piece to the rewrite buffer */
2041 for (;;) {
2042 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2043 if (eof) {
2044 DPRINTF(WRITE,
2045 ("Record allocation in node "
2046 "failed: encountered EOF\n"));
2047 UDF_UNLOCK_NODE(udf_node, 0);
2048 buf->b_error = EINVAL;
2049 return;
2050 }
2051 len = udf_rw32(s_ad.len);
2052 flags = UDF_EXT_FLAGS(len);
2053 len = UDF_EXT_LEN(len);
2054
2055 if (flags == UDF_EXT_REDIRECT) {
2056 slot++;
2057 continue;
2058 }
2059
2060 end_foffset = foffset + len;
2061 if (end_foffset > from)
2062 break; /* found */
2063
2064 node_ad_cpy[cpy_slot++] = s_ad;
2065
2066 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2067 "-> stack\n",
2068 udf_rw16(s_ad.loc.part_num),
2069 udf_rw32(s_ad.loc.lb_num),
2070 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2071 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2072
2073 foffset = end_foffset;
2074 slot++;
2075 }
2076 restart_slot = slot;
2077 restart_foffset = foffset;
2078
2079 /* 2) trunc overlapping slot at overlap and copy it */
2080 slot_offset = from - foffset;
2081 if (slot_offset > 0) {
2082 DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
2083 slot_offset, flags >> 30, flags));
2084
2085 s_ad.len = udf_rw32(slot_offset | flags);
2086 node_ad_cpy[cpy_slot++] = s_ad;
2087
2088 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2089 "-> stack\n",
2090 udf_rw16(s_ad.loc.part_num),
2091 udf_rw32(s_ad.loc.lb_num),
2092 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2093 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2094 }
2095 foffset += slot_offset;
2096
2097 /* 3) insert new mappings */
2098 memset(&s_ad, 0, sizeof(struct long_ad));
2100 for (lb_num = 0; lb_num < num_lb; lb_num++) {
2101 run_start = mapping[lb_num];
2102 run_length = 1;
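/* greedily extend the run while the next logical block maps to the
* next physical block; a duplicate mapping also continues the run */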
2103 while (lb_num < num_lb-1) {
2104 if ((mapping[lb_num+1] != mapping[lb_num]+1) &&
2105 (mapping[lb_num+1] != mapping[lb_num]))
2106 break;
2107 run_length++;
2108 lb_num++;
2109 }
2110 /* insert slot for this mapping */
2111 len = run_length * lb_size;
2112
2113 /* bounds checking */
2114 if (foffset + len > till)
2115 len = till - foffset;
2116 KASSERT(foffset + len <= inflen);
2117
2118 s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
2119 s_ad.loc.part_num = udf_rw16(vpart_num);
2120 s_ad.loc.lb_num = udf_rw32(run_start);
2121
2122 foffset += len;
2123
2124 /* paranoia */
2125 if (len == 0) {
2126 DPRINTF(WRITE,
2127 ("Record allocation in node "
2128 "failed: insert failed\n"));
2129 UDF_UNLOCK_NODE(udf_node, 0);
2130 buf->b_error = EINVAL;
2131 return;
2132 }
2133 node_ad_cpy[cpy_slot++] = s_ad;
2134
2135 DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
2136 "flags %d -> stack\n",
2137 udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
2138 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2139 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2140 }
2141
2142 /* 4) pop replaced length */
2143 slot = restart_slot;
2144 foffset = restart_foffset;
2145
2146 replace_len = till - foffset; /* total number of bytes to pop */
2147 slot_offset = from - foffset; /* offset in first encountered slot */
2148 KASSERT((slot_offset % lb_size) == 0);
2149
2150 for (;;) {
2151 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2152 if (eof)
2153 break;
2154
2155 len = udf_rw32(s_ad.len);
2156 flags = UDF_EXT_FLAGS(len);
2157 len = UDF_EXT_LEN(len);
2158 lb_num = udf_rw32(s_ad.loc.lb_num);
2159
2160 if (flags == UDF_EXT_REDIRECT) {
2161 slot++;
2162 continue;
2163 }
2164
2165 DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
2166 "replace_len %d, "
2167 "vp %d, lb %d, len %d, flags %d\n",
2168 slot, slot_offset, replace_len,
2169 udf_rw16(s_ad.loc.part_num),
2170 udf_rw32(s_ad.loc.lb_num),
2171 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2172 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2173
2174 /* adjust for slot offset */
2175 if (slot_offset) {
2176 DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
2177 lb_num += slot_offset / lb_size;
2178 len -= slot_offset;
2179 foffset += slot_offset;
2180 replace_len -= slot_offset;
2181
2182 /* mark adjusted */
2183 slot_offset = 0;
2184 }
2185
2186 /* advance for (the rest of) this slot */
2187 replace = MIN(len, replace_len);
2188 DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
2189
2190 /* advance for this slot */
2191 if (replace) {
2192 /* note: don't round DOWN on num_lb since we would then
2193 * forget the last partial one */
2194 num_lb = (replace + lb_size - 1) / lb_size;
2195 if (flags != UDF_EXT_FREE) {
2196 udf_free_allocated_space(ump, lb_num,
2197 udf_rw16(s_ad.loc.part_num), num_lb);
2198 }
2199 lb_num += num_lb;
2200 len -= replace;
2201 foffset += replace;
2202 replace_len -= replace;
2203 }
2204
2205 /* do we have a slot tail ? */
2206 if (len) {
2207 KASSERT(foffset % lb_size == 0);
2208
2209 /* we arrived at our point, push remainder */
2210 s_ad.len = udf_rw32(len | flags);
2211 s_ad.loc.lb_num = udf_rw32(lb_num);
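/* free extents record no block location */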
2212 if (flags == UDF_EXT_FREE)
2213 s_ad.loc.lb_num = udf_rw32(0);
2214 node_ad_cpy[cpy_slot++] = s_ad;
2215 foffset += len;
2216 slot++;
2217
2218 DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
2219 "-> stack\n",
2220 udf_rw16(s_ad.loc.part_num),
2221 udf_rw32(s_ad.loc.lb_num),
2222 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2223 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2224 break;
2225 }
2226
2227 slot++;
2228 }
2229
2230 /* 5) copy remainder */
2231 for (;;) {
2232 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2233 if (eof)
2234 break;
2235
2236 len = udf_rw32(s_ad.len);
2237 flags = UDF_EXT_FLAGS(len);
2238 len = UDF_EXT_LEN(len);
2239
2240 if (flags == UDF_EXT_REDIRECT) {
2241 slot++;
2242 continue;
2243 }
2244
2245 node_ad_cpy[cpy_slot++] = s_ad;
2246
2247 DPRINTF(ALLOC, ("\t5: copy remainder "
2248 "vp %d lb %d, len %d, flags %d "
2249 "-> stack\n",
2250 udf_rw16(s_ad.loc.part_num),
2251 udf_rw32(s_ad.loc.lb_num),
2252 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2253 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2254
2255 slot++;
2256 }
2257
2258 /* 6) reset node descriptors */
2259 udf_wipe_adslots(udf_node);
2260
2261 /* 7) copy back extents; merge when possible. Recounting on the fly */
2262 cpy_slots = cpy_slot;
2263
2264 c_ad = node_ad_cpy[0];
2265 slot = 0;
2266 DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
2267 "lb %d, len %d, flags %d\n",
2268 udf_rw16(c_ad.loc.part_num),
2269 udf_rw32(c_ad.loc.lb_num),
2270 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2271 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2272
2273 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2274 s_ad = node_ad_cpy[cpy_slot];
2275
2276 DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
2277 "lb %d, len %d, flags %d\n",
2278 udf_rw16(s_ad.loc.part_num),
2279 udf_rw32(s_ad.loc.lb_num),
2280 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2281 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2282
2283 /* see if we can merge */
2284 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2285 /* not mergeable (anymore) */
2286 DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
2287 "len %d, flags %d\n",
2288 udf_rw16(c_ad.loc.part_num),
2289 udf_rw32(c_ad.loc.lb_num),
2290 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2291 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2292
2293 error = udf_append_adslot(udf_node, &slot, &c_ad);
2294 if (error) {
2295 buf->b_error = error;
2296 goto out;
2297 }
2298 c_ad = s_ad;
2299 slot++;
2300 }
2301 }
2302
2303 /* 8) push rest slot (if any) */
2304 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2305 DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
2306 "len %d, flags %d\n",
2307 udf_rw16(c_ad.loc.part_num),
2308 udf_rw32(c_ad.loc.lb_num),
2309 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2310 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2311
2312 error = udf_append_adslot(udf_node, &slot, &c_ad);
2313 if (error) {
2314 buf->b_error = error;
2315 goto out;
2316 }
2317 }
2318
2319 out:
2320 udf_count_alloc_exts(udf_node);
2321
2322 /* the node's descriptors should now be sane */
2323 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2324 UDF_UNLOCK_NODE(udf_node, 0);
2325
2326 KASSERT(orig_inflen == new_inflen);
2327 KASSERT(new_lbrec >= orig_lbrec);
2328
2329 return;
2330 }
2331
2332 /* --------------------------------------------------------------------- */
2333
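/*
* Grow the node to `new_size'. Internally stored (embedded) data is
* grown in place while it still fits; otherwise the embedded data is
* evacuated, the node is converted to short or long allocation
* descriptors and free (not yet backed) extents are appended for the
* new space.
*/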
2334 int
2335 udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
2336 {
2337 union dscrptr *dscr;
2338 struct vnode *vp = udf_node->vnode;
2339 struct udf_mount *ump = udf_node->ump;
2340 struct file_entry *fe;
2341 struct extfile_entry *efe;
2342 struct icb_tag *icbtag;
2343 struct long_ad c_ad, s_ad;
2344 uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
2345 uint64_t foffset, end_foffset;
2346 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2347 uint32_t lb_size, dscr_size, crclen, lastblock_grow;
2348 uint32_t icbflags, len, flags, max_len;
2349 uint32_t max_l_ad, l_ad, l_ea;
2350 uint16_t my_part, dst_part;
2351 uint8_t *data_pos, *evacuated_data;
2352 int addr_type;
2353 int slot, cpy_slot;
2354 int eof, error;
2355
2356 DPRINTF(ALLOC, ("udf_grow_node\n"));
2357
2358 UDF_LOCK_NODE(udf_node, 0);
2359 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2360
2361 lb_size = udf_rw32(ump->logical_vol->lb_size);
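/* an extent's length field is limited to UDF_EXT_MAXLEN; round down
* to whole logical blocks and grow in chunks of at most this size */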
2362 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2363
2364 fe = udf_node->fe;
2365 efe = udf_node->efe;
2366 if (fe) {
2367 dscr = (union dscrptr *) fe;
2368 icbtag = &fe->icbtag;
2369 inflen = udf_rw64(fe->inf_len);
2370 objsize = inflen;
2371 dscr_size = sizeof(struct file_entry) -1;
2372 l_ea = udf_rw32(fe->l_ea);
2373 l_ad = udf_rw32(fe->l_ad);
2374 } else {
2375 dscr = (union dscrptr *) efe;
2376 icbtag = &efe->icbtag;
2377 inflen = udf_rw64(efe->inf_len);
2378 objsize = udf_rw64(efe->obj_size);
2379 dscr_size = sizeof(struct extfile_entry) -1;
2380 l_ea = udf_rw32(efe->l_ea);
2381 l_ad = udf_rw32(efe->l_ad);
2382 }
2383 data_pos = (uint8_t *) dscr + dscr_size + l_ea;
2384 max_l_ad = lb_size - dscr_size - l_ea;
2385
2386 icbflags = udf_rw16(icbtag->flags);
2387 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2388
2389 old_size = inflen;
2390 size_diff = new_size - old_size;
2391
2392 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2393
2394 evacuated_data = NULL;
2395 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2396 if (l_ad + size_diff <= max_l_ad) {
2397 /* only reflect size change directly in the node */
2398 inflen += size_diff;
2399 objsize += size_diff;
2400 l_ad += size_diff;
2401 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2402 if (fe) {
2403 fe->inf_len = udf_rw64(inflen);
2404 fe->l_ad = udf_rw32(l_ad);
2405 fe->tag.desc_crc_len = udf_rw16(crclen);
2406 } else {
2407 efe->inf_len = udf_rw64(inflen);
2408 efe->obj_size = udf_rw64(objsize);
2409 efe->l_ad = udf_rw32(l_ad);
2410 efe->tag.desc_crc_len = udf_rw16(crclen);
2411 }
2412 error = 0;
2413
2414 /* set new size for uvm */
2415 uvm_vnp_setsize(vp, old_size);
2416 uvm_vnp_setwritesize(vp, new_size);
2417
2418 #if 0
2419 /* zero append space in buffer */
2420 uvm_vnp_zerorange(vp, old_size, new_size - old_size);
2421 #endif
2422
2423 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2424
2425 /* unlock */
2426 UDF_UNLOCK_NODE(udf_node, 0);
2427
2428 KASSERT(new_inflen == orig_inflen + size_diff);
2429 KASSERT(new_lbrec == orig_lbrec);
2430 KASSERT(new_lbrec == 0);
2431 return 0;
2432 }
2433
2434 DPRINTF(ALLOC, ("\tCONVERT from internal\n"));
2435
2436 if (old_size > 0) {
2437 /* allocate some space and copy in the stuff to keep */
2438 evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
2439 memset(evacuated_data, 0, lb_size);
2440
2441 /* vnode is locked, so dropping the node lock here is safe */
2442 UDF_UNLOCK_NODE(udf_node, 0);
2443
2444 /* read in using the `normal' vn_rdwr() */
2445 error = vn_rdwr(UIO_READ, udf_node->vnode,
2446 evacuated_data, old_size, 0,
2447 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2448 FSCRED, NULL, NULL);
2449
2450 /* enter again */
2451 UDF_LOCK_NODE(udf_node, 0);
2452 }
2453
2454 /* convert to a normal alloc and select type */
2455 my_part = udf_rw16(udf_node->loc.loc.part_num);
2456 dst_part = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
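/* short_ads can only address blocks in the node's own partition;
* recording on another partition requires long_ads */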
2457 addr_type = UDF_ICB_SHORT_ALLOC;
2458 if (dst_part != my_part)
2459 addr_type = UDF_ICB_LONG_ALLOC;
2460
2461 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2462 icbflags |= addr_type;
2463 icbtag->flags = udf_rw16(icbflags);
2464
2465 /* wipe old descriptor space */
2466 udf_wipe_adslots(udf_node);
2467
2468 memset(&c_ad, 0, sizeof(struct long_ad));
2469 c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
2470 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2471 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2472
2473 slot = 0;
2474 } else {
2475 /* go to the last entry (if any) */
2476 slot = 0;
2477 cpy_slot = 0;
2478 foffset = 0;
2479 memset(&c_ad, 0, sizeof(struct long_ad));
2480 for (;;) {
2481 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2482 if (eof)
2483 break;
2484
2485 len = udf_rw32(c_ad.len);
2486 flags = UDF_EXT_FLAGS(len);
2487 len = UDF_EXT_LEN(len);
2488
2489 end_foffset = foffset + len;
2490 if (flags != UDF_EXT_REDIRECT)
2491 foffset = end_foffset;
2492
2493 slot++;
2494 }
2495 /* at end of adslots */
2496
2497 /* special case: if the old size was zero there is no last slot */
2498 if (old_size == 0) {
2499 c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
2500 c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2501 c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2502 } else {
2503 /* refetch last slot */
2504 slot--;
2505 udf_get_adslot(udf_node, slot, &c_ad, &eof);
2506 }
2507 }
2508
2509 /*
2510 * If the length of the last slot is not a multiple of lb_size, adjust
2511 * the length so that it is; don't forget to adjust `append_len'!
2512 * Relevant for extending existing files.
2513 */
2514 len = udf_rw32(c_ad.len);
2515 flags = UDF_EXT_FLAGS(len);
2516 len = UDF_EXT_LEN(len);
2517
2518 lastblock_grow = 0;
2519 if (len % lb_size > 0) {
2520 lastblock_grow = lb_size - (len % lb_size);
2521 lastblock_grow = MIN(size_diff, lastblock_grow);
2522 len += lastblock_grow;
2523 c_ad.len = udf_rw32(len | flags);
2524
2525 /* TODO zero appended space in buffer! */
2526 /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
2527 }
2528 memset(&s_ad, 0, sizeof(struct long_ad));
2529
2530 /* size_diff can be bigger than allowed, so grow in chunks */
2531 append_len = size_diff - lastblock_grow;
2532 while (append_len > 0) {
2533 chunk = MIN(append_len, max_len);
2534 s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
2535 s_ad.loc.part_num = udf_rw16(0);
2536 s_ad.loc.lb_num = udf_rw32(0);
2537
2538 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2539 /* not mergeable (anymore) */
2540 error = udf_append_adslot(udf_node, &slot, &c_ad);
2541 if (error)
2542 goto errorout;
2543 slot++;
2544 c_ad = s_ad;
2545 memset(&s_ad, 0, sizeof(struct long_ad));
2546 }
2547 append_len -= chunk;
2548 }
2549
2550 /* if there is a rest piece in the accumulator, append it */
2551 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2552 error = udf_append_adslot(udf_node, &slot, &c_ad);
2553 if (error)
2554 goto errorout;
2555 slot++;
2556 }
2557
2558 /* if there is a rest piece that didn't fit, append it */
2559 if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
2560 error = udf_append_adslot(udf_node, &slot, &s_ad);
2561 if (error)
2562 goto errorout;
2563 slot++;
2564 }
2565
2566 inflen += size_diff;
2567 objsize += size_diff;
2568 if (fe) {
2569 fe->inf_len = udf_rw64(inflen);
2570 } else {
2571 efe->inf_len = udf_rw64(inflen);
2572 efe->obj_size = udf_rw64(objsize);
2573 }
2574 error = 0;
2575
2576 if (evacuated_data) {
2577 /* set new write size for uvm */
2578 uvm_vnp_setwritesize(vp, old_size);
2579
2580 /* write out evacuated data */
2581 error = vn_rdwr(UIO_WRITE, udf_node->vnode,
2582 evacuated_data, old_size, 0,
2583 UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2584 FSCRED, NULL, NULL);
2585 uvm_vnp_setsize(vp, old_size);
2586 }
2587
2588 errorout:
2589 if (evacuated_data)
2590 free(evacuated_data, M_UDFTEMP);
2591
2592 udf_count_alloc_exts(udf_node);
2593
2594 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2595 UDF_UNLOCK_NODE(udf_node, 0);
2596
2597 KASSERT(new_inflen == orig_inflen + size_diff);
2598 KASSERT(new_lbrec == orig_lbrec);
2599
2600 return error;
2601 }
2602
2603 /* --------------------------------------------------------------------- */
2604
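/*
* Shrink the node to `new_size': free the allocated space beyond the
* new end, truncate the extent straddling it and, when the node
* shrinks to zero, convert it back to internal (embedded) allocation.
*/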
2605 int
2606 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2607 {
2608 struct vnode *vp = udf_node->vnode;
2609 struct udf_mount *ump = udf_node->ump;
2610 struct file_entry *fe;
2611 struct extfile_entry *efe;
2612 struct icb_tag *icbtag;
2613 struct long_ad c_ad, s_ad, *node_ad_cpy;
2614 uint64_t size_diff, old_size, inflen, objsize;
2615 uint64_t foffset, end_foffset;
2616 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2617 uint32_t lb_size, dscr_size, crclen;
2618 uint32_t slot_offset, slot_offset_lb;
2619 uint32_t len, flags, max_len;
2620 uint32_t num_lb, lb_num;
2621 uint32_t max_l_ad, l_ad, l_ea;
2622 uint16_t vpart_num;
2623 uint8_t *data_pos;
2624 int icbflags, addr_type;
2625 int slot, cpy_slot, cpy_slots;
2626 int eof, error;
2627
2628 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2629
2630 UDF_LOCK_NODE(udf_node, 0);
2631 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2632
2633 lb_size = udf_rw32(ump->logical_vol->lb_size);
2634 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2635
2636 /* do the work */
2637 fe = udf_node->fe;
2638 efe = udf_node->efe;
2639 if (fe) {
2640 icbtag = &fe->icbtag;
2641 inflen = udf_rw64(fe->inf_len);
2642 objsize = inflen;
2643 dscr_size = sizeof(struct file_entry) -1;
2644 l_ea = udf_rw32(fe->l_ea);
2645 l_ad = udf_rw32(fe->l_ad);
2646 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2647 } else {
2648 icbtag = &efe->icbtag;
2649 inflen = udf_rw64(efe->inf_len);
2650 objsize = udf_rw64(efe->obj_size);
2651 dscr_size = sizeof(struct extfile_entry) -1;
2652 l_ea = udf_rw32(efe->l_ea);
2653 l_ad = udf_rw32(efe->l_ad);
2654 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2655 }
2656 max_l_ad = lb_size - dscr_size - l_ea;
2657
2658 icbflags = udf_rw16(icbtag->flags);
2659 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2660
2661 old_size = inflen;
2662 size_diff = old_size - new_size;
2663
2664 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2665
2666 /* shrink the node to its new size */
2667 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2668 /* only reflect size change directly in the node */
2669 KASSERT(new_size <= max_l_ad);
2670 inflen -= size_diff;
2671 objsize -= size_diff;
2672 l_ad -= size_diff;
2673 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2674 if (fe) {
2675 fe->inf_len = udf_rw64(inflen);
2676 fe->l_ad = udf_rw32(l_ad);
2677 fe->tag.desc_crc_len = udf_rw16(crclen);
2678 } else {
2679 efe->inf_len = udf_rw64(inflen);
2680 efe->obj_size = udf_rw64(objsize);
2681 efe->l_ad = udf_rw32(l_ad);
2682 efe->tag.desc_crc_len = udf_rw16(crclen);
2683 }
2684 error = 0;
2685
2686 /* clear the space in the descriptor */
2687 KASSERT(old_size > new_size);
2688 memset(data_pos + new_size, 0, old_size - new_size);
2689
2690 /* TODO zero appended space in buffer! */
2691 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2692
2693 /* set new size for uvm */
2694 uvm_vnp_setsize(vp, new_size);
2695
2696 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2697 UDF_UNLOCK_NODE(udf_node, 0);
2698
2699 KASSERT(new_inflen == orig_inflen - size_diff);
2700 KASSERT(new_lbrec == orig_lbrec);
2701 KASSERT(new_lbrec == 0);
2702
2703 return 0;
2704 }
2705
2706 /* set up copy space for the node's cleanup extents */
2707 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2708 M_UDFMNT, M_WAITOK);
2709 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2710
2711 /*
2712 * Shrink the node by releasing its allocations and truncating the last
2713 * allocation to the new size. If the new size fits into the
2714 * allocation descriptor itself, transform it into a
2715 * UDF_ICB_INTERN_ALLOC.
2716 */
2717 slot = 0;
2718 cpy_slot = 0;
2719 foffset = 0;
2720
2721 /* 1) copy till first overlap piece to the rewrite buffer */
2722 for (;;) {
2723 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2724 if (eof) {
2725 DPRINTF(WRITE,
2726 ("Shrink node failed: "
2727 "encountered EOF\n"));
2728 error = EINVAL;
2729 goto errorout; /* panic? */
2730 }
2731 len = udf_rw32(s_ad.len);
2732 flags = UDF_EXT_FLAGS(len);
2733 len = UDF_EXT_LEN(len);
2734
2735 if (flags == UDF_EXT_REDIRECT) {
2736 slot++;
2737 continue;
2738 }
2739
2740 end_foffset = foffset + len;
2741 if (end_foffset > new_size)
2742 break; /* found */
2743
2744 node_ad_cpy[cpy_slot++] = s_ad;
2745
2746 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2747 "-> stack\n",
2748 udf_rw16(s_ad.loc.part_num),
2749 udf_rw32(s_ad.loc.lb_num),
2750 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2751 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2752
2753 foffset = end_foffset;
2754 slot++;
2755 }
2756 slot_offset = new_size - foffset;
2757
2758 /* 2) trunc overlapping slot at overlap and copy it */
2759 if (slot_offset > 0) {
2760 lb_num = udf_rw32(s_ad.loc.lb_num);
2761 vpart_num = udf_rw16(s_ad.loc.part_num);
2762
2763 if (flags == UDF_EXT_ALLOCATED) {
2764 /* calculate extent in lb, and offset in lb */
2765 num_lb = (len + lb_size -1) / lb_size;
2766 slot_offset_lb = (slot_offset + lb_size -1) / lb_size;
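/* round up, so the partially kept last block is not freed */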
2767
2768 /* adjust our slot */
2769 lb_num += slot_offset_lb;
2770 num_lb -= slot_offset_lb;
2771
2772 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2773 }
2774
2775 s_ad.len = udf_rw32(slot_offset | flags);
2776 node_ad_cpy[cpy_slot++] = s_ad;
2777 slot++;
2778
2779 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2780 "-> stack\n",
2781 udf_rw16(s_ad.loc.part_num),
2782 udf_rw32(s_ad.loc.lb_num),
2783 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2784 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2785 }
2786
2787 /* 3) delete remainder */
2788 for (;;) {
2789 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2790 if (eof)
2791 break;
2792
2793 len = udf_rw32(s_ad.len);
2794 flags = UDF_EXT_FLAGS(len);
2795 len = UDF_EXT_LEN(len);
2796
2797 if (flags == UDF_EXT_REDIRECT) {
2798 slot++;
2799 continue;
2800 }
2801
2802 DPRINTF(ALLOC, ("\t3: delete remainder "
2803 "vp %d lb %d, len %d, flags %d\n",
2804 udf_rw16(s_ad.loc.part_num),
2805 udf_rw32(s_ad.loc.lb_num),
2806 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2807 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2808
2809 if (flags == UDF_EXT_ALLOCATED) {
2810 lb_num = udf_rw32(s_ad.loc.lb_num);
2811 vpart_num = udf_rw16(s_ad.loc.part_num);
2812 num_lb = (len + lb_size - 1) / lb_size;
2813
2814 udf_free_allocated_space(ump, lb_num, vpart_num,
2815 num_lb);
2816 }
2817
2818 slot++;
2819 }
2820
2821 /* 4) if it will fit into the descriptor then convert */
2822 if (new_size < max_l_ad) {
2823 /*
2824 * rescue/evacuate the old piece by reading it in, and convert it
2825 * to internal alloc.
2826 */
2827 if (new_size == 0) {
2828 /* XXX/TODO only for zero sizing now */
2829 udf_wipe_adslots(udf_node);
2830
2831 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2832 icbflags |= UDF_ICB_INTERN_ALLOC;
2833 icbtag->flags = udf_rw16(icbflags);
2834
2835 inflen -= size_diff; KASSERT(inflen == 0);
2836 objsize -= size_diff;
2837 l_ad = new_size;
2838 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2839 if (fe) {
2840 fe->inf_len = udf_rw64(inflen);
2841 fe->l_ad = udf_rw32(l_ad);
2842 fe->tag.desc_crc_len = udf_rw16(crclen);
2843 } else {
2844 efe->inf_len = udf_rw64(inflen);
2845 efe->obj_size = udf_rw64(objsize);
2846 efe->l_ad = udf_rw32(l_ad);
2847 efe->tag.desc_crc_len = udf_rw16(crclen);
2848 }
2849 /* an evacuated piece would be copied back in here (none for zero size) */
2850 /* set new size for uvm */
2851 uvm_vnp_setsize(vp, new_size);
2852
2853 free(node_ad_cpy, M_UDFMNT);
2854 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2855
2856 UDF_UNLOCK_NODE(udf_node, 0);
2857
2858 KASSERT(new_inflen == orig_inflen - size_diff);
2859 KASSERT(new_inflen == 0);
2860 KASSERT(new_lbrec == 0);
2861
2862 return 0;
2863 }
2864
2865 printf("UDF_SHRINK_NODE: could have converted to internal alloc!\n");
2866 }
2867
2868 /* 5) reset node descriptors */
2869 udf_wipe_adslots(udf_node);
2870
2871 /* 6) copy back extents; merge when possible. Recounting on the fly */
2872 cpy_slots = cpy_slot;
2873
2874 c_ad = node_ad_cpy[0];
2875 slot = 0;
2876 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2877 s_ad = node_ad_cpy[cpy_slot];
2878
2879 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2880 "lb %d, len %d, flags %d\n",
2881 udf_rw16(s_ad.loc.part_num),
2882 udf_rw32(s_ad.loc.lb_num),
2883 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2884 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2885
2886 /* see if we can merge */
2887 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2888 /* not mergeable (anymore) */
2889 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2890 "len %d, flags %d\n",
2891 udf_rw16(c_ad.loc.part_num),
2892 udf_rw32(c_ad.loc.lb_num),
2893 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2894 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2895
2896 error = udf_append_adslot(udf_node, &slot, &c_ad);
2897 if (error)
2898 goto errorout; /* panic? */
2899 c_ad = s_ad;
2900 slot++;
2901 }
2902 }
2903
2904 /* 7) push rest slot (if any) */
2905 if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2906 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2907 "len %d, flags %d\n",
2908 udf_rw16(c_ad.loc.part_num),
2909 udf_rw32(c_ad.loc.lb_num),
2910 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2911 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2912
2913 error = udf_append_adslot(udf_node, &slot, &c_ad);
2914 if (error)
2915 goto errorout; /* panic? */
2917 }
2918
2919 inflen -= size_diff;
2920 objsize -= size_diff;
2921 if (fe) {
2922 fe->inf_len = udf_rw64(inflen);
2923 } else {
2924 efe->inf_len = udf_rw64(inflen);
2925 efe->obj_size = udf_rw64(objsize);
2926 }
2927 error = 0;
2928
2929 /* set new size for uvm */
2930 uvm_vnp_setsize(vp, new_size);
2931
2932 errorout:
2933 free(node_ad_cpy, M_UDFMNT);
2934
2935 udf_count_alloc_exts(udf_node);
2936
2937 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2938 UDF_UNLOCK_NODE(udf_node, 0);
2939
2940 KASSERT(new_inflen == orig_inflen - size_diff);
2941
2942 return error;
2943 }
2944
2945