udf_allocation.c revision 1.8 1 /* $NetBSD: udf_allocation.c,v 1.8 2008/06/30 16:43:13 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.8 2008/06/30 16:43:13 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
/* Convert a vnode to its UDF-private node; argument parenthesized so the
 * macro is safe for any expression (e.g. VTOI(buf->b_vp)). */
#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
81 /*
82 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
83 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
85 * searching for the same or adjacent position again.
86 */
87
88 /* --------------------------------------------------------------------- */
89 //#ifdef DEBUG
90 #if 1
91 #if 1
/*
 * Dump the allocation descriptors of an udf_node to the console.
 * Debugging aid only; a no-op unless UDF_DEBUG_ADWLK is set in udf_verbose.
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t *data_pos;
	int part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags;

	if ((udf_verbose & UDF_DEBUG_ADWLK) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* maximum room for allocation descriptors in this descriptor block */
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump:\n");
	printf("\tudf_node %p\n", udf_node);

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* file data embedded in the descriptor; no extents to walk */
		printf("\t\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\t\tInflen = %"PRIu64"\n", inflen);
	printf("\t\tl_ad = %d\n", l_ad);

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	/*
	 * Dump the whole descriptor area, deliberately also past l_ad;
	 * the rest ought to be zero (see udf_node_sanity_check).
	 */
	printf("\t\t");
	for (ad_off = 0; ad_off < max_l_ad-adlen; ad_off += adlen) {
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len = udf_rw32(short_ad->len);
			lb_num = udf_rw32(short_ad->lb_num);
			part_num = -1;	/* short_ads carry no partition */
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		} else {
			long_ad = (struct long_ad *) (data_pos + ad_off);
			len = udf_rw32(long_ad->len);
			lb_num = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		}
		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags);
		printf("] ");
		/* mark where the recorded descriptors end */
		if (ad_off + adlen == l_ad)
			printf("\n\t\tl_ad END\n\t\t");
	}
	printf("\n");
}
179 #else
180 #define udf_node_dump(a)
181 #endif
182
/*
 * Recalculate the information length and the number of logical blocks
 * recorded by walking all allocation descriptors of the node. The results
 * are returned in *cnt_inflen and *cnt_logblksrec and KASSERTed against
 * the values recorded in the node's (extended) file entry.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t *data_pos;
	int part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags, whole_lb;

	/* only lock mutex; we're not changing and its a debug checking func */
	mutex_enter(&udf_node->node_mutex);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		logblksrec = udf_rw64(fe->logblks_rec);
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		logblksrec = udf_rw64(efe->logblks_rec);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* maximum room for allocation descriptors in this descriptor block */
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: l_ad covers the file data itself */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		mutex_exit(&udf_node->node_mutex);
		return;
	}

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	/* start counting */
	whole_lb = 1;
	for (ad_off = 0; ad_off < l_ad; ad_off += adlen) {
		/* only the last extent may end mid logical block */
		KASSERT(whole_lb == 1);
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len = udf_rw32(short_ad->len);
			lb_num = udf_rw32(short_ad->lb_num);
			part_num = -1;	/* short_ads carry no partition */
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		} else {
			long_ad = (struct long_ad *) (data_pos + ad_off);
			len = udf_rw32(long_ad->len);
			lb_num = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		}
		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			/* only backed extents count as recorded blocks */
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* a redirect extent is exactly one logical block */
			KASSERT(len == lb_size);
		}

		/* check whole lb */
		whole_lb = ((len % lb_size) == 0);
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

	mutex_exit(&udf_node->node_mutex);
	if (0)
		udf_node_dump(udf_node);
}
284 #else
285 #define udf_node_sanity_check(a, b, c)
286 #endif
287
288 /* --------------------------------------------------------------------- */
289
290 int
291 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
292 uint32_t *lb_numres, uint32_t *extres)
293 {
294 struct part_desc *pdesc;
295 struct spare_map_entry *sme;
296 struct long_ad s_icb_loc;
297 uint64_t foffset, end_foffset;
298 uint32_t lb_size, len;
299 uint32_t lb_num, lb_rel, lb_packet;
300 uint32_t udf_rw32_lbmap, ext_offset;
301 uint16_t vpart;
302 int rel, part, error, eof, slot, flags;
303
304 assert(ump && icb_loc && lb_numres);
305
306 vpart = udf_rw16(icb_loc->loc.part_num);
307 lb_num = udf_rw32(icb_loc->loc.lb_num);
308 if (vpart > UDF_VTOP_RAWPART)
309 return EINVAL;
310
311 translate_again:
312 part = ump->vtop[vpart];
313 pdesc = ump->partitions[part];
314
315 switch (ump->vtop_tp[vpart]) {
316 case UDF_VTOP_TYPE_RAW :
317 /* 1:1 to the end of the device */
318 *lb_numres = lb_num;
319 *extres = INT_MAX;
320 return 0;
321 case UDF_VTOP_TYPE_PHYS :
322 /* transform into its disc logical block */
323 if (lb_num > udf_rw32(pdesc->part_len))
324 return EINVAL;
325 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
326
327 /* extent from here to the end of the partition */
328 *extres = udf_rw32(pdesc->part_len) - lb_num;
329 return 0;
330 case UDF_VTOP_TYPE_VIRT :
331 /* only maps one logical block, lookup in VAT */
332 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
333 return EINVAL;
334
335 /* lookup in virtual allocation table file */
336 mutex_enter(&ump->allocate_mutex);
337 error = udf_vat_read(ump->vat_node,
338 (uint8_t *) &udf_rw32_lbmap, 4,
339 ump->vat_offset + lb_num * 4);
340 mutex_exit(&ump->allocate_mutex);
341
342 if (error)
343 return error;
344
345 lb_num = udf_rw32(udf_rw32_lbmap);
346
347 /* transform into its disc logical block */
348 if (lb_num > udf_rw32(pdesc->part_len))
349 return EINVAL;
350 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
351
352 /* just one logical block */
353 *extres = 1;
354 return 0;
355 case UDF_VTOP_TYPE_SPARABLE :
356 /* check if the packet containing the lb_num is remapped */
357 lb_packet = lb_num / ump->sparable_packet_size;
358 lb_rel = lb_num % ump->sparable_packet_size;
359
360 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
361 sme = &ump->sparing_table->entries[rel];
362 if (lb_packet == udf_rw32(sme->org)) {
363 /* NOTE maps to absolute disc logical block! */
364 *lb_numres = udf_rw32(sme->map) + lb_rel;
365 *extres = ump->sparable_packet_size - lb_rel;
366 return 0;
367 }
368 }
369
370 /* transform into its disc logical block */
371 if (lb_num > udf_rw32(pdesc->part_len))
372 return EINVAL;
373 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
374
375 /* rest of block */
376 *extres = ump->sparable_packet_size - lb_rel;
377 return 0;
378 case UDF_VTOP_TYPE_META :
379 /* we have to look into the file's allocation descriptors */
380
381 /* use metadatafile allocation mutex */
382 lb_size = udf_rw32(ump->logical_vol->lb_size);
383
384 UDF_LOCK_NODE(ump->metadata_node, 0);
385
386 /* get first overlapping extent */
387 foffset = 0;
388 slot = 0;
389 for (;;) {
390 udf_get_adslot(ump->metadata_node,
391 slot, &s_icb_loc, &eof);
392 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
393 "len = %d, lb_num = %d, part = %d\n",
394 slot, eof,
395 UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
396 UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
397 udf_rw32(s_icb_loc.loc.lb_num),
398 udf_rw16(s_icb_loc.loc.part_num)));
399 if (eof) {
400 DPRINTF(TRANSLATE,
401 ("Meta partition translation "
402 "failed: can't seek location\n"));
403 UDF_UNLOCK_NODE(ump->metadata_node, 0);
404 return EINVAL;
405 }
406 len = udf_rw32(s_icb_loc.len);
407 flags = UDF_EXT_FLAGS(len);
408 len = UDF_EXT_LEN(len);
409
410 if (flags == UDF_EXT_REDIRECT) {
411 slot++;
412 continue;
413 }
414
415 end_foffset = foffset + len;
416
417 if (end_foffset > lb_num * lb_size)
418 break; /* found */
419 foffset = end_foffset;
420 slot++;
421 }
422 /* found overlapping slot */
423 ext_offset = lb_num * lb_size - foffset;
424
425 /* process extent offset */
426 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
427 vpart = udf_rw16(s_icb_loc.loc.part_num);
428 lb_num += (ext_offset + lb_size -1) / lb_size;
429 len -= ext_offset;
430 ext_offset = 0;
431
432 flags = UDF_EXT_FLAGS(s_icb_loc.len);
433
434 UDF_UNLOCK_NODE(ump->metadata_node, 0);
435 if (flags != UDF_EXT_ALLOCATED) {
436 DPRINTF(TRANSLATE, ("Metadata partition translation "
437 "failed: not allocated\n"));
438 return EINVAL;
439 }
440
441 /*
442 * vpart and lb_num are updated, translate again since we
443 * might be mapped on sparable media
444 */
445 goto translate_again;
446 default:
447 printf("UDF vtop translation scheme %d unimplemented yet\n",
448 ump->vtop_tp[vpart]);
449 }
450
451 return EINVAL;
452 }
453
454 /* --------------------------------------------------------------------- */
455
/*
 * Translate an extent (in logical blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 *
 * For each of the num_lb file blocks starting at block `from', an entry is
 * written to map[]: the translated disc sector for allocated space,
 * UDF_TRANS_ZERO for unbacked (free) space, or a single UDF_TRANS_INTERN
 * marker when the file's data is embedded in the node descriptor itself.
 */

int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* do the work */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* embedded data: a single marker, no extents to translate */
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot    = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirect extents carry no file data; skip them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; byte offset of `from' inside this extent */
	ext_offset = from * lb_size - foffset;

	/* translate extents from here on until num_lb blocks are mapped */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len    = udf_rw32(s_ad.len);
		flags  = UDF_EXT_FLAGS(len);
		len    = UDF_EXT_LEN(len);

		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		overlap  = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while() is necessary since the extent
		 * udf_translate_vtop() returns doesn't have to span the
		 * whole extent.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* unbacked space reads back as zeros */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* translate to disc sectors via vtop */
				t_ad.loc.lb_num   = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
628
629 /* --------------------------------------------------------------------- */
630
631 static int
632 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
633 {
634 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
635 uint8_t *blob;
636 int entry, chunk, found, error;
637
638 KASSERT(ump);
639 KASSERT(ump->logical_vol);
640
641 lb_size = udf_rw32(ump->logical_vol->lb_size);
642 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
643
644 /* TODO static allocation of search chunk */
645
646 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
647 found = 0;
648 error = 0;
649 entry = 0;
650 do {
651 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
652 if (chunk <= 0)
653 break;
654 /* load in chunk */
655 error = udf_vat_read(ump->vat_node, blob, chunk,
656 ump->vat_offset + lb_num * 4);
657
658 if (error)
659 break;
660
661 /* search this chunk */
662 for (entry=0; entry < chunk /4; entry++, lb_num++) {
663 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
664 lb_map = udf_rw32(udf_rw32_lbmap);
665 if (lb_map == 0xffffffff) {
666 found = 1;
667 break;
668 }
669 }
670 } while (!found);
671 if (error) {
672 printf("udf_search_free_vatloc: error reading in vat chunk "
673 "(lb %d, size %d)\n", lb_num, chunk);
674 }
675
676 if (!found) {
677 /* extend VAT */
678 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
679 lb_num = ump->vat_entries;
680 ump->vat_entries++;
681 }
682
683 /* mark entry with initialiser just in case */
684 lb_map = udf_rw32(0xfffffffe);
685 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
686 ump->vat_offset + lb_num *4);
687 ump->vat_last_free_lb = lb_num;
688
689 free(blob, M_UDFTEMP);
690 *lbnumres = lb_num;
691 return 0;
692 }
693
694
/*
 * Allocate up to *num_lb logical blocks from the given free-space bitmap;
 * a set bit marks a free block (allocating clears it, freeing sets it; see
 * udf_bitmap_free). Logical block numbers are appended to lmappos and their
 * physical counterparts (offset by ptov) to pmappos. *num_lb is decremented
 * per allocated block, so a non-zero rest on return means out of space.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	/* resume from the remembered position, rounded down to a byte */
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;
	/* two passes: from the resume position to the end, then wrap to 0 */
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* use first bit not set */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* 1-based; 0 if none set */
			if (bit == 0) {
				/* byte fully allocated; try the next one */
				offset += 8;
				continue;
			}
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*pmappos++ = lb_num + ptov;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember where we stopped for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
742
743
744 static void
745 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
746 {
747 uint32_t offset;
748 uint32_t bit, bitval;
749 uint8_t *bpos;
750
751 offset = lb_num;
752
753 /* starter bits */
754 bpos = bitmap->bits + offset/8;
755 bit = offset % 8;
756 while ((bit != 0) && (num_lb > 0)) {
757 bitval = (1 << bit);
758 KASSERT((*bpos & bitval) == 0);
759 *bpos |= bitval;
760 offset++; num_lb--;
761 bit = (bit + 1) % 8;
762 }
763 if (num_lb == 0)
764 return;
765
766 /* whole bytes */
767 KASSERT(bit == 0);
768 bpos = bitmap->bits + offset / 8;
769 while (num_lb >= 8) {
770 KASSERT((*bpos == 0));
771 *bpos = 255;
772 offset += 8; num_lb -= 8;
773 bpos++;
774 }
775
776 /* stop bits */
777 KASSERT(num_lb < 8);
778 bit = 0;
779 while (num_lb > 0) {
780 bitval = (1 << bit);
781 KASSERT((*bpos & bitval) == 0);
782 *bpos |= bitval;
783 offset++; num_lb--;
784 bit = (bit + 1) % 8;
785 }
786 }
787
788
/*
 * Allocate a contiguous sequence of sectornumbers.
 *
 * Allocates num_lb blocks according to alloc_type, returning the chosen
 * virtual partition in *alloc_partp and the logical resp. physical block
 * numbers in lmapping/pmapping (arrays of at least num_lb entries).
 * Returns 0 or ENOSPC; VAT allocation can also return a read error.
 */
static int
udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
	int num_lb, uint16_t *alloc_partp,
	uint64_t *lmapping, uint64_t *pmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos, *pmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_part;
	int error;

	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* pick the track/partition pair matching the data type */
	if (ismetadata) {
		alloc_part  = ump->metadata_part;
		alloc_track = &ump->metadata_track;
		other_track = &ump->data_track;
	} else {
		alloc_part  = ump->data_part;
		alloc_track = &ump->data_track;
		other_track = &ump->metadata_track;
	}

	*alloc_partp = alloc_part;

	error = 0;
	/* XXX check disc space */

	pdesc = ump->partitions[ump->vtop[alloc_part]];
	lmappos = lmapping;
	pmappos = pmapping;

	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;
			*pmappos = 0;	/* will get late-allocated */
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* hand out the next writable sectors of the track */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*pmappos++ = alloc_track->next_writable;
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}
		/* keep the shadow copy in sync when both refer to one track */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		ptov = udf_rw32(pdesc->start_loc);

		/* allocate on unallocated bits page */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[alloc_part];
		udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
			pmappos, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		/* a non-zero rest means the bitmap ran out of space */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + alloc_part;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :
	case UDF_ALLOC_METASEQUENTIAL :
	case UDF_ALLOC_RELAXEDSEQUENTIAL :
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		pmappos = pmapping;
		printf("udf_allocate_space, mapping l->p:\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("\t%"PRIu64" -> %"PRIu64"\n",
				*lmappos++, *pmappos++);
		}
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
905
906 /* --------------------------------------------------------------------- */
907
/*
 * Return num_lb blocks starting at lb_num on virtual partition vpart_num
 * to the free space administration backing that partition: the freed or
 * unallocated space bitmap for physical/sparable partitions, or the VAT
 * for virtual partitions. Metadata partitions are not implemented yet.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			  "part %d + %d sect\n", lb_num, vpart_num, num_lb));

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap    = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount in the logical volume integrity dscr */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry by marking it 0xffffffff again */
		KASSERT(num_lb == 1);

		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember lowest free entry to speed up the next search */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
979
980 /* --------------------------------------------------------------------- */
981
982 int
983 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
984 uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
985 {
986 int ismetadata, alloc_type;
987
988 ismetadata = (udf_c_type == UDF_C_NODE);
989 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
990
991 #ifdef DIAGNOSTIC
992 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
993 panic("udf_pre_allocate_space: bad c_type on VAT!\n");
994 }
995 #endif
996
997 /* reserve size for VAT allocated data */
998 if (alloc_type == UDF_ALLOC_VAT) {
999 mutex_enter(&ump->allocate_mutex);
1000 ump->uncomitted_lb += num_lb;
1001 mutex_exit(&ump->allocate_mutex);
1002 }
1003
1004 return udf_allocate_space(ump, ismetadata, alloc_type,
1005 num_lb, alloc_partp, lmapping, pmapping);
1006 }
1007
1008 /* --------------------------------------------------------------------- */
1009
/*
 * Allocate a buf on disc for direct write out. The space doesn't have to be
 * contiguous as the caller takes care of this.
 *
 * Fills lmapping/pmapping with the logical resp. physical blocks the
 * buffer's sectors were given and points buf->b_blkno at the first
 * physical block. For userdata and FIDs the new allocation is also
 * recorded in the node's allocation descriptors.
 */

void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	uint16_t vpart_num;
	int lb_size, blks, udf_c_type;
	int ismetadata, alloc_type;
	int num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb  = (buf->b_bcount + lb_size -1) / lb_size;
	blks    = lb_size / DEV_BSIZE;
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	ismetadata = (udf_c_type == UDF_C_NODE);
	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;

#ifdef DIAGNOSTIC
	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
		panic("udf_late_allocate_buf: bad c_type on VAT!\n");
	}
#endif

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, it's already allocated */
		if (alloc_type != UDF_ALLOC_VAT)
			return;

		/* allocate sequential */
		alloc_type = UDF_ALLOC_SEQUENTIAL;
	}

	error = udf_allocate_space(ump, ismetadata, alloc_type,
			num_lb, &vpart_num, lmapping, pmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* commit our sector count; clamped to avoid underflow */
	mutex_enter(&ump->allocate_mutex);
	if (num_lb > ump->uncomitted_lb) {
		ump->uncomitted_lb = 0;
	} else {
		ump->uncomitted_lb -= num_lb;
	}
	mutex_exit(&ump->allocate_mutex);

	/* b_blkno is in DEV_BSIZE units */
	buf->b_blkno = (*pmapping) * blks;

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
			udf_node->outstanding_bufs--;
		splx(s);
	}
}
1086
1087 /* --------------------------------------------------------------------- */
1088
1089 /*
1090 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1091 * possible (anymore); a2 returns the rest piece.
1092 */
1093
1094 static int
1095 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1096 {
1097 uint32_t max_len, merge_len;
1098 uint32_t a1_len, a2_len;
1099 uint32_t a1_flags, a2_flags;
1100 uint32_t a1_lbnum, a2_lbnum;
1101 uint16_t a1_part, a2_part;
1102
1103 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1104
1105 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1106 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1107 a1_lbnum = udf_rw32(a1->loc.lb_num);
1108 a1_part = udf_rw16(a1->loc.part_num);
1109
1110 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1111 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1112 a2_lbnum = udf_rw32(a2->loc.lb_num);
1113 a2_part = udf_rw16(a2->loc.part_num);
1114
1115 /* defines same space */
1116 if (a1_flags != a2_flags)
1117 return 1;
1118
1119 if (a1_flags != UDF_EXT_FREE) {
1120 /* the same partition */
1121 if (a1_part != a2_part)
1122 return 1;
1123
1124 /* a2 is successor of a1 */
1125 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1126 return 1;
1127 }
1128
1129 /* merge as most from a2 if possible */
1130 merge_len = MIN(a2_len, max_len - a1_len);
1131 a1_len += merge_len;
1132 a2_len -= merge_len;
1133 a2_lbnum += merge_len/lb_size;
1134
1135 a1->len = udf_rw32(a1_len | a1_flags);
1136 a2->len = udf_rw32(a2_len | a2_flags);
1137 a2->loc.lb_num = udf_rw32(a2_lbnum);
1138
1139 if (a2_len > 0)
1140 return 1;
1141
1142 /* there is space over to merge */
1143 return 0;
1144 }
1145
1146 /* --------------------------------------------------------------------- */
1147
1148 static void
1149 udf_wipe_adslots(struct udf_node *udf_node)
1150 {
1151 struct file_entry *fe;
1152 struct extfile_entry *efe;
1153 struct alloc_ext_entry *ext;
1154 uint64_t inflen, objsize;
1155 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1156 uint8_t *data_pos;
1157 int extnr;
1158
1159 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1160
1161 fe = udf_node->fe;
1162 efe = udf_node->efe;
1163 if (fe) {
1164 inflen = udf_rw64(fe->inf_len);
1165 objsize = inflen;
1166 dscr_size = sizeof(struct file_entry) -1;
1167 l_ea = udf_rw32(fe->l_ea);
1168 l_ad = udf_rw32(fe->l_ad);
1169 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1170 } else {
1171 inflen = udf_rw64(efe->inf_len);
1172 objsize = udf_rw64(efe->obj_size);
1173 dscr_size = sizeof(struct extfile_entry) -1;
1174 l_ea = udf_rw32(efe->l_ea);
1175 l_ad = udf_rw32(efe->l_ad);
1176 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1177 }
1178 max_l_ad = lb_size - dscr_size - l_ea;
1179
1180 /* wipe fe/efe */
1181 memset(data_pos, 0, max_l_ad);
1182 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1183 if (fe) {
1184 fe->l_ad = udf_rw32(0);
1185 fe->logblks_rec = udf_rw64(0);
1186 fe->tag.desc_crc_len = udf_rw32(crclen);
1187 } else {
1188 efe->l_ad = udf_rw32(0);
1189 efe->logblks_rec = udf_rw64(0);
1190 efe->tag.desc_crc_len = udf_rw32(crclen);
1191 }
1192
1193 /* wipe all allocation extent entries */
1194 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1195 ext = udf_node->ext[extnr];
1196 dscr_size = sizeof(struct alloc_ext_entry) -1;
1197 max_l_ad = lb_size - dscr_size;
1198 memset(data_pos, 0, max_l_ad);
1199 ext->l_ad = udf_rw32(0);
1200
1201 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1202 ext->tag.desc_crc_len = udf_rw32(crclen);
1203 }
1204 }
1205
1206 /* --------------------------------------------------------------------- */
1207
1208 void
1209 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1210 int *eof) {
1211 struct file_entry *fe;
1212 struct extfile_entry *efe;
1213 struct alloc_ext_entry *ext;
1214 struct icb_tag *icbtag;
1215 struct short_ad *short_ad;
1216 struct long_ad *long_ad;
1217 uint32_t offset;
1218 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad;
1219 uint8_t *data_pos;
1220 int icbflags, addr_type, adlen, extnr;
1221
1222 /* determine what descriptor we are in */
1223 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1224
1225 fe = udf_node->fe;
1226 efe = udf_node->efe;
1227 if (fe) {
1228 icbtag = &fe->icbtag;
1229 dscr_size = sizeof(struct file_entry) -1;
1230 l_ea = udf_rw32(fe->l_ea);
1231 l_ad = udf_rw32(fe->l_ad);
1232 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1233 } else {
1234 icbtag = &efe->icbtag;
1235 dscr_size = sizeof(struct extfile_entry) -1;
1236 l_ea = udf_rw32(efe->l_ea);
1237 l_ad = udf_rw32(efe->l_ad);
1238 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1239 }
1240 max_l_ad = lb_size - dscr_size - l_ea;
1241
1242 icbflags = udf_rw16(icbtag->flags);
1243 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1244
1245 /* just in case we're called on an intern, its EOF */
1246 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1247 memset(icb, 0, sizeof(struct long_ad));
1248 *eof = 1;
1249 return;
1250 }
1251
1252 adlen = 0;
1253 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1254 adlen = sizeof(struct short_ad);
1255 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1256 adlen = sizeof(struct long_ad);
1257 }
1258
1259 /* if offset too big, we go to the allocation extensions */
1260 offset = slot * adlen;
1261 extnr = -1;
1262 while (offset >= max_l_ad) {
1263 extnr++;
1264 offset -= max_l_ad;
1265 ext = udf_node->ext[extnr];
1266 dscr_size = sizeof(struct alloc_ext_entry) -1;
1267 l_ad = udf_rw32(ext->l_ad);
1268 max_l_ad = lb_size - dscr_size;
1269 data_pos = (uint8_t *) ext + dscr_size;
1270 if (extnr > udf_node->num_extensions) {
1271 l_ad = 0; /* force EOF */
1272 break;
1273 }
1274 }
1275
1276 *eof = (offset >= l_ad) || (l_ad == 0);
1277 if (*eof) {
1278 memset(icb, 0, sizeof(struct long_ad));
1279 return;
1280 }
1281
1282 /* get the element */
1283 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1284 short_ad = (struct short_ad *) (data_pos + offset);
1285 icb->len = short_ad->len;
1286 icb->loc.part_num = udf_node->loc.loc.part_num;
1287 icb->loc.lb_num = short_ad->lb_num;
1288 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1289 long_ad = (struct long_ad *) (data_pos + offset);
1290 *icb = *long_ad;
1291 }
1292 }
1293
1294 /* --------------------------------------------------------------------- */
1295
/*
 * Write allocation descriptor `icb' at position `slot' in the node's
 * allocation descriptor space, which is the (E)FE itself or, for high slot
 * numbers, one of the chained allocation extent entries.  An existing slot
 * is overwritten (compensating logblks_rec for the old contents); a slot
 * just past the end is appended, growing l_ad and the descriptor CRC
 * length.  Returns 0 on success or EFBIG when the entry cannot be stored.
 */
int
udf_append_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb) {
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint32_t offset, rest, len;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea = udf_rw32(fe->l_ea);
		l_ad_p = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag = &efe->icbtag;
		dscr = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea = udf_rw32(efe->l_ea);
		l_ad_p = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = 0;
	/*
	 * NOTE(review): udf_get_adslot() uses `>=' for this boundary;
	 * confirm whether offset == max_l_ad should also step into the
	 * next extension here.
	 */
	while (offset > max_l_ad) {
		offset -= max_l_ad;
		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;

		KASSERT(ext != NULL);
		l_ad_p = &ext->l_ad;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		extnr++;
	}
	/* offset is offset within the current (E)FE/AED */
	/*
	 * Note: logblks_rec_p deliberately still points into the (E)FE;
	 * recorded-blocks accounting stays there even when writing into an
	 * allocation extent.
	 */
	l_ad = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	if (extnr > udf_node->num_extensions)
		return EFBIG; /* too fragmented */

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0); /* ignore */
			o_icb.loc.lb_num = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* calculate rest space in this descriptor */
	rest = max_l_ad - offset;
	if (rest <= adlen) {
		/* create redirect and link new allocation extension */
		/* XXX chaining a new AED is not implemented; caller gets EFBIG */
		printf("udf_append_to_adslot: can't create allocation extention yet\n");
		return EFBIG;
	}

	/* write out the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	/*
	 * NOTE(review): icb->len is inspected without udf_rw32() here,
	 * unlike the o_icb path above -- verify on big-endian machines.
	 */
	if (UDF_EXT_FLAGS(icb->len) == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed */
	if (offset >= l_ad) {
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1431
1432 /* --------------------------------------------------------------------- */
1433
1434 /*
1435 * Adjust the node's allocation descriptors to reflect the new mapping; do
1436 * take note that we might glue to existing allocation descriptors.
1437 *
1438 * XXX Note there can only be one allocation being recorded/mount; maybe
1439 * explicit allocation in shedule thread?
1440 */
1441
/*
 * Record the freshly allocated blocks of `buf' in the node's allocation
 * descriptors: `mapping' holds one logical block number per block, all
 * within virtual partition `vpart_num'; `node_ad_cpy' is caller-supplied
 * scratch space used as a rewrite stack for the descriptors.  Errors are
 * reported through buf->b_error.
 */
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset;
	uint32_t skip_len, skipped;
	int addr_type, icbflags;
	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from = %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* internally allocated nodes carry data in the descriptor itself */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot = 0;
	cpy_slot = 0;
	foffset = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	restart_slot = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	/* note: len/flags still describe the overlapping slot found above */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start = mapping[lb_num];
		run_length = 1;
		while (lb_num < num_lb-1) {
			/*
			 * Consecutive OR identical mappings extend the run;
			 * identical ones can occur with sequential
			 * (VAT-style) allocation -- TODO confirm intent.
			 */
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	slot = restart_slot;
	foffset = restart_foffset;

	skip_len = till - foffset; /* relative to start of slot */
	slot_offset = from - foffset; /* offset in first encounted slot */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
				"skip_len %d, "
				"vp %d, lb %d, len %d, flags %d\n",
			slot, slot_offset, skip_len,
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		skipped = MIN(len, skip_len);
		DPRINTF(ALLOC, ("\t4d: skipped %d\n", skipped));

		/* we're in the first slot and need to skip its head */
		/*
		 * NOTE(review): the test is on the extent flags, not on
		 * being the first slot; slot_offset is zeroed below so the
		 * adjustment degenerates on later slots -- verify.
		 */
		if (flags != UDF_EXT_FREE) {
			/* skip these blocks first */
			num_lb = (slot_offset + lb_size-1) / lb_size;
			len -= slot_offset;
			skip_len -= slot_offset;
			foffset += slot_offset;
			lb_num += num_lb;
			skipped -= slot_offset;

			/* free space till `skipped' */
			num_lb = (skipped + lb_size-1) / lb_size;
			udf_free_allocated_space(ump, lb_num,
				udf_rw16(s_ad.loc.part_num), num_lb);
			lb_num += num_lb;
		}
		/* we're by definition at the 2nd slot, so clear */
		slot_offset = 0;

		/* proceed */
		len -= skipped;
		skip_len -= skipped;
		foffset += skipped;

		if (len) {
			KASSERT(skipped % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}
		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
				udf_rw16(c_ad.loc.part_num),
				udf_rw32(c_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(c_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	/*
	 * NOTE(review): c_ad.len is tested without udf_rw32() here, unlike
	 * everywhere else in this function -- verify on big-endian machines.
	 */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
			"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	/* the node's descriptors should now be sane */
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
1789
1790 /* --------------------------------------------------------------------- */
1791
/*
 * Grow the node to `new_size' bytes.  Internally allocated (in-descriptor)
 * data is grown in place when it still fits, or else evacuated through
 * vn_rdwr() and the node converted to a normal allocation scheme; the
 * remaining growth is appended as free (unallocated) extents, chunked to
 * the maximum extent length.  Returns 0 or an error code.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint8_t *data_pos, *evacuated_data;
	int icbflags, addr_type;
	int slot, cpy_slot;
	int eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	UDF_LOCK_NODE(udf_node, 0);
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* maximum extent length, rounded down to whole logical blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* grown data still fits inside the descriptor itself? */
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen += size_diff;
			objsize += size_diff;
			l_ad += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len = udf_rw64(inflen);
				fe->l_ad = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0,
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);
			/*
			 * NOTE(review): `error' is not checked here; a
			 * failed read is silently ignored and the value is
			 * overwritten further down -- verify intent.
			 */

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc */
		/* XXX HOWTO selecting allocation method ? */
		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= UDF_ICB_LONG_ALLOC; /* XXX or SHORT_ALLOC */
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* one free extent covering the evacuated (old) data */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot = 0;
		cpy_slot = 0;
		foffset = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
		error = udf_append_adslot(udf_node, slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len = udf_rw64(inflen);
	} else {
		efe->inf_len = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0,
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2049
2050 /* --------------------------------------------------------------------- */
2051
2052 int
2053 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2054 {
2055 struct vnode *vp = udf_node->vnode;
2056 struct udf_mount *ump = udf_node->ump;
2057 struct file_entry *fe;
2058 struct extfile_entry *efe;
2059 struct icb_tag *icbtag;
2060 struct long_ad c_ad, s_ad, *node_ad_cpy;
2061 uint64_t size_diff, old_size, inflen, objsize;
2062 uint64_t foffset, end_foffset;
2063 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2064 uint32_t lb_size, dscr_size, crclen;
2065 uint32_t slot_offset;
2066 uint32_t len, flags, max_len;
2067 uint32_t num_lb, lb_num;
2068 uint32_t max_l_ad, l_ad, l_ea;
2069 uint16_t vpart_num;
2070 uint8_t *data_pos;
2071 int icbflags, addr_type;
2072 int slot, cpy_slot, cpy_slots;
2073 int eof, error;
2074
2075 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2076 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2077
2078 UDF_LOCK_NODE(udf_node, 0);
2079 lb_size = udf_rw32(ump->logical_vol->lb_size);
2080 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2081
2082 /* do the work */
2083 fe = udf_node->fe;
2084 efe = udf_node->efe;
2085 if (fe) {
2086 icbtag = &fe->icbtag;
2087 inflen = udf_rw64(fe->inf_len);
2088 objsize = inflen;
2089 dscr_size = sizeof(struct file_entry) -1;
2090 l_ea = udf_rw32(fe->l_ea);
2091 l_ad = udf_rw32(fe->l_ad);
2092 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2093 } else {
2094 icbtag = &efe->icbtag;
2095 inflen = udf_rw64(efe->inf_len);
2096 objsize = udf_rw64(efe->obj_size);
2097 dscr_size = sizeof(struct extfile_entry) -1;
2098 l_ea = udf_rw32(efe->l_ea);
2099 l_ad = udf_rw32(efe->l_ad);
2100 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2101 }
2102 max_l_ad = lb_size - dscr_size - l_ea;
2103
2104 icbflags = udf_rw16(icbtag->flags);
2105 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2106
2107 old_size = inflen;
2108 size_diff = old_size - new_size;
2109
2110 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2111
2112 /* shrink the node to its new size */
2113 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2114 /* only reflect size change directly in the node */
2115 KASSERT(new_size <= max_l_ad);
2116 inflen -= size_diff;
2117 objsize -= size_diff;
2118 l_ad -= size_diff;
2119 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2120 if (fe) {
2121 fe->inf_len = udf_rw64(inflen);
2122 fe->l_ad = udf_rw32(l_ad);
2123 fe->tag.desc_crc_len = udf_rw32(crclen);
2124 } else {
2125 efe->inf_len = udf_rw64(inflen);
2126 efe->obj_size = udf_rw64(objsize);
2127 efe->l_ad = udf_rw32(l_ad);
2128 efe->tag.desc_crc_len = udf_rw32(crclen);
2129 }
2130 error = 0;
2131
2132 /* clear the space in the descriptor */
2133 KASSERT(old_size > new_size);
2134 memset(data_pos + new_size, 0, old_size - new_size);
2135
2136 /* TODO zero appened space in buffer! */
2137 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2138
2139 /* set new size for uvm */
2140 uvm_vnp_setsize(vp, new_size);
2141 UDF_UNLOCK_NODE(udf_node, 0);
2142
2143 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2144 KASSERT(new_inflen == orig_inflen - size_diff);
2145 KASSERT(new_lbrec == orig_lbrec);
2146 KASSERT(new_lbrec == 0);
2147
2148 return 0;
2149 }
2150
2151 /* setup node cleanup extents copy space */
2152 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2153 M_UDFMNT, M_WAITOK);
2154 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2155
2156 /*
2157 * Shrink the node by releasing the allocations and truncate the last
2158 * allocation to the new size. If the new size fits into the
2159 * allocation descriptor itself, transform it into an
2160 * UDF_ICB_INTERN_ALLOC.
2161 */
2162 slot = 0;
2163 cpy_slot = 0;
2164 foffset = 0;
2165
2166 /* 1) copy till first overlap piece to the rewrite buffer */
2167 for (;;) {
2168 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2169 if (eof) {
2170 DPRINTF(WRITE,
2171 ("Shrink node failed: "
2172 "encountered EOF\n"));
2173 error = EINVAL;
2174 goto errorout; /* panic? */
2175 }
2176 len = udf_rw32(s_ad.len);
2177 flags = UDF_EXT_FLAGS(len);
2178 len = UDF_EXT_LEN(len);
2179
2180 if (flags == UDF_EXT_REDIRECT) {
2181 slot++;
2182 continue;
2183 }
2184
2185 end_foffset = foffset + len;
2186 if (end_foffset > new_size)
2187 break; /* found */
2188
2189 node_ad_cpy[cpy_slot++] = s_ad;
2190
2191 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2192 "-> stack\n",
2193 udf_rw16(s_ad.loc.part_num),
2194 udf_rw32(s_ad.loc.lb_num),
2195 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2196 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2197
2198 foffset = end_foffset;
2199 slot++;
2200 }
2201 slot_offset = new_size - foffset;
2202
2203 /* 2) trunc overlapping slot at overlap and copy it */
2204 if (slot_offset > 0) {
2205 lb_num = udf_rw32(s_ad.loc.lb_num);
2206 vpart_num = udf_rw16(s_ad.loc.part_num);
2207
2208 if (flags == UDF_EXT_ALLOCATED) {
2209 lb_num += (slot_offset + lb_size -1) / lb_size;
2210 num_lb = (len - slot_offset + lb_size - 1) / lb_size;
2211
2212 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2213 }
2214
2215 s_ad.len = udf_rw32(slot_offset | flags);
2216 node_ad_cpy[cpy_slot++] = s_ad;
2217 slot++;
2218
2219 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2220 "-> stack\n",
2221 udf_rw16(s_ad.loc.part_num),
2222 udf_rw32(s_ad.loc.lb_num),
2223 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2224 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2225 }
2226
2227 /* 3) delete remainder */
2228 for (;;) {
2229 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2230 if (eof)
2231 break;
2232
2233 len = udf_rw32(s_ad.len);
2234 flags = UDF_EXT_FLAGS(len);
2235 len = UDF_EXT_LEN(len);
2236
2237 if (flags == UDF_EXT_REDIRECT) {
2238 slot++;
2239 continue;
2240 }
2241
2242 DPRINTF(ALLOC, ("\t3: delete remainder "
2243 "vp %d lb %d, len %d, flags %d\n",
2244 udf_rw16(s_ad.loc.part_num),
2245 udf_rw32(s_ad.loc.lb_num),
2246 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2247 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2248
2249 if (flags == UDF_EXT_ALLOCATED) {
2250 lb_num = udf_rw32(s_ad.loc.lb_num);
2251 vpart_num = udf_rw16(s_ad.loc.part_num);
2252 num_lb = (len + lb_size - 1) / lb_size;
2253
2254 udf_free_allocated_space(ump, lb_num, vpart_num,
2255 num_lb);
2256 }
2257
2258 slot++;
2259 }
2260
2261 /* 4) if it will fit into the descriptor then convert */
2262 if (new_size < max_l_ad) {
2263 /*
2264 		 * rescue/evacuate the old piece by reading it in, and converting
2265 		 * it to internal alloc.
2266 */
2267 if (new_size == 0) {
2268 /* XXX/TODO only for zero sizing now */
2269 udf_wipe_adslots(udf_node);
2270
2271 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2272 icbflags |= UDF_ICB_INTERN_ALLOC;
2273 icbtag->flags = udf_rw16(icbflags);
2274
2275 inflen -= size_diff; KASSERT(inflen == 0);
2276 objsize -= size_diff;
2277 l_ad = new_size;
2278 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2279 if (fe) {
2280 fe->inf_len = udf_rw64(inflen);
2281 fe->l_ad = udf_rw32(l_ad);
2282 fe->tag.desc_crc_len = udf_rw32(crclen);
2283 } else {
2284 efe->inf_len = udf_rw64(inflen);
2285 efe->obj_size = udf_rw64(objsize);
2286 efe->l_ad = udf_rw32(l_ad);
2287 efe->tag.desc_crc_len = udf_rw32(crclen);
2288 }
2289 /* eventually copy in evacuated piece */
2290 /* set new size for uvm */
2291 uvm_vnp_setsize(vp, new_size);
2292
2293 free(node_ad_cpy, M_UDFMNT);
2294 UDF_UNLOCK_NODE(udf_node, 0);
2295
2296 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2297 KASSERT(new_inflen == orig_inflen - size_diff);
2298 KASSERT(new_inflen == 0);
2299 KASSERT(new_lbrec == 0);
2300
2301 return 0;
2302 }
2303
2304 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2305 }
2306
2307 /* 5) reset node descriptors */
2308 udf_wipe_adslots(udf_node);
2309
2310 /* 6) copy back extents; merge when possible. Recounting on the fly */
2311 cpy_slots = cpy_slot;
2312
2313 c_ad = node_ad_cpy[0];
2314 slot = 0;
2315 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2316 s_ad = node_ad_cpy[cpy_slot];
2317
2318 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2319 "lb %d, len %d, flags %d\n",
2320 udf_rw16(s_ad.loc.part_num),
2321 udf_rw32(s_ad.loc.lb_num),
2322 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2323 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2324
2325 /* see if we can merge */
2326 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2327 /* not mergable (anymore) */
2328 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2329 "len %d, flags %d\n",
2330 udf_rw16(c_ad.loc.part_num),
2331 udf_rw32(c_ad.loc.lb_num),
2332 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2333 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2334
2335 error = udf_append_adslot(udf_node, slot, &c_ad);
2336 if (error)
2337 goto errorout; /* panic? */
2338 c_ad = s_ad;
2339 slot++;
2340 }
2341 }
2342
2343 /* 7) push rest slot (if any) */
2344 if (UDF_EXT_LEN(c_ad.len) > 0) {
2345 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2346 "len %d, flags %d\n",
2347 udf_rw16(c_ad.loc.part_num),
2348 udf_rw32(c_ad.loc.lb_num),
2349 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2350 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2351
2352 error = udf_append_adslot(udf_node, slot, &c_ad);
2353 if (error)
2354 goto errorout; /* panic? */
2355 ;
2356 }
2357
2358 inflen -= size_diff;
2359 objsize -= size_diff;
2360 if (fe) {
2361 fe->inf_len = udf_rw64(inflen);
2362 } else {
2363 efe->inf_len = udf_rw64(inflen);
2364 efe->obj_size = udf_rw64(objsize);
2365 }
2366 error = 0;
2367
2368 /* set new size for uvm */
2369 uvm_vnp_setsize(vp, new_size);
2370
2371 errorout:
2372 free(node_ad_cpy, M_UDFMNT);
2373 UDF_UNLOCK_NODE(udf_node, 0);
2374
2375 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2376 KASSERT(new_inflen == orig_inflen - size_diff);
2377
2378 return error;
2379 }
2380
2381