/* $NetBSD: udf_allocation.c,v 1.5 2008/06/25 15:28:29 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.5 2008/06/25 15:28:29 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
/*
 * VTOI: map a vnode to its filesystem-private udf_node.
 * The argument is parenthesized so expressions like VTOI(cond ? a : b)
 * expand correctly (the original expanded the bare argument).
 */
#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
 * searching for the same or adjacent position again.
 */
87
88 /* --------------------------------------------------------------------- */
89 //#ifdef DEBUG
90 #if 1
91 #if 1
/*
 * Debug helper: dump all allocation descriptors of an udf_node to the
 * console.  Prints nothing unless the UDF_DEBUG_ADWLK bit is set in
 * udf_verbose.
 */
static void
udf_node_dump(struct udf_node *udf_node) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad;
	uint64_t inflen;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t *data_pos;
	int part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags;

	if ((udf_verbose & UDF_DEBUG_ADWLK) == 0)
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		/* NOTE(review): the -1 presumably compensates for a one-byte
		 * placeholder in the on-disc struct; confirm in ecma167-udf.h */
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		/* allocation descriptors follow the extended attributes */
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* room left in the descriptor block for allocation descriptors */
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	printf("udf_node_dump:\n");
	printf("\tudf_node %p\n", udf_node);

	/* intern allocation embeds the file data; there are no extents */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		printf("\t\tIntern alloc, len = %"PRIu64"\n", inflen);
		return;
	}

	printf("\t\tInflen = %"PRIu64"\n", inflen);
	printf("\t\tl_ad = %d\n", l_ad);

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	/* dump the whole descriptor area, deliberately also past l_ad */
	printf("\t\t");
	for (ad_off = 0; ad_off < max_l_ad-adlen; ad_off += adlen) {
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len = udf_rw32(short_ad->len);
			lb_num = udf_rw32(short_ad->lb_num);
			part_num = -1;	/* short_ads carry no partition field */
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		} else {
			long_ad = (struct long_ad *) (data_pos + ad_off);
			len = udf_rw32(long_ad->len);
			lb_num = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		}
		printf("[");
		if (part_num >= 0)
			printf("part %d, ", part_num);
		printf("lb_num %d, len %d", lb_num, len);
		if (flags)
			printf(", flags %d", flags);
		printf("] ");
		/* mark where the recorded descriptors end */
		if (ad_off + adlen == l_ad)
			printf("\n\t\tl_ad END\n\t\t");
	}
	printf("\n");
}
179 #else
180 #define udf_node_dump(a)
181 #endif
182
/*
 * Debug consistency check on a node's allocation administration: walk all
 * recorded extents, sum their lengths and verify they match the node's
 * recorded information length (inf_len) and recorded logical block count
 * (logblks_rec).  The running totals are returned through *cnt_inflen and
 * *cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t *data_pos;
	int part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags, whole_lb;

	/* only lock mutex; we're not changing and its a debug checking func */
	mutex_enter(&udf_node->node_mutex);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		logblksrec = udf_rw64(fe->logblks_rec);
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		logblksrec = udf_rw64(efe->logblks_rec);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* room for allocation descriptors in the descriptor block */
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* intern: file data is embedded in the descriptor itself */
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		mutex_exit(&udf_node->node_mutex);
		return;
	}

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	/* start counting */
	whole_lb = 1;
	for (ad_off = 0; ad_off < l_ad; ad_off += adlen) {
		/* only the very last extent may end on a partial block */
		KASSERT(whole_lb == 1);
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len = udf_rw32(short_ad->len);
			lb_num = udf_rw32(short_ad->lb_num);
			part_num = -1;
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		} else {
			long_ad = (struct long_ad *) (data_pos + ad_off);
			len = udf_rw32(long_ad->len);
			lb_num = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		}
		KASSERT(flags != UDF_EXT_REDIRECT);	/* not implemented yet */
		*cnt_inflen += len;
		if (flags == UDF_EXT_ALLOCATED) {
			/* round up: a partial block still occupies a block */
			*cnt_logblksrec += (len + lb_size -1) / lb_size;
		}
		whole_lb = ((len % lb_size) == 0);
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

	mutex_exit(&udf_node->node_mutex);
	if (0)
		udf_node_dump(udf_node);
}
279 #else
280 #define udf_node_sanity_check(a, b, c)
281 #endif
282
283 /* --------------------------------------------------------------------- */
284
/*
 * Translate the virtual address in icb_loc (virtual partition number plus
 * partition-relative logical block) into a disc logical block number,
 * returned in *lb_numres.  *extres is set to the number of consecutive
 * logical blocks for which this translation remains valid so callers can
 * translate contiguous runs without re-entering for every block.
 *
 * Returns 0 on success, EINVAL for out-of-range or untranslatable
 * addresses, or the error from reading the VAT file.
 */
int
udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
	uint32_t *lb_numres, uint32_t *extres)
{
	struct part_desc *pdesc;
	struct spare_map_entry *sme;
	struct long_ad s_icb_loc;
	uint64_t foffset, end_foffset;
	uint32_t lb_size, len;
	uint32_t lb_num, lb_rel, lb_packet;
	uint32_t udf_rw32_lbmap, ext_offset;
	uint16_t vpart;
	int rel, part, error, eof, slot, flags;

	assert(ump && icb_loc && lb_numres);

	vpart = udf_rw16(icb_loc->loc.part_num);
	lb_num = udf_rw32(icb_loc->loc.lb_num);
	if (vpart > UDF_VTOP_RAWPART)
		return EINVAL;

	/* the metadata case rewrites vpart/lb_num and restarts here */
translate_again:
	part = ump->vtop[vpart];
	pdesc = ump->partitions[part];

	switch (ump->vtop_tp[vpart]) {
	case UDF_VTOP_TYPE_RAW :
		/* 1:1 to the end of the device */
		*lb_numres = lb_num;
		*extres = INT_MAX;
		return 0;
	case UDF_VTOP_TYPE_PHYS :
		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* extent from here to the end of the partition */
		*extres = udf_rw32(pdesc->part_len) - lb_num;
		return 0;
	case UDF_VTOP_TYPE_VIRT :
		/* only maps one logical block, lookup in VAT */
		if (lb_num >= ump->vat_entries)		/* XXX > or >= ? */
			return EINVAL;

		/* lookup in virtual allocation table file */
		mutex_enter(&ump->allocate_mutex);
		error = udf_vat_read(ump->vat_node,
				(uint8_t *) &udf_rw32_lbmap, 4,
				ump->vat_offset + lb_num * 4);
		mutex_exit(&ump->allocate_mutex);

		if (error)
			return error;

		lb_num = udf_rw32(udf_rw32_lbmap);

		/* transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* just one logical block */
		*extres = 1;
		return 0;
	case UDF_VTOP_TYPE_SPARABLE :
		/* check if the packet containing the lb_num is remapped */
		lb_packet = lb_num / ump->sparable_packet_size;
		lb_rel = lb_num % ump->sparable_packet_size;

		for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
			sme = &ump->sparing_table->entries[rel];
			if (lb_packet == udf_rw32(sme->org)) {
				/* NOTE maps to absolute disc logical block! */
				*lb_numres = udf_rw32(sme->map) + lb_rel;
				*extres = ump->sparable_packet_size - lb_rel;
				return 0;
			}
		}

		/* not remapped; transform into its disc logical block */
		if (lb_num > udf_rw32(pdesc->part_len))
			return EINVAL;
		*lb_numres = lb_num + udf_rw32(pdesc->start_loc);

		/* rest of block */
		*extres = ump->sparable_packet_size - lb_rel;
		return 0;
	case UDF_VTOP_TYPE_META :
		/* we have to look into the file's allocation descriptors */

		/* use metadatafile allocation mutex */
		lb_size = udf_rw32(ump->logical_vol->lb_size);

		UDF_LOCK_NODE(ump->metadata_node, 0);

		/* get first overlapping extent */
		foffset = 0;
		slot = 0;
		for (;;) {
			udf_get_adslot(ump->metadata_node,
				slot, &s_icb_loc, &eof);
			DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
				"len = %d, lb_num = %d, part = %d\n",
				slot, eof,
				UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
				UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
				udf_rw32(s_icb_loc.loc.lb_num),
				udf_rw16(s_icb_loc.loc.part_num)));
			if (eof) {
				DPRINTF(TRANSLATE,
					("Meta partition translation "
					 "failed: can't seek location\n"));
				UDF_UNLOCK_NODE(ump->metadata_node, 0);
				return EINVAL;
			}
			len = udf_rw32(s_icb_loc.len);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);

			/* redirect extents hold no file data; skip them */
			if (flags == UDF_EXT_REDIRECT) {
				slot++;
				continue;
			}

			end_foffset = foffset + len;

			if (end_foffset > lb_num * lb_size)
				break;	/* found */
			foffset = end_foffset;
			slot++;
		}
		/* found overlapping slot */
		ext_offset = lb_num * lb_size - foffset;

		/* process extent offset */
		lb_num = udf_rw32(s_icb_loc.loc.lb_num);
		vpart = udf_rw16(s_icb_loc.loc.part_num);
		lb_num += (ext_offset + lb_size -1) / lb_size;
		len -= ext_offset;
		ext_offset = 0;

		/* NOTE(review): s_icb_loc.len is not passed through
		 * udf_rw32() here, unlike everywhere above — verify this is
		 * correct on big-endian machines */
		flags = UDF_EXT_FLAGS(s_icb_loc.len);

		UDF_UNLOCK_NODE(ump->metadata_node, 0);
		if (flags != UDF_EXT_ALLOCATED) {
			DPRINTF(TRANSLATE, ("Metadata partition translation "
					    "failed: not allocated\n"));
			return EINVAL;
		}

		/*
		 * vpart and lb_num are updated, translate again since we
		 * might be mapped on sparable media
		 */
		goto translate_again;
	default:
		printf("UDF vtop translation scheme %d unimplemented yet\n",
			ump->vtop_tp[vpart]);
	}

	return EINVAL;
}
448
449 /* --------------------------------------------------------------------- */
450
/*
 * Translate an extent (in logical blocks) into logical block numbers; used
 * for read and write operations.  Doesn't check extents.
 */
455
/*
 * Translate num_lb logical blocks of udf_node's file, starting at file
 * block 'from', into disc sector numbers written into map[].  Each map
 * entry is either a physical sector, UDF_TRANS_ZERO (unbacked/free
 * extent) or — for intern-allocated nodes — the single marker
 * UDF_TRANS_INTERN.
 *
 * Returns 0 on success, ENOENT for a NULL node, EINVAL when the range
 * cannot be resolved, or an error from udf_translate_vtop().
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* intern-allocated files have their data inside the descriptor */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirect extents hold no file data; skip them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; byte offset of 'from' within it */
	ext_offset = from * lb_size - foffset;

	/* walk extents until num_lb blocks have been translated */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		lb_num = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num += (ext_offset + lb_size -1) / lb_size;
		overlap = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;

		/*
		 * Note that the while loop is necessary since the extent
		 * returned by udf_translate_vtop() doesn't have to span
		 * the whole extent being processed.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* unbacked space reads back as zeroes */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* translate virtual to physical per run */
				t_ad.loc.lb_num = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
623
624 /* --------------------------------------------------------------------- */
625
626 static int
627 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
628 {
629 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
630 uint8_t *blob;
631 int entry, chunk, found, error;
632
633 KASSERT(ump);
634 KASSERT(ump->logical_vol);
635
636 lb_size = udf_rw32(ump->logical_vol->lb_size);
637 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
638
639 /* TODO static allocation of search chunk */
640
641 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
642 found = 0;
643 error = 0;
644 entry = 0;
645 do {
646 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
647 if (chunk <= 0)
648 break;
649 /* load in chunk */
650 error = udf_vat_read(ump->vat_node, blob, chunk,
651 ump->vat_offset + lb_num * 4);
652
653 if (error)
654 break;
655
656 /* search this chunk */
657 for (entry=0; entry < chunk /4; entry++, lb_num++) {
658 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
659 lb_map = udf_rw32(udf_rw32_lbmap);
660 if (lb_map == 0xffffffff) {
661 found = 1;
662 break;
663 }
664 }
665 } while (!found);
666 if (error) {
667 printf("udf_search_free_vatloc: error reading in vat chunk "
668 "(lb %d, size %d)\n", lb_num, chunk);
669 }
670
671 if (!found) {
672 /* extend VAT */
673 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
674 lb_num = ump->vat_entries;
675 ump->vat_entries++;
676 }
677
678 /* mark entry with initialiser just in case */
679 lb_map = udf_rw32(0xfffffffe);
680 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
681 ump->vat_offset + lb_num *4);
682 ump->vat_last_free_lb = lb_num;
683
684 free(blob, M_UDFTEMP);
685 *lbnumres = lb_num;
686 return 0;
687 }
688
689
/*
 * Allocate up to *num_lb logical blocks from the given free-space bitmap.
 * In this bitmap a SET bit marks a free block (see udf_bitmap_free, which
 * sets bits when releasing blocks).  Found block numbers are appended to
 * the logical (lmappos) and physical (pmappos) mapping arrays; ptov is the
 * partition-to-volume offset added to form the physical address.  On
 * return *num_lb holds the number of blocks that could NOT be allocated.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;	/* start on a byte boundary */
	/* two passes: from the resume position, then wrapped to the start */
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* ffs() gives the first SET bit, i.e. a free block */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos);
			if (bit == 0) {
				/* byte fully allocated; try the next one */
				offset += 8;
				continue;
			}
			/* claim the block by clearing its free bit */
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*pmappos++ = lb_num + ptov;
			*num_lb = *num_lb - 1;
			// offset = (offset & ~7);
		}
	}

	/* remember resume position for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
737
738
739 static void
740 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
741 {
742 uint32_t offset;
743 uint32_t bit, bitval;
744 uint8_t *bpos;
745
746 offset = lb_num;
747
748 /* starter bits */
749 bpos = bitmap->bits + offset/8;
750 bit = offset % 8;
751 while ((bit != 0) && (num_lb > 0)) {
752 bitval = (1 << bit);
753 KASSERT((*bpos & bitval) == 0);
754 *bpos |= bitval;
755 offset++; num_lb--;
756 bit = (bit + 1) % 8;
757 }
758 if (num_lb == 0)
759 return;
760
761 /* whole bytes */
762 KASSERT(bit == 0);
763 bpos = bitmap->bits + offset / 8;
764 while (num_lb >= 8) {
765 KASSERT((*bpos == 0));
766 *bpos = 255;
767 offset += 8; num_lb -= 8;
768 bpos++;
769 }
770
771 /* stop bits */
772 KASSERT(num_lb < 8);
773 bit = 0;
774 while (num_lb > 0) {
775 bitval = (1 << bit);
776 KASSERT((*bpos & bitval) == 0);
777 *bpos |= bitval;
778 offset++; num_lb--;
779 bit = (bit + 1) % 8;
780 }
781 }
782
783
784 /* allocate a contiguous sequence of sectornumbers */
/*
 * Allocate num_lb logical blocks from the mount's metadata or data
 * partition using the given allocation scheme (VAT, sequential or space
 * bitmap).  The chosen virtual partition is returned in *alloc_partp and
 * the allocated blocks are written into lmapping (partition-relative) and
 * pmapping (physical/disc).
 *
 * Returns 0 on success or ENOSPC/an I/O error.  Called with the
 * allocate_mutex NOT held; it is taken here.
 */
static int
udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
	int num_lb, uint16_t *alloc_partp,
	uint64_t *lmapping, uint64_t *pmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos, *pmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_part;
	int error;

	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* pick source partition/track depending on data kind */
	if (ismetadata) {
		alloc_part = ump->metadata_part;
		alloc_track = &ump->metadata_track;
		other_track = &ump->data_track;
	} else {
		alloc_part = ump->data_part;
		alloc_track = &ump->data_track;
		other_track = &ump->metadata_track;
	}

	*alloc_partp = alloc_part;

	error = 0;
	/* XXX check disc space */

	pdesc = ump->partitions[ump->vtop[alloc_part]];
	lmappos = lmapping;
	pmappos = pmapping;

	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;
			*pmappos = 0;	/* will get late-allocated */
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* hand out the next writable sectors of the track */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*pmappos++ = alloc_track->next_writable;
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}
		/* if both kinds share one track, keep the copies in sync */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		ptov = udf_rw32(pdesc->start_loc);

		/* allocate on unallocated bits page */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[alloc_part];
		udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
			pmappos, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		/* alloc_num_lb now holds the shortfall, if any */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + alloc_part;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :
	case UDF_ALLOC_METASEQUENTIAL :
	case UDF_ALLOC_RELAXEDSEQUENTIAL :
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		pmappos = pmapping;
		printf("udf_allocate_space, mapping l->p:\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("\t%"PRIu64" -> %"PRIu64"\n",
				*lmappos++, *pmappos++);
		}
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
900
901 /* --------------------------------------------------------------------- */
902
/*
 * Return num_lb previously-allocated logical blocks, starting at the
 * virtual block lb_num on virtual partition vpart_num, to free space.
 * Depending on the partition type this sets bits in the freed/unallocated
 * bitmap (and bumps the free count in the logvol integrity descriptor) or
 * clears the corresponding VAT entry.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		/* schedule the bitmap to be written back on close */
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		/* 0xffffffff marks a free VAT entry */
		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* let the next VAT search resume at or before this entry */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
974
975 /* --------------------------------------------------------------------- */
976
977 int
978 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
979 uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
980 {
981 int ismetadata, alloc_type;
982
983 ismetadata = (udf_c_type == UDF_C_NODE);
984 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
985
986 #ifdef DIAGNOSTIC
987 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
988 panic("udf_pre_allocate_space: bad c_type on VAT!\n");
989 }
990 #endif
991
992 /* reserve size for VAT allocated data */
993 if (alloc_type == UDF_ALLOC_VAT) {
994 mutex_enter(&ump->allocate_mutex);
995 ump->uncomitted_lb += num_lb;
996 mutex_exit(&ump->allocate_mutex);
997 }
998
999 return udf_allocate_space(ump, ismetadata, alloc_type,
1000 num_lb, alloc_partp, lmapping, pmapping);
1001 }
1002
1003 /* --------------------------------------------------------------------- */
1004
1005 /*
1006 * Allocate a buf on disc for direct write out. The space doesn't have to be
1007 * contiguous as the caller takes care of this.
1008 */
1009
/*
 * Allocate disc space for a buf that is about to be written out.  For
 * each logical block of the buf a sector is allocated and recorded in
 * lmapping/pmapping; the buf's b_blkno is pointed at the first physical
 * sector.  For userdata and FID bufs the new location is also recorded in
 * the owning node's allocation descriptors (via
 * udf_record_allocation_in_node) and the node's outstanding-bufs counter
 * is decremented.  Allocation failure here is a panic: the space was
 * supposed to have been accounted for in udf_pre_allocate_space().
 */
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	uint16_t vpart_num;
	int lb_size, blks, udf_c_type;
	int ismetadata, alloc_type;
	int num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb = (buf->b_bcount + lb_size -1) / lb_size;
	blks = lb_size / DEV_BSIZE;	/* device blocks per logical block */
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	ismetadata = (udf_c_type == UDF_C_NODE);
	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;

#ifdef DIAGNOSTIC
	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
		panic("udf_late_allocate_buf: bad c_type on VAT!\n");
	}
#endif

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, its allready allocated */
		if (alloc_type != UDF_ALLOC_VAT)
			return;

		/* allocate sequential */
		alloc_type = UDF_ALLOC_SEQUENTIAL;
	}

	error = udf_allocate_space(ump, ismetadata, alloc_type,
			num_lb, &vpart_num, lmapping, pmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* commit our sector count; clamp at zero, never go negative */
	mutex_enter(&ump->allocate_mutex);
	if (num_lb > ump->uncomitted_lb) {
		ump->uncomitted_lb = 0;
	} else {
		ump->uncomitted_lb -= num_lb;
	}
	mutex_exit(&ump->allocate_mutex);

	/* point the buf at its first physical sector, in device blocks */
	buf->b_blkno = (*pmapping) * blks;

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
		udf_node->outstanding_bufs--;
		splx(s);
	}
}
1081
1082 /* --------------------------------------------------------------------- */
1083
1084 /*
1085 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1086 * possible (anymore); a2 returns the rest piece.
1087 */
1088
1089 static int
1090 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1091 {
1092 uint32_t max_len, merge_len;
1093 uint32_t a1_len, a2_len;
1094 uint32_t a1_flags, a2_flags;
1095 uint32_t a1_lbnum, a2_lbnum;
1096 uint16_t a1_part, a2_part;
1097
1098 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1099
1100 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1101 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1102 a1_lbnum = udf_rw32(a1->loc.lb_num);
1103 a1_part = udf_rw16(a1->loc.part_num);
1104
1105 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1106 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1107 a2_lbnum = udf_rw32(a2->loc.lb_num);
1108 a2_part = udf_rw16(a2->loc.part_num);
1109
1110 /* defines same space */
1111 if (a1_flags != a2_flags)
1112 return 1;
1113
1114 if (a1_flags != UDF_EXT_FREE) {
1115 /* the same partition */
1116 if (a1_part != a2_part)
1117 return 1;
1118
1119 /* a2 is successor of a1 */
1120 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1121 return 1;
1122 }
1123
1124 /* merge as most from a2 if possible */
1125 merge_len = MIN(a2_len, max_len - a1_len);
1126 a1_len += merge_len;
1127 a2_len -= merge_len;
1128 a2_lbnum += merge_len/lb_size;
1129
1130 a1->len = udf_rw32(a1_len | a1_flags);
1131 a2->len = udf_rw32(a2_len | a2_flags);
1132 a2->loc.lb_num = udf_rw32(a2_lbnum);
1133
1134 if (a2_len > 0)
1135 return 1;
1136
1137 /* there is space over to merge */
1138 return 0;
1139 }
1140
1141 /* --------------------------------------------------------------------- */
1142
1143 static void
1144 udf_wipe_adslots(struct udf_node *udf_node)
1145 {
1146 struct file_entry *fe;
1147 struct extfile_entry *efe;
1148 struct alloc_ext_entry *ext;
1149 uint64_t inflen, objsize;
1150 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1151 uint8_t *data_pos;
1152 int extnr;
1153
1154 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1155
1156 fe = udf_node->fe;
1157 efe = udf_node->efe;
1158 if (fe) {
1159 inflen = udf_rw64(fe->inf_len);
1160 objsize = inflen;
1161 dscr_size = sizeof(struct file_entry) -1;
1162 l_ea = udf_rw32(fe->l_ea);
1163 l_ad = udf_rw32(fe->l_ad);
1164 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1165 } else {
1166 inflen = udf_rw64(efe->inf_len);
1167 objsize = udf_rw64(efe->obj_size);
1168 dscr_size = sizeof(struct extfile_entry) -1;
1169 l_ea = udf_rw32(efe->l_ea);
1170 l_ad = udf_rw32(efe->l_ad);
1171 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1172 }
1173 max_l_ad = lb_size - dscr_size - l_ea;
1174
1175 /* wipe fe/efe */
1176 memset(data_pos, 0, max_l_ad);
1177 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1178 if (fe) {
1179 fe->l_ad = udf_rw32(0);
1180 fe->logblks_rec = udf_rw64(0);
1181 fe->tag.desc_crc_len = udf_rw32(crclen);
1182 } else {
1183 efe->l_ad = udf_rw32(0);
1184 efe->logblks_rec = udf_rw64(0);
1185 efe->tag.desc_crc_len = udf_rw32(crclen);
1186 }
1187
1188 /* wipe all allocation extent entries */
1189 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1190 ext = udf_node->ext[extnr];
1191 dscr_size = sizeof(struct alloc_ext_entry) -1;
1192 max_l_ad = lb_size - dscr_size;
1193 memset(data_pos, 0, max_l_ad);
1194 ext->l_ad = udf_rw32(0);
1195
1196 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1197 ext->tag.desc_crc_len = udf_rw32(crclen);
1198 }
1199 }
1200
1201 /* --------------------------------------------------------------------- */
1202
1203 void
1204 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1205 int *eof) {
1206 struct file_entry *fe;
1207 struct extfile_entry *efe;
1208 struct alloc_ext_entry *ext;
1209 struct icb_tag *icbtag;
1210 struct short_ad *short_ad;
1211 struct long_ad *long_ad;
1212 uint32_t offset;
1213 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad;
1214 uint8_t *data_pos;
1215 int icbflags, addr_type, adlen, extnr;
1216
1217 /* determine what descriptor we are in */
1218 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1219
1220 fe = udf_node->fe;
1221 efe = udf_node->efe;
1222 if (fe) {
1223 icbtag = &fe->icbtag;
1224 dscr_size = sizeof(struct file_entry) -1;
1225 l_ea = udf_rw32(fe->l_ea);
1226 l_ad = udf_rw32(fe->l_ad);
1227 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1228 } else {
1229 icbtag = &efe->icbtag;
1230 dscr_size = sizeof(struct extfile_entry) -1;
1231 l_ea = udf_rw32(efe->l_ea);
1232 l_ad = udf_rw32(efe->l_ad);
1233 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1234 }
1235 max_l_ad = lb_size - dscr_size - l_ea;
1236
1237 icbflags = udf_rw16(icbtag->flags);
1238 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1239
1240 /* just in case we're called on an intern, its EOF */
1241 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1242 memset(icb, 0, sizeof(struct long_ad));
1243 *eof = 1;
1244 return;
1245 }
1246
1247 adlen = 0;
1248 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1249 adlen = sizeof(struct short_ad);
1250 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1251 adlen = sizeof(struct long_ad);
1252 }
1253
1254 /* if offset too big, we go to the allocation extensions */
1255 offset = slot * adlen;
1256 extnr = -1;
1257 while (offset >= max_l_ad) {
1258 extnr++;
1259 offset -= max_l_ad;
1260 ext = udf_node->ext[extnr];
1261 dscr_size = sizeof(struct alloc_ext_entry) -1;
1262 l_ad = udf_rw32(ext->l_ad);
1263 max_l_ad = lb_size - dscr_size;
1264 data_pos = (uint8_t *) ext + dscr_size;
1265 if (extnr > udf_node->num_extensions) {
1266 l_ad = 0; /* force EOF */
1267 break;
1268 }
1269 }
1270
1271 *eof = (offset >= l_ad) || (l_ad == 0);
1272 if (*eof) {
1273 memset(icb, 0, sizeof(struct long_ad));
1274 return;
1275 }
1276
1277 /* get the element */
1278 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1279 short_ad = (struct short_ad *) (data_pos + offset);
1280 icb->len = short_ad->len;
1281 icb->loc.part_num = udf_node->loc.loc.part_num;
1282 icb->loc.lb_num = short_ad->lb_num;
1283 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1284 long_ad = (struct long_ad *) (data_pos + offset);
1285 *icb = *long_ad;
1286 }
1287 }
1288
1289 /* --------------------------------------------------------------------- */
1290
/*
 * Write allocation descriptor `icb' into logical slot `slot' of the node.
 * The slot may overwrite an existing descriptor or append one directly
 * after the last recorded descriptor; the recorded-logical-blocks count
 * and the descriptor's CRC length are adjusted on the fly.
 *
 * Returns 0 on success or EFBIG when the slot does not fit in the node's
 * descriptor area and its linked allocation extents.
 */
int
udf_append_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb) {
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint32_t offset, rest, len;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea = udf_rw32(fe->l_ea);
		/* l_ad_p/logblks_rec_p alias into the descriptor itself */
		l_ad_p = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag = &efe->icbtag;
		dscr = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea = udf_rw32(efe->l_ea);
		l_ad_p = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	/* allocation descriptors follow the extended attributes */
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	/* descriptor entry size depends on short vs long allocation */
	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	/*
	 * NOTE(review): this walk tests `offset > max_l_ad' whereas
	 * udf_get_adslot() tests `offset >= max_l_ad'; also logblks_rec_p
	 * keeps pointing into the (e)fe while l_ad_p/dscr are switched to
	 * the extent -- confirm both are intended.
	 */
	offset = slot * adlen;
	extnr = 0;
	while (offset > max_l_ad) {
		offset -= max_l_ad;
		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;

		KASSERT(ext != NULL);
		l_ad_p = &ext->l_ad;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		extnr++;
	}
	/* offset is offset within the current (E)FE/AED */
	l_ad = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	if (extnr > udf_node->num_extensions)
		return EFBIG; /* too fragmented */

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0); /* ignore */
			o_icb.loc.lb_num = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts: subtract the blocks of the old
			 * allocated extent before adding the new one */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* calculate rest space in this descriptor */
	rest = max_l_ad - offset;
	if (rest <= adlen) {
		/* create redirect and link new allocation extension */
		/* TODO not implemented yet; callers get EFBIG for now */
		printf("udf_append_to_adslot: can't create allocation extention yet\n");
		return EFBIG;
	}

	/* write out the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	if (UDF_EXT_FLAGS(icb->len) == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed */
	if (offset >= l_ad) {
		/* we appended a new slot past the old end */
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1426
1427 /* --------------------------------------------------------------------- */
1428
1429 /*
1430 * Adjust the node's allocation descriptors to reflect the new mapping; do
1431 * take note that we might glue to existing allocation descriptors.
1432 *
 * XXX Note there can only be one allocation being recorded per mount;
 * maybe do explicit allocation in the schedule thread?
1435 */
1436
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset;
	uint32_t skip_len, skipped;
	int addr_type, icbflags;
	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from = %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot = 0;
	cpy_slot = 0;
	foffset = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		/* redirect slots occupy no file offset; just skip them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap starts so step 4 can resume here */
	restart_slot = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		/* keep only the head of the overlapping extent */
		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start = mapping[lb_num];
		run_length = 1;
		/* collect a run of consecutive (or identical) mappings */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length */
	slot = restart_slot;
	foffset = restart_foffset;

	skip_len = till - foffset;	/* relative to start of slot */
	slot_offset = from - foffset;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, skip_len %d, vp %d, "
			"lb %d, len %d, flags %d\n",
			slot, skip_len, udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		skipped = MIN(len, skip_len);
		if (flags != UDF_EXT_FREE) {
			if (slot_offset) {
				/* skip these blocks first */
				num_lb = (slot_offset + lb_size-1) / lb_size;
				len -= slot_offset;
				skip_len -= slot_offset;
				foffset += slot_offset;
				lb_num += num_lb;
				skipped -= slot_offset;
				slot_offset = 0;
			}
			/* free space from current position till `skipped' */
			num_lb = (skipped + lb_size-1) / lb_size;
			udf_free_allocated_space(ump, lb_num,
				udf_rw16(s_ad.loc.part_num), num_lb);
			lb_num += num_lb;
		}
		len -= skipped;
		skip_len -= skipped;
		foffset += skipped;

		if (len) {
			KASSERT(skipped % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}
		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	/* c_ad accumulates; s_ad is merged into it until merging fails */
	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
			"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	/* the node's descriptors should now be sane */
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
1777
1778 /* --------------------------------------------------------------------- */
1779
/*
 * Grow the node's file length to `new_size'.  For internally (embedded)
 * allocated nodes the data either grows in place or, when it no longer
 * fits in the descriptor, is evacuated and the node is converted to
 * normal (long_ad) allocation.  The grown area is recorded as free
 * (unallocated) extents.  Returns 0 or an errno value.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint8_t *data_pos, *evacuated_data;
	int icbflags, addr_type;
	int slot, cpy_slot;
	int eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	UDF_LOCK_NODE(udf_node, 0);
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* largest extent length expressible, rounded to whole blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen += size_diff;
			objsize += size_diff;
			l_ad += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len = udf_rw64(inflen);
				fe->l_ad = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			/* NOTE(review): size set to old_size but write size
			 * to new_size -- confirm this ordering is intended */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif

			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			/* NOTE(review): error from this read is not checked
			 * before converting the node -- confirm acceptable */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0,
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc */
		/* XXX HOWTO selecting allocation method ? */
		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |= UDF_ICB_LONG_ALLOC;	/* XXX or SHORT_ALLOC */
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* the old in-node data becomes one free extent to refill */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot = 0;
		cpy_slot = 0;
		foffset = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);

			/* redirect extents occupy no file offset */
			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(s_ad.len) > 0) {
		error = udf_append_adslot(udf_node, slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len = udf_rw64(inflen);
	} else {
		efe->inf_len = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0,
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2037
2038 /* --------------------------------------------------------------------- */
2039
2040 int
2041 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2042 {
2043 struct vnode *vp = udf_node->vnode;
2044 struct udf_mount *ump = udf_node->ump;
2045 struct file_entry *fe;
2046 struct extfile_entry *efe;
2047 struct icb_tag *icbtag;
2048 struct long_ad c_ad, s_ad, *node_ad_cpy;
2049 uint64_t size_diff, old_size, inflen, objsize;
2050 uint64_t foffset, end_foffset;
2051 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2052 uint32_t lb_size, dscr_size, crclen;
2053 uint32_t slot_offset;
2054 uint32_t len, flags, max_len;
2055 uint32_t num_lb, lb_num;
2056 uint32_t max_l_ad, l_ad, l_ea;
2057 uint16_t vpart_num;
2058 uint8_t *data_pos;
2059 int icbflags, addr_type;
2060 int slot, cpy_slot, cpy_slots;
2061 int eof, error;
2062
2063 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2064 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2065
2066 UDF_LOCK_NODE(udf_node, 0);
2067 lb_size = udf_rw32(ump->logical_vol->lb_size);
2068 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2069
2070 /* do the work */
2071 fe = udf_node->fe;
2072 efe = udf_node->efe;
2073 if (fe) {
2074 icbtag = &fe->icbtag;
2075 inflen = udf_rw64(fe->inf_len);
2076 objsize = inflen;
2077 dscr_size = sizeof(struct file_entry) -1;
2078 l_ea = udf_rw32(fe->l_ea);
2079 l_ad = udf_rw32(fe->l_ad);
2080 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2081 } else {
2082 icbtag = &efe->icbtag;
2083 inflen = udf_rw64(efe->inf_len);
2084 objsize = udf_rw64(efe->obj_size);
2085 dscr_size = sizeof(struct extfile_entry) -1;
2086 l_ea = udf_rw32(efe->l_ea);
2087 l_ad = udf_rw32(efe->l_ad);
2088 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2089 }
2090 max_l_ad = lb_size - dscr_size - l_ea;
2091
2092 icbflags = udf_rw16(icbtag->flags);
2093 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2094
2095 old_size = inflen;
2096 size_diff = old_size - new_size;
2097
2098 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2099
2100 /* shrink the node to its new size */
2101 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2102 /* only reflect size change directly in the node */
2103 KASSERT(new_size <= max_l_ad);
2104 inflen -= size_diff;
2105 objsize -= size_diff;
2106 l_ad -= size_diff;
2107 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2108 if (fe) {
2109 fe->inf_len = udf_rw64(inflen);
2110 fe->l_ad = udf_rw32(l_ad);
2111 fe->tag.desc_crc_len = udf_rw32(crclen);
2112 } else {
2113 efe->inf_len = udf_rw64(inflen);
2114 efe->obj_size = udf_rw64(objsize);
2115 efe->l_ad = udf_rw32(l_ad);
2116 efe->tag.desc_crc_len = udf_rw32(crclen);
2117 }
2118 error = 0;
2119 /* TODO zero appened space in buffer! */
2120 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2121
2122 /* set new size for uvm */
2123 uvm_vnp_setsize(vp, new_size);
2124 UDF_UNLOCK_NODE(udf_node, 0);
2125
2126 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2127 KASSERT(new_inflen == orig_inflen - size_diff);
2128 KASSERT(new_lbrec == orig_lbrec);
2129 KASSERT(new_lbrec == 0);
2130
2131 return 0;
2132 }
2133
2134 /* setup node cleanup extents copy space */
2135 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2136 M_UDFMNT, M_WAITOK);
2137 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2138
2139 /*
2140 * Shrink the node by releasing the allocations and truncate the last
2141 * allocation to the new size. If the new size fits into the
2142 * allocation descriptor itself, transform it into an
2143 * UDF_ICB_INTERN_ALLOC.
2144 */
2145 slot = 0;
2146 cpy_slot = 0;
2147 foffset = 0;
2148
2149 /* 1) copy till first overlap piece to the rewrite buffer */
2150 for (;;) {
2151 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2152 if (eof) {
2153 DPRINTF(WRITE,
2154 ("Shrink node failed: "
2155 "encountered EOF\n"));
2156 error = EINVAL;
2157 goto errorout; /* panic? */
2158 }
2159 len = udf_rw32(s_ad.len);
2160 flags = UDF_EXT_FLAGS(len);
2161 len = UDF_EXT_LEN(len);
2162
2163 if (flags == UDF_EXT_REDIRECT) {
2164 slot++;
2165 continue;
2166 }
2167
2168 end_foffset = foffset + len;
2169 if (end_foffset > new_size)
2170 break; /* found */
2171
2172 node_ad_cpy[cpy_slot++] = s_ad;
2173
2174 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2175 "-> stack\n",
2176 udf_rw16(s_ad.loc.part_num),
2177 udf_rw32(s_ad.loc.lb_num),
2178 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2179 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2180
2181 foffset = end_foffset;
2182 slot++;
2183 }
2184 slot_offset = new_size - foffset;
2185
2186 /* 2) trunc overlapping slot at overlap and copy it */
2187 if (slot_offset > 0) {
2188 lb_num = udf_rw32(s_ad.loc.lb_num);
2189 vpart_num = udf_rw16(s_ad.loc.part_num);
2190
2191 if (flags == UDF_EXT_ALLOCATED) {
2192 lb_num += (slot_offset + lb_size -1) / lb_size;
2193 num_lb = (len - slot_offset + lb_size - 1) / lb_size;
2194
2195 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2196 }
2197
2198 s_ad.len = udf_rw32(slot_offset | flags);
2199 node_ad_cpy[cpy_slot++] = s_ad;
2200 slot++;
2201
2202 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2203 "-> stack\n",
2204 udf_rw16(s_ad.loc.part_num),
2205 udf_rw32(s_ad.loc.lb_num),
2206 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2207 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2208 }
2209
2210 /* 3) delete remainder */
2211 for (;;) {
2212 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2213 if (eof)
2214 break;
2215
2216 len = udf_rw32(s_ad.len);
2217 flags = UDF_EXT_FLAGS(len);
2218 len = UDF_EXT_LEN(len);
2219
2220 if (flags == UDF_EXT_REDIRECT) {
2221 slot++;
2222 continue;
2223 }
2224
2225 DPRINTF(ALLOC, ("\t3: delete remainder "
2226 "vp %d lb %d, len %d, flags %d\n",
2227 udf_rw16(s_ad.loc.part_num),
2228 udf_rw32(s_ad.loc.lb_num),
2229 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2230 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2231
2232 if (flags == UDF_EXT_ALLOCATED) {
2233 lb_num = udf_rw32(s_ad.loc.lb_num);
2234 vpart_num = udf_rw16(s_ad.loc.part_num);
2235 num_lb = (len + lb_size - 1) / lb_size;
2236
2237 udf_free_allocated_space(ump, lb_num, vpart_num,
2238 num_lb);
2239 }
2240
2241 slot++;
2242 }
2243
2244 /* 4) if it will fit into the descriptor then convert */
2245 if (new_size < max_l_ad) {
2246 /*
2247 		 * rescue/evacuate old piece by reading it in, and convert it
2248 * to internal alloc.
2249 */
2250 if (new_size == 0) {
2251 /* XXX/TODO only for zero sizing now */
2252 udf_wipe_adslots(udf_node);
2253
2254 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2255 icbflags |= UDF_ICB_INTERN_ALLOC;
2256 icbtag->flags = udf_rw16(icbflags);
2257
2258 inflen -= size_diff; KASSERT(inflen == 0);
2259 objsize -= size_diff;
2260 l_ad = new_size;
2261 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2262 if (fe) {
2263 fe->inf_len = udf_rw64(inflen);
2264 fe->l_ad = udf_rw32(l_ad);
2265 fe->tag.desc_crc_len = udf_rw32(crclen);
2266 } else {
2267 efe->inf_len = udf_rw64(inflen);
2268 efe->obj_size = udf_rw64(objsize);
2269 efe->l_ad = udf_rw32(l_ad);
2270 efe->tag.desc_crc_len = udf_rw32(crclen);
2271 }
2272 /* eventually copy in evacuated piece */
2273 /* set new size for uvm */
2274 uvm_vnp_setsize(vp, new_size);
2275
2276 free(node_ad_cpy, M_UDFMNT);
2277 UDF_UNLOCK_NODE(udf_node, 0);
2278
2279 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2280 KASSERT(new_inflen == orig_inflen - size_diff);
2281 KASSERT(new_inflen == 0);
2282 KASSERT(new_lbrec == 0);
2283
2284 return 0;
2285 }
2286
2287 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2288 }
2289
2290 /* 5) reset node descriptors */
2291 udf_wipe_adslots(udf_node);
2292
2293 /* 6) copy back extents; merge when possible. Recounting on the fly */
2294 cpy_slots = cpy_slot;
2295
2296 c_ad = node_ad_cpy[0];
2297 slot = 0;
2298 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2299 s_ad = node_ad_cpy[cpy_slot];
2300
2301 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2302 "lb %d, len %d, flags %d\n",
2303 udf_rw16(s_ad.loc.part_num),
2304 udf_rw32(s_ad.loc.lb_num),
2305 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2306 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2307
2308 /* see if we can merge */
2309 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2310 			/* not mergeable (anymore) */
2311 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2312 "len %d, flags %d\n",
2313 udf_rw16(c_ad.loc.part_num),
2314 udf_rw32(c_ad.loc.lb_num),
2315 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2316 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2317
2318 error = udf_append_adslot(udf_node, slot, &c_ad);
2319 if (error)
2320 goto errorout; /* panic? */
2321 c_ad = s_ad;
2322 slot++;
2323 }
2324 }
2325
2326 /* 7) push rest slot (if any) */
2327 if (UDF_EXT_LEN(c_ad.len) > 0) {
2328 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2329 "len %d, flags %d\n",
2330 udf_rw16(c_ad.loc.part_num),
2331 udf_rw32(c_ad.loc.lb_num),
2332 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2333 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2334
2335 error = udf_append_adslot(udf_node, slot, &c_ad);
2336 if (error)
2337 goto errorout; /* panic? */
2338 ;
2339 }
2340
2341 inflen -= size_diff;
2342 objsize -= size_diff;
2343 if (fe) {
2344 fe->inf_len = udf_rw64(inflen);
2345 } else {
2346 efe->inf_len = udf_rw64(inflen);
2347 efe->obj_size = udf_rw64(objsize);
2348 }
2349 error = 0;
2350
2351 /* set new size for uvm */
2352 uvm_vnp_setsize(vp, new_size);
2353
2354 errorout:
2355 free(node_ad_cpy, M_UDFMNT);
2356 UDF_UNLOCK_NODE(udf_node, 0);
2357
2358 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2359 KASSERT(new_inflen == orig_inflen - size_diff);
2360
2361 return error;
2362 }
2363
2364