udf_allocation.c revision 1.4 1 /* $NetBSD: udf_allocation.c,v 1.4 2008/06/25 10:46:35 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.4 2008/06/25 10:46:35 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
81 /*
82 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
83 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
84  * since actions are most likely sequential and thus seeking doesn't need
85 * searching for the same or adjacent position again.
86 */
87
88 /* --------------------------------------------------------------------- */
89 //#ifdef DEBUG
90 #if 1
91 #if 1
92 static void
93 udf_node_dump(struct udf_node *udf_node) {
94 struct file_entry *fe;
95 struct extfile_entry *efe;
96 struct icb_tag *icbtag;
97 struct short_ad *short_ad;
98 struct long_ad *long_ad;
99 uint64_t inflen;
100 uint32_t icbflags, addr_type, max_l_ad;
101 uint32_t len, lb_num;
102 uint8_t *data_pos;
103 int part_num;
104 int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags;
105
106 if ((udf_verbose & UDF_DEBUG_ADWLK) == 0)
107 return;
108
109 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
110
111 fe = udf_node->fe;
112 efe = udf_node->efe;
113 if (fe) {
114 icbtag = &fe->icbtag;
115 inflen = udf_rw64(fe->inf_len);
116 dscr_size = sizeof(struct file_entry) -1;
117 l_ea = udf_rw32(fe->l_ea);
118 l_ad = udf_rw32(fe->l_ad);
119 data_pos = (uint8_t *) fe + dscr_size + l_ea;
120 } else {
121 icbtag = &efe->icbtag;
122 inflen = udf_rw64(efe->inf_len);
123 dscr_size = sizeof(struct extfile_entry) -1;
124 l_ea = udf_rw32(efe->l_ea);
125 l_ad = udf_rw32(efe->l_ad);
126 data_pos = (uint8_t *) efe + dscr_size + l_ea;
127 }
128 max_l_ad = lb_size - dscr_size - l_ea;
129
130 icbflags = udf_rw16(icbtag->flags);
131 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
132
133 printf("udf_node_dump:\n");
134 printf("\tudf_node %p\n", udf_node);
135
136 if (addr_type == UDF_ICB_INTERN_ALLOC) {
137 printf("\t\tIntern alloc, len = %"PRIu64"\n", inflen);
138 return;
139 }
140
141 printf("\t\tInflen = %"PRIu64"\n", inflen);
142 printf("\t\tl_ad = %d\n", l_ad);
143
144 if (addr_type == UDF_ICB_SHORT_ALLOC) {
145 adlen = sizeof(struct short_ad);
146 } else {
147 adlen = sizeof(struct long_ad);
148 }
149
150 printf("\t\t");
151 for (ad_off = 0; ad_off < max_l_ad-adlen; ad_off += adlen) {
152 if (addr_type == UDF_ICB_SHORT_ALLOC) {
153 short_ad = (struct short_ad *) (data_pos + ad_off);
154 len = udf_rw32(short_ad->len);
155 lb_num = udf_rw32(short_ad->lb_num);
156 part_num = -1;
157 flags = UDF_EXT_FLAGS(len);
158 len = UDF_EXT_LEN(len);
159 } else {
160 long_ad = (struct long_ad *) (data_pos + ad_off);
161 len = udf_rw32(long_ad->len);
162 lb_num = udf_rw32(long_ad->loc.lb_num);
163 part_num = udf_rw16(long_ad->loc.part_num);
164 flags = UDF_EXT_FLAGS(len);
165 len = UDF_EXT_LEN(len);
166 }
167 printf("[");
168 if (part_num >= 0)
169 printf("part %d, ", part_num);
170 printf("lb_num %d, len %d", lb_num, len);
171 if (flags)
172 printf(", flags %d", flags);
173 printf("] ");
174 if (ad_off + adlen == l_ad)
175 printf("\n\t\tl_ad END\n\t\t");
176 }
177 printf("\n");
178 }
179 #else
180 #define udf_node_dump(a)
181 #endif
182
183 static void
184 udf_node_sanity_check(struct udf_node *udf_node,
185 uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
186 struct file_entry *fe;
187 struct extfile_entry *efe;
188 struct icb_tag *icbtag;
189 struct short_ad *short_ad;
190 struct long_ad *long_ad;
191 uint64_t inflen, logblksrec;
192 uint32_t icbflags, addr_type, max_l_ad;
193 uint32_t len, lb_num;
194 uint8_t *data_pos;
195 int part_num;
196 int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags, whole_lb;
197
198 /* only lock mutex; we're not changing and its a debug checking func */
199 mutex_enter(&udf_node->node_mutex);
200
201 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
202
203 fe = udf_node->fe;
204 efe = udf_node->efe;
205 if (fe) {
206 icbtag = &fe->icbtag;
207 inflen = udf_rw64(fe->inf_len);
208 logblksrec = udf_rw64(fe->logblks_rec);
209 dscr_size = sizeof(struct file_entry) -1;
210 l_ea = udf_rw32(fe->l_ea);
211 l_ad = udf_rw32(fe->l_ad);
212 data_pos = (uint8_t *) fe + dscr_size + l_ea;
213 } else {
214 icbtag = &efe->icbtag;
215 inflen = udf_rw64(efe->inf_len);
216 logblksrec = udf_rw64(efe->logblks_rec);
217 dscr_size = sizeof(struct extfile_entry) -1;
218 l_ea = udf_rw32(efe->l_ea);
219 l_ad = udf_rw32(efe->l_ad);
220 data_pos = (uint8_t *) efe + dscr_size + l_ea;
221 }
222 max_l_ad = lb_size - dscr_size - l_ea;
223 icbflags = udf_rw16(icbtag->flags);
224 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
225
226 /* reset counters */
227 *cnt_inflen = 0;
228 *cnt_logblksrec = 0;
229
230 if (addr_type == UDF_ICB_INTERN_ALLOC) {
231 KASSERT(l_ad <= max_l_ad);
232 KASSERT(l_ad == inflen);
233 *cnt_inflen = inflen;
234 mutex_exit(&udf_node->node_mutex);
235 return;
236 }
237
238 if (addr_type == UDF_ICB_SHORT_ALLOC) {
239 adlen = sizeof(struct short_ad);
240 } else {
241 adlen = sizeof(struct long_ad);
242 }
243
244 /* start counting */
245 whole_lb = 1;
246 for (ad_off = 0; ad_off < l_ad; ad_off += adlen) {
247 KASSERT(whole_lb == 1);
248 if (addr_type == UDF_ICB_SHORT_ALLOC) {
249 short_ad = (struct short_ad *) (data_pos + ad_off);
250 len = udf_rw32(short_ad->len);
251 lb_num = udf_rw32(short_ad->lb_num);
252 part_num = -1;
253 flags = UDF_EXT_FLAGS(len);
254 len = UDF_EXT_LEN(len);
255 } else {
256 long_ad = (struct long_ad *) (data_pos + ad_off);
257 len = udf_rw32(long_ad->len);
258 lb_num = udf_rw32(long_ad->loc.lb_num);
259 part_num = udf_rw16(long_ad->loc.part_num);
260 flags = UDF_EXT_FLAGS(len);
261 len = UDF_EXT_LEN(len);
262 }
263 KASSERT(flags != UDF_EXT_REDIRECT); /* not implemented yet */
264 *cnt_inflen += len;
265 if (flags == UDF_EXT_ALLOCATED) {
266 *cnt_logblksrec += (len + lb_size -1) / lb_size;
267 }
268 whole_lb = ((len % lb_size) == 0);
269 }
270 /* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */
271
272 KASSERT(*cnt_inflen == inflen);
273 KASSERT(*cnt_logblksrec == logblksrec);
274
275 mutex_exit(&udf_node->node_mutex);
276 if (0)
277 udf_node_dump(udf_node);
278 }
279 #else
280 #define udf_node_sanity_check(a, b, c)
281 #endif
282
283 /* --------------------------------------------------------------------- */
284
285 int
286 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
287 uint32_t *lb_numres, uint32_t *extres)
288 {
289 struct part_desc *pdesc;
290 struct spare_map_entry *sme;
291 struct long_ad s_icb_loc;
292 uint64_t foffset, end_foffset;
293 uint32_t lb_size, len;
294 uint32_t lb_num, lb_rel, lb_packet;
295 uint32_t udf_rw32_lbmap, ext_offset;
296 uint16_t vpart;
297 int rel, part, error, eof, slot, flags;
298
299 assert(ump && icb_loc && lb_numres);
300
301 vpart = udf_rw16(icb_loc->loc.part_num);
302 lb_num = udf_rw32(icb_loc->loc.lb_num);
303 if (vpart > UDF_VTOP_RAWPART)
304 return EINVAL;
305
306 translate_again:
307 part = ump->vtop[vpart];
308 pdesc = ump->partitions[part];
309
310 switch (ump->vtop_tp[vpart]) {
311 case UDF_VTOP_TYPE_RAW :
312 /* 1:1 to the end of the device */
313 *lb_numres = lb_num;
314 *extres = INT_MAX;
315 return 0;
316 case UDF_VTOP_TYPE_PHYS :
317 /* transform into its disc logical block */
318 if (lb_num > udf_rw32(pdesc->part_len))
319 return EINVAL;
320 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
321
322 /* extent from here to the end of the partition */
323 *extres = udf_rw32(pdesc->part_len) - lb_num;
324 return 0;
325 case UDF_VTOP_TYPE_VIRT :
326 /* only maps one logical block, lookup in VAT */
327 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
328 return EINVAL;
329
330 /* lookup in virtual allocation table file */
331 mutex_enter(&ump->allocate_mutex);
332 error = udf_vat_read(ump->vat_node,
333 (uint8_t *) &udf_rw32_lbmap, 4,
334 ump->vat_offset + lb_num * 4);
335 mutex_exit(&ump->allocate_mutex);
336
337 if (error)
338 return error;
339
340 lb_num = udf_rw32(udf_rw32_lbmap);
341
342 /* transform into its disc logical block */
343 if (lb_num > udf_rw32(pdesc->part_len))
344 return EINVAL;
345 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
346
347 /* just one logical block */
348 *extres = 1;
349 return 0;
350 case UDF_VTOP_TYPE_SPARABLE :
351 /* check if the packet containing the lb_num is remapped */
352 lb_packet = lb_num / ump->sparable_packet_size;
353 lb_rel = lb_num % ump->sparable_packet_size;
354
355 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
356 sme = &ump->sparing_table->entries[rel];
357 if (lb_packet == udf_rw32(sme->org)) {
358 /* NOTE maps to absolute disc logical block! */
359 *lb_numres = udf_rw32(sme->map) + lb_rel;
360 *extres = ump->sparable_packet_size - lb_rel;
361 return 0;
362 }
363 }
364
365 /* transform into its disc logical block */
366 if (lb_num > udf_rw32(pdesc->part_len))
367 return EINVAL;
368 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
369
370 /* rest of block */
371 *extres = ump->sparable_packet_size - lb_rel;
372 return 0;
373 case UDF_VTOP_TYPE_META :
374 /* we have to look into the file's allocation descriptors */
375
376 /* use metadatafile allocation mutex */
377 lb_size = udf_rw32(ump->logical_vol->lb_size);
378
379 UDF_LOCK_NODE(ump->metadata_node, 0);
380
381 /* get first overlapping extent */
382 foffset = 0;
383 slot = 0;
384 for (;;) {
385 udf_get_adslot(ump->metadata_node,
386 slot, &s_icb_loc, &eof);
387 if (eof) {
388 DPRINTF(TRANSLATE,
389 ("Meta partition translation "
390 "failed: can't seek location\n"));
391 UDF_UNLOCK_NODE(ump->metadata_node, 0);
392 return EINVAL;
393 }
394 len = udf_rw32(s_icb_loc.len);
395 flags = UDF_EXT_FLAGS(len);
396 len = UDF_EXT_LEN(len);
397
398 end_foffset = foffset + len;
399
400 if (end_foffset > lb_num * lb_size)
401 break; /* found */
402 if (flags != UDF_EXT_REDIRECT)
403 foffset = end_foffset;
404 slot++;
405 }
406 /* found overlapping slot */
407 ext_offset = lb_num * lb_size - foffset;
408
409 /* process extent offset */
410 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
411 vpart = udf_rw16(s_icb_loc.loc.part_num);
412 lb_num += (ext_offset + lb_size -1) / lb_size;
413 len -= ext_offset;
414 ext_offset = 0;
415
416 flags = UDF_EXT_FLAGS(s_icb_loc.len);
417
418 UDF_UNLOCK_NODE(ump->metadata_node, 0);
419 if (flags != UDF_EXT_ALLOCATED) {
420 DPRINTF(TRANSLATE, ("Metadata partition translation "
421 "failed: not allocated\n"));
422 return EINVAL;
423 }
424
425 /*
426 * vpart and lb_num are updated, translate again since we
427 * might be mapped on sparable media
428 */
429 goto translate_again;
430 default:
431 printf("UDF vtop translation scheme %d unimplemented yet\n",
432 ump->vtop_tp[vpart]);
433 }
434
435 return EINVAL;
436 }
437
438 /* --------------------------------------------------------------------- */
439
440 /*
441 * Translate an extent (in logical_blocks) into logical block numbers; used
442 * for read and write operations. DOESNT't check extents.
443 */
444
445 int
446 udf_translate_file_extent(struct udf_node *udf_node,
447 uint32_t from, uint32_t num_lb,
448 uint64_t *map)
449 {
450 struct udf_mount *ump;
451 struct icb_tag *icbtag;
452 struct long_ad t_ad, s_ad;
453 uint64_t transsec;
454 uint64_t foffset, end_foffset;
455 uint32_t transsec32;
456 uint32_t lb_size;
457 uint32_t ext_offset;
458 uint32_t lb_num, len;
459 uint32_t overlap, translen;
460 uint16_t vpart_num;
461 int eof, error, flags;
462 int slot, addr_type, icbflags;
463
464 if (!udf_node)
465 return ENOENT;
466
467 KASSERT(num_lb > 0);
468
469 UDF_LOCK_NODE(udf_node, 0);
470
471 /* initialise derivative vars */
472 ump = udf_node->ump;
473 lb_size = udf_rw32(ump->logical_vol->lb_size);
474
475 if (udf_node->fe) {
476 icbtag = &udf_node->fe->icbtag;
477 } else {
478 icbtag = &udf_node->efe->icbtag;
479 }
480 icbflags = udf_rw16(icbtag->flags);
481 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
482
483 /* do the work */
484 if (addr_type == UDF_ICB_INTERN_ALLOC) {
485 *map = UDF_TRANS_INTERN;
486 UDF_UNLOCK_NODE(udf_node, 0);
487 return 0;
488 }
489
490 /* find first overlapping extent */
491 foffset = 0;
492 slot = 0;
493 for (;;) {
494 udf_get_adslot(udf_node, slot, &s_ad, &eof);
495 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
496 "lb_num = %d, part = %d\n", slot, eof,
497 UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
498 UDF_EXT_LEN(udf_rw32(s_ad.len)),
499 udf_rw32(s_ad.loc.lb_num),
500 udf_rw16(s_ad.loc.part_num)));
501 if (eof) {
502 DPRINTF(TRANSLATE,
503 ("Translate file extent "
504 "failed: can't seek location\n"));
505 UDF_UNLOCK_NODE(udf_node, 0);
506 return EINVAL;
507 }
508 len = udf_rw32(s_ad.len);
509 flags = UDF_EXT_FLAGS(len);
510 len = UDF_EXT_LEN(len);
511 lb_num = udf_rw32(s_ad.loc.lb_num);
512
513 if (flags == UDF_EXT_REDIRECT) {
514 slot++;
515 continue;
516 }
517
518 end_foffset = foffset + len;
519
520 if (end_foffset > from * lb_size)
521 break; /* found */
522 foffset = end_foffset;
523 slot++;
524 }
525 /* found overlapping slot */
526 ext_offset = from * lb_size - foffset;
527
528 for (;;) {
529 udf_get_adslot(udf_node, slot, &s_ad, &eof);
530 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
531 "lb_num = %d, part = %d\n", slot, eof,
532 UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
533 UDF_EXT_LEN(udf_rw32(s_ad.len)),
534 udf_rw32(s_ad.loc.lb_num),
535 udf_rw16(s_ad.loc.part_num)));
536 if (eof) {
537 DPRINTF(TRANSLATE,
538 ("Translate file extent "
539 "failed: past eof\n"));
540 UDF_UNLOCK_NODE(udf_node, 0);
541 return EINVAL;
542 }
543
544 len = udf_rw32(s_ad.len);
545 flags = UDF_EXT_FLAGS(len);
546 len = UDF_EXT_LEN(len);
547
548 lb_num = udf_rw32(s_ad.loc.lb_num);
549 vpart_num = udf_rw16(s_ad.loc.part_num);
550
551 end_foffset = foffset + len;
552
553 /* process extent, don't forget to advance on ext_offset! */
554 lb_num += (ext_offset + lb_size -1) / lb_size;
555 overlap = (len - ext_offset + lb_size -1) / lb_size;
556 ext_offset = 0;
557
558 /*
559 * note that the while(){} is nessisary for the extent that
560 * the udf_translate_vtop() returns doens't have to span the
561 * whole extent.
562 */
563
564 overlap = MIN(overlap, num_lb);
565 while (overlap && (flags != UDF_EXT_REDIRECT)) {
566 switch (flags) {
567 case UDF_EXT_FREE :
568 case UDF_EXT_ALLOCATED_BUT_NOT_USED :
569 transsec = UDF_TRANS_ZERO;
570 translen = overlap;
571 while (overlap && num_lb && translen) {
572 *map++ = transsec;
573 lb_num++;
574 overlap--; num_lb--; translen--;
575 }
576 break;
577 case UDF_EXT_ALLOCATED :
578 t_ad.loc.lb_num = udf_rw32(lb_num);
579 t_ad.loc.part_num = udf_rw16(vpart_num);
580 error = udf_translate_vtop(ump,
581 &t_ad, &transsec32, &translen);
582 transsec = transsec32;
583 if (error) {
584 UDF_UNLOCK_NODE(udf_node, 0);
585 return error;
586 }
587 while (overlap && num_lb && translen) {
588 *map++ = transsec;
589 lb_num++; transsec++;
590 overlap--; num_lb--; translen--;
591 }
592 break;
593 default:
594 DPRINTF(TRANSLATE,
595 ("Translate file extent "
596 "failed: bad flags %x\n", flags));
597 UDF_UNLOCK_NODE(udf_node, 0);
598 return EINVAL;
599 }
600 }
601 if (num_lb == 0)
602 break;
603
604 if (flags != UDF_EXT_REDIRECT)
605 foffset = end_foffset;
606 slot++;
607 }
608 UDF_UNLOCK_NODE(udf_node, 0);
609
610 return 0;
611 }
612
613 /* --------------------------------------------------------------------- */
614
615 static int
616 udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
617 {
618 uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
619 uint8_t *blob;
620 int entry, chunk, found, error;
621
622 KASSERT(ump);
623 KASSERT(ump->logical_vol);
624
625 lb_size = udf_rw32(ump->logical_vol->lb_size);
626 blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
627
628 /* TODO static allocation of search chunk */
629
630 lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
631 found = 0;
632 error = 0;
633 entry = 0;
634 do {
635 chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
636 if (chunk <= 0)
637 break;
638 /* load in chunk */
639 error = udf_vat_read(ump->vat_node, blob, chunk,
640 ump->vat_offset + lb_num * 4);
641
642 if (error)
643 break;
644
645 /* search this chunk */
646 for (entry=0; entry < chunk /4; entry++, lb_num++) {
647 udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
648 lb_map = udf_rw32(udf_rw32_lbmap);
649 if (lb_map == 0xffffffff) {
650 found = 1;
651 break;
652 }
653 }
654 } while (!found);
655 if (error) {
656 printf("udf_search_free_vatloc: error reading in vat chunk "
657 "(lb %d, size %d)\n", lb_num, chunk);
658 }
659
660 if (!found) {
661 /* extend VAT */
662 DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
663 lb_num = ump->vat_entries;
664 ump->vat_entries++;
665 }
666
667 /* mark entry with initialiser just in case */
668 lb_map = udf_rw32(0xfffffffe);
669 udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
670 ump->vat_offset + lb_num *4);
671 ump->vat_last_free_lb = lb_num;
672
673 free(blob, M_UDFTEMP);
674 *lbnumres = lb_num;
675 return 0;
676 }
677
678
679 static void
680 udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
681 uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
682 {
683 uint32_t offset, lb_num, bit;
684 int32_t diff;
685 uint8_t *bpos;
686 int pass;
687
688 if (!ismetadata) {
689 /* heuristic to keep the two pointers not too close */
690 diff = bitmap->data_pos - bitmap->metadata_pos;
691 if ((diff >= 0) && (diff < 1024))
692 bitmap->data_pos = bitmap->metadata_pos + 1024;
693 }
694 offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
695 offset &= ~7;
696 for (pass = 0; pass < 2; pass++) {
697 if (offset >= bitmap->max_offset)
698 offset = 0;
699
700 while (offset < bitmap->max_offset) {
701 if (*num_lb == 0)
702 break;
703
704 /* use first bit not set */
705 bpos = bitmap->bits + offset/8;
706 bit = ffs(*bpos);
707 if (bit == 0) {
708 offset += 8;
709 continue;
710 }
711 *bpos &= ~(1 << (bit-1));
712 lb_num = offset + bit-1;
713 *lmappos++ = lb_num;
714 *pmappos++ = lb_num + ptov;
715 *num_lb = *num_lb - 1;
716 // offset = (offset & ~7);
717 }
718 }
719
720 if (ismetadata) {
721 bitmap->metadata_pos = offset;
722 } else {
723 bitmap->data_pos = offset;
724 }
725 }
726
727
728 static void
729 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
730 {
731 uint32_t offset;
732 uint32_t bit, bitval;
733 uint8_t *bpos;
734
735 offset = lb_num;
736
737 /* starter bits */
738 bpos = bitmap->bits + offset/8;
739 bit = offset % 8;
740 while ((bit != 0) && (num_lb > 0)) {
741 bitval = (1 << bit);
742 KASSERT((*bpos & bitval) == 0);
743 *bpos |= bitval;
744 offset++; num_lb--;
745 bit = (bit + 1) % 8;
746 }
747 if (num_lb == 0)
748 return;
749
750 /* whole bytes */
751 KASSERT(bit == 0);
752 bpos = bitmap->bits + offset / 8;
753 while (num_lb >= 8) {
754 KASSERT((*bpos == 0));
755 *bpos = 255;
756 offset += 8; num_lb -= 8;
757 bpos++;
758 }
759
760 /* stop bits */
761 KASSERT(num_lb < 8);
762 bit = 0;
763 while (num_lb > 0) {
764 bitval = (1 << bit);
765 KASSERT((*bpos & bitval) == 0);
766 *bpos |= bitval;
767 offset++; num_lb--;
768 bit = (bit + 1) % 8;
769 }
770 }
771
772
773 /* allocate a contiguous sequence of sectornumbers */
774 static int
775 udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
776 int num_lb, uint16_t *alloc_partp,
777 uint64_t *lmapping, uint64_t *pmapping)
778 {
779 struct mmc_trackinfo *alloc_track, *other_track;
780 struct udf_bitmap *bitmap;
781 struct part_desc *pdesc;
782 struct logvol_int_desc *lvid;
783 uint64_t *lmappos, *pmappos;
784 uint32_t ptov, lb_num, *freepos, free_lbs;
785 int lb_size, alloc_num_lb;
786 int alloc_part;
787 int error;
788
789 mutex_enter(&ump->allocate_mutex);
790
791 lb_size = udf_rw32(ump->logical_vol->lb_size);
792 KASSERT(lb_size == ump->discinfo.sector_size);
793
794 if (ismetadata) {
795 alloc_part = ump->metadata_part;
796 alloc_track = &ump->metadata_track;
797 other_track = &ump->data_track;
798 } else {
799 alloc_part = ump->data_part;
800 alloc_track = &ump->data_track;
801 other_track = &ump->metadata_track;
802 }
803
804 *alloc_partp = alloc_part;
805
806 error = 0;
807 /* XXX check disc space */
808
809 pdesc = ump->partitions[ump->vtop[alloc_part]];
810 lmappos = lmapping;
811 pmappos = pmapping;
812
813 switch (alloc_type) {
814 case UDF_ALLOC_VAT :
815 /* search empty slot in VAT file */
816 KASSERT(num_lb == 1);
817 error = udf_search_free_vatloc(ump, &lb_num);
818 if (!error) {
819 *lmappos = lb_num;
820 *pmappos = 0; /* will get late-allocated */
821 }
822 break;
823 case UDF_ALLOC_SEQUENTIAL :
824 /* sequential allocation on recordable media */
825 /* calculate offset from physical base partition */
826 ptov = udf_rw32(pdesc->start_loc);
827
828 for (lb_num = 0; lb_num < num_lb; lb_num++) {
829 *pmappos++ = alloc_track->next_writable;
830 *lmappos++ = alloc_track->next_writable - ptov;
831 alloc_track->next_writable++;
832 alloc_track->free_blocks--;
833 }
834 if (alloc_track->tracknr == other_track->tracknr)
835 memcpy(other_track, alloc_track,
836 sizeof(struct mmc_trackinfo));
837 break;
838 case UDF_ALLOC_SPACEMAP :
839 ptov = udf_rw32(pdesc->start_loc);
840
841 /* allocate on unallocated bits page */
842 alloc_num_lb = num_lb;
843 bitmap = &ump->part_unalloc_bits[alloc_part];
844 udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
845 pmappos, lmappos);
846 ump->lvclose |= UDF_WRITE_PART_BITMAPS;
847 if (alloc_num_lb) {
848 /* TODO convert freed to unalloc and try again */
849 /* free allocated piece for now */
850 lmappos = lmapping;
851 for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
852 udf_bitmap_free(bitmap, *lmappos++, 1);
853 }
854 error = ENOSPC;
855 }
856 if (!error) {
857 /* adjust freecount */
858 lvid = ump->logvol_integrity;
859 freepos = &lvid->tables[0] + alloc_part;
860 free_lbs = udf_rw32(*freepos);
861 *freepos = udf_rw32(free_lbs - num_lb);
862 }
863 break;
864 case UDF_ALLOC_METABITMAP :
865 case UDF_ALLOC_METASEQUENTIAL :
866 case UDF_ALLOC_RELAXEDSEQUENTIAL :
867 printf("ALERT: udf_allocate_space : allocation %d "
868 "not implemented yet!\n", alloc_type);
869 /* TODO implement, doesn't have to be contiguous */
870 error = ENOSPC;
871 break;
872 }
873
874 #ifdef DEBUG
875 if (udf_verbose & UDF_DEBUG_ALLOC) {
876 lmappos = lmapping;
877 pmappos = pmapping;
878 printf("udf_allocate_space, mapping l->p:\n");
879 for (lb_num = 0; lb_num < num_lb; lb_num++) {
880 printf("\t%"PRIu64" -> %"PRIu64"\n",
881 *lmappos++, *pmappos++);
882 }
883 }
884 #endif
885 mutex_exit(&ump->allocate_mutex);
886
887 return error;
888 }
889
890 /* --------------------------------------------------------------------- */
891
892 void
893 udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
894 uint16_t vpart_num, uint32_t num_lb)
895 {
896 struct udf_bitmap *bitmap;
897 struct part_desc *pdesc;
898 struct logvol_int_desc *lvid;
899 uint32_t ptov, lb_map, udf_rw32_lbmap;
900 uint32_t *freepos, free_lbs;
901 int phys_part;
902 int error;
903
904 DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
905 "part %d + %d sect\n", lb_num, vpart_num, num_lb));
906
907 mutex_enter(&ump->allocate_mutex);
908
909 /* get partition backing up this vpart_num */
910 pdesc = ump->partitions[ump->vtop[vpart_num]];
911
912 switch (ump->vtop_tp[vpart_num]) {
913 case UDF_VTOP_TYPE_PHYS :
914 case UDF_VTOP_TYPE_SPARABLE :
915 /* free space to freed or unallocated space bitmap */
916 ptov = udf_rw32(pdesc->start_loc);
917 phys_part = ump->vtop[vpart_num];
918
919 /* first try freed space bitmap */
920 bitmap = &ump->part_freed_bits[phys_part];
921
922 /* if not defined, use unallocated bitmap */
923 if (bitmap->bits == NULL)
924 bitmap = &ump->part_unalloc_bits[phys_part];
925
926 /* if no bitmaps are defined, bail out */
927 if (bitmap->bits == NULL)
928 break;
929
930 /* free bits if its defined */
931 KASSERT(bitmap->bits);
932 ump->lvclose |= UDF_WRITE_PART_BITMAPS;
933 udf_bitmap_free(bitmap, lb_num, num_lb);
934
935 /* adjust freecount */
936 lvid = ump->logvol_integrity;
937 freepos = &lvid->tables[0] + vpart_num;
938 free_lbs = udf_rw32(*freepos);
939 *freepos = udf_rw32(free_lbs + num_lb);
940 break;
941 case UDF_VTOP_TYPE_VIRT :
942 /* free this VAT entry */
943 KASSERT(num_lb == 1);
944
945 lb_map = 0xffffffff;
946 udf_rw32_lbmap = udf_rw32(lb_map);
947 error = udf_vat_write(ump->vat_node,
948 (uint8_t *) &udf_rw32_lbmap, 4,
949 ump->vat_offset + lb_num * 4);
950 KASSERT(error == 0);
951 ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
952 break;
953 case UDF_VTOP_TYPE_META :
954 /* free space in the metadata bitmap */
955 default:
956 printf("ALERT: udf_free_allocated_space : allocation %d "
957 "not implemented yet!\n", ump->vtop_tp[vpart_num]);
958 break;
959 }
960
961 mutex_exit(&ump->allocate_mutex);
962 }
963
964 /* --------------------------------------------------------------------- */
965
966 int
967 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
968 uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
969 {
970 int ismetadata, alloc_type;
971
972 ismetadata = (udf_c_type == UDF_C_NODE);
973 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
974
975 #ifdef DIAGNOSTIC
976 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
977 panic("udf_pre_allocate_space: bad c_type on VAT!\n");
978 }
979 #endif
980
981 /* reserve size for VAT allocated data */
982 if (alloc_type == UDF_ALLOC_VAT) {
983 mutex_enter(&ump->allocate_mutex);
984 ump->uncomitted_lb += num_lb;
985 mutex_exit(&ump->allocate_mutex);
986 }
987
988 return udf_allocate_space(ump, ismetadata, alloc_type,
989 num_lb, alloc_partp, lmapping, pmapping);
990 }
991
992 /* --------------------------------------------------------------------- */
993
994 /*
995 * Allocate a buf on disc for direct write out. The space doesn't have to be
996 * contiguous as the caller takes care of this.
997 */
998
999 void
1000 udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
1001 uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
1002 {
1003 struct udf_node *udf_node = VTOI(buf->b_vp);
1004 uint16_t vpart_num;
1005 int lb_size, blks, udf_c_type;
1006 int ismetadata, alloc_type;
1007 int num_lb;
1008 int error, s;
1009
1010 /*
1011 * for each sector in the buf, allocate a sector on disc and record
1012 * its position in the provided mapping array.
1013 *
1014 * If its userdata or FIDs, record its location in its node.
1015 */
1016
1017 lb_size = udf_rw32(ump->logical_vol->lb_size);
1018 num_lb = (buf->b_bcount + lb_size -1) / lb_size;
1019 blks = lb_size / DEV_BSIZE;
1020 udf_c_type = buf->b_udf_c_type;
1021
1022 KASSERT(lb_size == ump->discinfo.sector_size);
1023
1024 ismetadata = (udf_c_type == UDF_C_NODE);
1025 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
1026
1027 #ifdef DIAGNOSTIC
1028 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
1029 panic("udf_late_allocate_buf: bad c_type on VAT!\n");
1030 }
1031 #endif
1032
1033 if (udf_c_type == UDF_C_NODE) {
1034 /* if not VAT, its allready allocated */
1035 if (alloc_type != UDF_ALLOC_VAT)
1036 return;
1037
1038 /* allocate sequential */
1039 alloc_type = UDF_ALLOC_SEQUENTIAL;
1040 }
1041
1042 error = udf_allocate_space(ump, ismetadata, alloc_type,
1043 num_lb, &vpart_num, lmapping, pmapping);
1044 if (error) {
1045 /* ARGH! we've not done our accounting right! */
1046 panic("UDF disc allocation accounting gone wrong");
1047 }
1048
1049 /* commit our sector count */
1050 mutex_enter(&ump->allocate_mutex);
1051 if (num_lb > ump->uncomitted_lb) {
1052 ump->uncomitted_lb = 0;
1053 } else {
1054 ump->uncomitted_lb -= num_lb;
1055 }
1056 mutex_exit(&ump->allocate_mutex);
1057
1058 buf->b_blkno = (*pmapping) * blks;
1059
1060 /* If its userdata or FIDs, record its allocation in its node. */
1061 if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
1062 udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
1063 node_ad_cpy);
1064 /* decrement our outstanding bufs counter */
1065 s = splbio();
1066 udf_node->outstanding_bufs--;
1067 splx(s);
1068 }
1069 }
1070
1071 /* --------------------------------------------------------------------- */
1072
1073 /*
1074 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1075 * possible (anymore); a2 returns the rest piece.
1076 */
1077
1078 static int
1079 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1080 {
1081 uint32_t max_len, merge_len;
1082 uint32_t a1_len, a2_len;
1083 uint32_t a1_flags, a2_flags;
1084 uint32_t a1_lbnum, a2_lbnum;
1085 uint16_t a1_part, a2_part;
1086
1087 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1088
1089 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1090 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1091 a1_lbnum = udf_rw32(a1->loc.lb_num);
1092 a1_part = udf_rw16(a1->loc.part_num);
1093
1094 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1095 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1096 a2_lbnum = udf_rw32(a2->loc.lb_num);
1097 a2_part = udf_rw16(a2->loc.part_num);
1098
1099 /* defines same space */
1100 if (a1_flags != a2_flags)
1101 return 1;
1102
1103 if (a1_flags != UDF_EXT_FREE) {
1104 /* the same partition */
1105 if (a1_part != a2_part)
1106 return 1;
1107
1108 /* a2 is successor of a1 */
1109 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1110 return 1;
1111 }
1112
1113 /* merge as most from a2 if possible */
1114 merge_len = MIN(a2_len, max_len - a1_len);
1115 a1_len += merge_len;
1116 a2_len -= merge_len;
1117 a2_lbnum += merge_len/lb_size;
1118
1119 a1->len = udf_rw32(a1_len | a1_flags);
1120 a2->len = udf_rw32(a2_len | a2_flags);
1121 a2->loc.lb_num = udf_rw32(a2_lbnum);
1122
1123 if (a2_len > 0)
1124 return 1;
1125
1126 /* there is space over to merge */
1127 return 0;
1128 }
1129
1130 /* --------------------------------------------------------------------- */
1131
1132 static void
1133 udf_wipe_adslots(struct udf_node *udf_node)
1134 {
1135 struct file_entry *fe;
1136 struct extfile_entry *efe;
1137 struct alloc_ext_entry *ext;
1138 uint64_t inflen, objsize;
1139 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1140 uint8_t *data_pos;
1141 int extnr;
1142
1143 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1144
1145 fe = udf_node->fe;
1146 efe = udf_node->efe;
1147 if (fe) {
1148 inflen = udf_rw64(fe->inf_len);
1149 objsize = inflen;
1150 dscr_size = sizeof(struct file_entry) -1;
1151 l_ea = udf_rw32(fe->l_ea);
1152 l_ad = udf_rw32(fe->l_ad);
1153 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1154 } else {
1155 inflen = udf_rw64(efe->inf_len);
1156 objsize = udf_rw64(efe->obj_size);
1157 dscr_size = sizeof(struct extfile_entry) -1;
1158 l_ea = udf_rw32(efe->l_ea);
1159 l_ad = udf_rw32(efe->l_ad);
1160 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1161 }
1162 max_l_ad = lb_size - dscr_size - l_ea;
1163
1164 /* wipe fe/efe */
1165 memset(data_pos, 0, max_l_ad);
1166 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1167 if (fe) {
1168 fe->l_ad = udf_rw32(0);
1169 fe->logblks_rec = udf_rw64(0);
1170 fe->tag.desc_crc_len = udf_rw32(crclen);
1171 } else {
1172 efe->l_ad = udf_rw32(0);
1173 efe->logblks_rec = udf_rw64(0);
1174 efe->tag.desc_crc_len = udf_rw32(crclen);
1175 }
1176
1177 /* wipe all allocation extent entries */
1178 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1179 ext = udf_node->ext[extnr];
1180 dscr_size = sizeof(struct alloc_ext_entry) -1;
1181 max_l_ad = lb_size - dscr_size;
1182 memset(data_pos, 0, max_l_ad);
1183 ext->l_ad = udf_rw32(0);
1184
1185 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1186 ext->tag.desc_crc_len = udf_rw32(crclen);
1187 }
1188 }
1189
1190 /* --------------------------------------------------------------------- */
1191
1192 void
1193 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1194 int *eof) {
1195 struct file_entry *fe;
1196 struct extfile_entry *efe;
1197 struct alloc_ext_entry *ext;
1198 struct icb_tag *icbtag;
1199 struct short_ad *short_ad;
1200 struct long_ad *long_ad;
1201 uint32_t offset;
1202 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad;
1203 uint8_t *data_pos;
1204 int icbflags, addr_type, adlen, extnr;
1205
1206 /* determine what descriptor we are in */
1207 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1208
1209 fe = udf_node->fe;
1210 efe = udf_node->efe;
1211 if (fe) {
1212 icbtag = &fe->icbtag;
1213 dscr_size = sizeof(struct file_entry) -1;
1214 l_ea = udf_rw32(fe->l_ea);
1215 l_ad = udf_rw32(fe->l_ad);
1216 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1217 } else {
1218 icbtag = &efe->icbtag;
1219 dscr_size = sizeof(struct extfile_entry) -1;
1220 l_ea = udf_rw32(efe->l_ea);
1221 l_ad = udf_rw32(efe->l_ad);
1222 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1223 }
1224 max_l_ad = lb_size - dscr_size - l_ea;
1225
1226 icbflags = udf_rw16(icbtag->flags);
1227 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1228
1229 /* just in case we're called on an intern, its EOF */
1230 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1231 memset(icb, 0, sizeof(struct long_ad));
1232 *eof = 1;
1233 return;
1234 }
1235
1236 adlen = 0;
1237 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1238 adlen = sizeof(struct short_ad);
1239 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1240 adlen = sizeof(struct long_ad);
1241 }
1242
1243 /* if offset too big, we go to the allocation extensions */
1244 offset = slot * adlen;
1245 extnr = -1;
1246 while (offset >= max_l_ad) {
1247 extnr++;
1248 offset -= max_l_ad;
1249 ext = udf_node->ext[extnr];
1250 dscr_size = sizeof(struct alloc_ext_entry) -1;
1251 l_ad = udf_rw32(ext->l_ad);
1252 max_l_ad = lb_size - dscr_size;
1253 data_pos = (uint8_t *) ext + dscr_size;
1254 if (extnr > udf_node->num_extensions) {
1255 l_ad = 0; /* force EOF */
1256 break;
1257 }
1258 }
1259
1260 *eof = (offset >= l_ad) || (l_ad == 0);
1261 if (*eof) {
1262 memset(icb, 0, sizeof(struct long_ad));
1263 return;
1264 }
1265
1266 /* get the element */
1267 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1268 short_ad = (struct short_ad *) (data_pos + offset);
1269 icb->len = short_ad->len;
1270 icb->loc.part_num = udf_rw16(0); /* ignore */
1271 icb->loc.lb_num = short_ad->lb_num;
1272 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1273 long_ad = (struct long_ad *) (data_pos + offset);
1274 *icb = *long_ad;
1275 }
1276 }
1277
1278 /* --------------------------------------------------------------------- */
1279
/*
 * Write allocation descriptor *icb into slot number `slot' of the node,
 * either overwriting an existing entry or appending one new entry at the
 * end. Adjusts the node's logblks_rec count and, when appending, the
 * descriptor's l_ad and CRC length. Returns 0 on success or EFBIG when
 * the descriptor area is exhausted (allocation extension creation is not
 * implemented yet).
 */
int
udf_append_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb) {
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint32_t offset, rest, len;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag  = &fe->icbtag;
		dscr = (union dscrptr *) fe;
		dscr_size  = sizeof(struct file_entry) -1;

		l_ea = udf_rw32(fe->l_ea);
		l_ad_p = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag  = &efe->icbtag;
		dscr = (union dscrptr *) efe;
		dscr_size  = sizeof(struct extfile_entry) -1;

		l_ea = udf_rw32(efe->l_ea);
		l_ad_p = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	/*
	 * NOTE(review): this uses `offset > max_l_ad' whereas
	 * udf_get_adslot uses `offset >= max_l_ad'; looks inconsistent at
	 * the exact boundary -- confirm which one is intended.
	 */
	offset = slot * adlen;
	extnr = 0;
	while (offset > max_l_ad) {
		offset -= max_l_ad;
		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size  = sizeof(struct alloc_ext_entry) -1;

		KASSERT(ext != NULL);
		l_ad_p = &ext->l_ad;
		/*
		 * logblks_rec_p is deliberately left pointing at the
		 * fe/efe: logical blocks recorded is a node-wide count,
		 * not a per-extension one.
		 */
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		extnr++;
	}
	/* offset is offset within the current (E)FE/AED */
	l_ad   = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	logblks_rec = udf_rw64(*logblks_rec_p);

	if (extnr > udf_node->num_extensions)
		return EFBIG;	/* too fragmented */

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len          = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0);	/* ignore */
			o_icb.loc.lb_num   = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts: the old entry's blocks are no
			 * longer recorded */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* calculate rest space in this descriptor */
	rest = max_l_ad - offset;
	if (rest <= adlen) {
		/* create redirect and link new allocation extension */
		/* TODO allocation extension creation not implemented yet */
		printf("udf_append_to_adslot: can't create allocation extention yet\n");
		return EFBIG;
	}

	/* write out the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len    = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	if (UDF_EXT_FLAGS(icb->len) == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed (i.e. we appended a new slot) */
	if (offset >= l_ad) {
		l_ad   += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1415
1416 /* --------------------------------------------------------------------- */
1417
1418 /*
1419 * Adjust the node's allocation descriptors to reflect the new mapping; do
1420 * take note that we might glue to existing allocation descriptors.
1421 *
 * XXX Note that only one allocation can be recorded per mount at a time;
 * maybe do the allocation explicitly in the schedule thread?
1424 */
1425
/*
 * Record the freshly allocated logical blocks in `mapping' into the
 * node's allocation descriptors for the file range covered by `buf',
 * freeing any old backing space that gets replaced.
 */
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset;
	uint32_t skip_len, skipped;
	int addr_type, icbflags;
	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from = %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot = 0;
	cpy_slot = 0;
	foffset = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap started so pass 4 can resume here */
	restart_slot = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start = mapping[lb_num];
		run_length = 1;
		/*
		 * NOTE(review): a repeated identical mapping value also
		 * extends the run here (only a jump that is neither +1
		 * nor equal breaks) -- presumably intentional for
		 * translated/sequential partitions; confirm.
		 */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length: walk the old slots that the new mapping
	 * covers, freeing their recorded space, and keep any tail piece */
	slot = restart_slot;
	foffset = restart_foffset;

	skip_len = till - foffset;	/* relative to start of slot */
	slot_offset = from - foffset;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, skip_len %d, vp %d, "
			"lb %d, len %d, flags %d\n",
			slot, skip_len, udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		skipped = MIN(len, skip_len);
		if (flags != UDF_EXT_FREE) {
			if (slot_offset) {
				/* skip these blocks first: they were kept
				 * (truncated) by pass 2 */
				num_lb = (slot_offset + lb_size-1) / lb_size;
				len -= slot_offset;
				skip_len -= slot_offset;
				foffset += slot_offset;
				lb_num += num_lb;
				skipped -= slot_offset;
				slot_offset = 0;
			}
			/* free space from current position till `skipped' */
			num_lb = (skipped + lb_size-1) / lb_size;
			udf_free_allocated_space(ump, lb_num,
				udf_rw16(s_ad.loc.part_num), num_lb);
			lb_num += num_lb;
		}
		len -= skipped;
		skip_len -= skipped;
		foffset += skipped;

		if (len) {
			KASSERT(skipped % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}
		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
				udf_rw16(c_ad.loc.part_num),
				udf_rw32(c_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(c_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
			"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	/* the node's descriptors should now be sane */
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
1766
1767 /* --------------------------------------------------------------------- */
1768
/*
 * Grow the node to `new_size' bytes. Internally allocated (embedded)
 * nodes grow in place while they fit; otherwise the embedded data is
 * evacuated, the node is converted to long_ad allocation, and free
 * extents are appended (in max_len sized chunks) to cover the new size.
 * Returns 0 on success or an errno value.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint8_t *data_pos, *evacuated_data;
	int icbflags, addr_type;
	int slot, cpy_slot;
	int eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	UDF_LOCK_NODE(udf_node, 0);
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* max extent length, rounded down to whole logical blocks */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif
	
			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0, 
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);
			/*
			 * NOTE(review): `error' from this read is not
			 * checked before the conversion continues; a
			 * failed read would write stale zeroed data back
			 * later -- confirm whether this needs handling.
			 */

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc */
		/* XXX HOWTO selecting allocation method ? */
		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |=  UDF_ICB_LONG_ALLOC;	/* XXX or SHORT_ALLOC */
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(s_ad.len) > 0) {
		error = udf_append_adslot(udf_node, slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0, 
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2026
2027 /* --------------------------------------------------------------------- */
2028
2029 int
2030 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2031 {
2032 struct vnode *vp = udf_node->vnode;
2033 struct udf_mount *ump = udf_node->ump;
2034 struct file_entry *fe;
2035 struct extfile_entry *efe;
2036 struct icb_tag *icbtag;
2037 struct long_ad c_ad, s_ad, *node_ad_cpy;
2038 uint64_t size_diff, old_size, inflen, objsize;
2039 uint64_t foffset, end_foffset;
2040 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2041 uint32_t lb_size, dscr_size, crclen;
2042 uint32_t slot_offset;
2043 uint32_t len, flags, max_len;
2044 uint32_t num_lb, lb_num;
2045 uint32_t max_l_ad, l_ad, l_ea;
2046 uint16_t vpart_num;
2047 uint8_t *data_pos;
2048 int icbflags, addr_type;
2049 int slot, cpy_slot, cpy_slots;
2050 int eof, error;
2051
2052 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2053 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2054
2055 UDF_LOCK_NODE(udf_node, 0);
2056 lb_size = udf_rw32(ump->logical_vol->lb_size);
2057 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2058
2059 /* do the work */
2060 fe = udf_node->fe;
2061 efe = udf_node->efe;
2062 if (fe) {
2063 icbtag = &fe->icbtag;
2064 inflen = udf_rw64(fe->inf_len);
2065 objsize = inflen;
2066 dscr_size = sizeof(struct file_entry) -1;
2067 l_ea = udf_rw32(fe->l_ea);
2068 l_ad = udf_rw32(fe->l_ad);
2069 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2070 } else {
2071 icbtag = &efe->icbtag;
2072 inflen = udf_rw64(efe->inf_len);
2073 objsize = udf_rw64(efe->obj_size);
2074 dscr_size = sizeof(struct extfile_entry) -1;
2075 l_ea = udf_rw32(efe->l_ea);
2076 l_ad = udf_rw32(efe->l_ad);
2077 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2078 }
2079 max_l_ad = lb_size - dscr_size - l_ea;
2080
2081 icbflags = udf_rw16(icbtag->flags);
2082 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2083
2084 old_size = inflen;
2085 size_diff = old_size - new_size;
2086
2087 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2088
2089 /* shrink the node to its new size */
2090 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2091 /* only reflect size change directly in the node */
2092 KASSERT(new_size <= max_l_ad);
2093 inflen -= size_diff;
2094 objsize -= size_diff;
2095 l_ad -= size_diff;
2096 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2097 if (fe) {
2098 fe->inf_len = udf_rw64(inflen);
2099 fe->l_ad = udf_rw32(l_ad);
2100 fe->tag.desc_crc_len = udf_rw32(crclen);
2101 } else {
2102 efe->inf_len = udf_rw64(inflen);
2103 efe->obj_size = udf_rw64(objsize);
2104 efe->l_ad = udf_rw32(l_ad);
2105 efe->tag.desc_crc_len = udf_rw32(crclen);
2106 }
2107 error = 0;
2108 /* TODO zero appened space in buffer! */
2109 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2110
2111 /* set new size for uvm */
2112 uvm_vnp_setsize(vp, new_size);
2113 UDF_UNLOCK_NODE(udf_node, 0);
2114
2115 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2116 KASSERT(new_inflen == orig_inflen - size_diff);
2117 KASSERT(new_lbrec == orig_lbrec);
2118 KASSERT(new_lbrec == 0);
2119
2120 return 0;
2121 }
2122
2123 /* setup node cleanup extents copy space */
2124 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2125 M_UDFMNT, M_WAITOK);
2126 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2127
2128 /*
2129 * Shrink the node by releasing the allocations and truncate the last
2130 * allocation to the new size. If the new size fits into the
2131 * allocation descriptor itself, transform it into an
2132 * UDF_ICB_INTERN_ALLOC.
2133 */
2134 slot = 0;
2135 cpy_slot = 0;
2136 foffset = 0;
2137
2138 /* 1) copy till first overlap piece to the rewrite buffer */
2139 for (;;) {
2140 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2141 if (eof) {
2142 DPRINTF(WRITE,
2143 ("Shrink node failed: "
2144 "encountered EOF\n"));
2145 error = EINVAL;
2146 goto errorout; /* panic? */
2147 }
2148 len = udf_rw32(s_ad.len);
2149 flags = UDF_EXT_FLAGS(len);
2150 len = UDF_EXT_LEN(len);
2151
2152 if (flags == UDF_EXT_REDIRECT) {
2153 slot++;
2154 continue;
2155 }
2156
2157 end_foffset = foffset + len;
2158 if (end_foffset > new_size)
2159 break; /* found */
2160
2161 node_ad_cpy[cpy_slot++] = s_ad;
2162
2163 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2164 "-> stack\n",
2165 udf_rw16(s_ad.loc.part_num),
2166 udf_rw32(s_ad.loc.lb_num),
2167 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2168 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2169
2170 foffset = end_foffset;
2171 slot++;
2172 }
2173 slot_offset = new_size - foffset;
2174
2175 /* 2) trunc overlapping slot at overlap and copy it */
2176 if (slot_offset > 0) {
2177 lb_num = udf_rw32(s_ad.loc.lb_num);
2178 vpart_num = udf_rw16(s_ad.loc.part_num);
2179
2180 if (flags == UDF_EXT_ALLOCATED) {
2181 lb_num += (slot_offset + lb_size -1) / lb_size;
2182 num_lb = (len - slot_offset + lb_size - 1) / lb_size;
2183
2184 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2185 }
2186
2187 s_ad.len = udf_rw32(slot_offset | flags);
2188 node_ad_cpy[cpy_slot++] = s_ad;
2189 slot++;
2190
2191 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2192 "-> stack\n",
2193 udf_rw16(s_ad.loc.part_num),
2194 udf_rw32(s_ad.loc.lb_num),
2195 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2196 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2197 }
2198
2199 /* 3) delete remainder */
2200 for (;;) {
2201 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2202 if (eof)
2203 break;
2204
2205 len = udf_rw32(s_ad.len);
2206 flags = UDF_EXT_FLAGS(len);
2207 len = UDF_EXT_LEN(len);
2208
2209 if (flags == UDF_EXT_REDIRECT) {
2210 slot++;
2211 continue;
2212 }
2213
2214 DPRINTF(ALLOC, ("\t3: delete remainder "
2215 "vp %d lb %d, len %d, flags %d\n",
2216 udf_rw16(s_ad.loc.part_num),
2217 udf_rw32(s_ad.loc.lb_num),
2218 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2219 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2220
2221 if (flags == UDF_EXT_ALLOCATED) {
2222 lb_num = udf_rw32(s_ad.loc.lb_num);
2223 vpart_num = udf_rw16(s_ad.loc.part_num);
2224 num_lb = (len + lb_size - 1) / lb_size;
2225
2226 udf_free_allocated_space(ump, lb_num, vpart_num,
2227 num_lb);
2228 }
2229
2230 slot++;
2231 }
2232
2233 /* 4) if it will fit into the descriptor then convert */
2234 if (new_size < max_l_ad) {
2235 /*
2236 * resque/evacuate old piece by reading it in, and convert it
2237 * to internal alloc.
2238 */
2239 if (new_size == 0) {
2240 /* XXX/TODO only for zero sizing now */
2241 udf_wipe_adslots(udf_node);
2242
2243 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2244 icbflags |= UDF_ICB_INTERN_ALLOC;
2245 icbtag->flags = udf_rw16(icbflags);
2246
2247 inflen -= size_diff; KASSERT(inflen == 0);
2248 objsize -= size_diff;
2249 l_ad = new_size;
2250 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2251 if (fe) {
2252 fe->inf_len = udf_rw64(inflen);
2253 fe->l_ad = udf_rw32(l_ad);
2254 fe->tag.desc_crc_len = udf_rw32(crclen);
2255 } else {
2256 efe->inf_len = udf_rw64(inflen);
2257 efe->obj_size = udf_rw64(objsize);
2258 efe->l_ad = udf_rw32(l_ad);
2259 efe->tag.desc_crc_len = udf_rw32(crclen);
2260 }
2261 /* eventually copy in evacuated piece */
2262 /* set new size for uvm */
2263 uvm_vnp_setsize(vp, new_size);
2264
2265 free(node_ad_cpy, M_UDFMNT);
2266 UDF_UNLOCK_NODE(udf_node, 0);
2267
2268 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2269 KASSERT(new_inflen == orig_inflen - size_diff);
2270 KASSERT(new_inflen == 0);
2271 KASSERT(new_lbrec == 0);
2272
2273 return 0;
2274 }
2275
2276 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2277 }
2278
2279 /* 5) reset node descriptors */
2280 udf_wipe_adslots(udf_node);
2281
2282 /* 6) copy back extents; merge when possible. Recounting on the fly */
2283 cpy_slots = cpy_slot;
2284
2285 c_ad = node_ad_cpy[0];
2286 slot = 0;
2287 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2288 s_ad = node_ad_cpy[cpy_slot];
2289
2290 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2291 "lb %d, len %d, flags %d\n",
2292 udf_rw16(s_ad.loc.part_num),
2293 udf_rw32(s_ad.loc.lb_num),
2294 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2295 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2296
2297 /* see if we can merge */
2298 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2299 /* not mergable (anymore) */
2300 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2301 "len %d, flags %d\n",
2302 udf_rw16(c_ad.loc.part_num),
2303 udf_rw32(c_ad.loc.lb_num),
2304 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2305 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2306
2307 error = udf_append_adslot(udf_node, slot, &c_ad);
2308 if (error)
2309 goto errorout; /* panic? */
2310 c_ad = s_ad;
2311 slot++;
2312 }
2313 }
2314
2315 /* 7) push rest slot (if any) */
2316 if (UDF_EXT_LEN(c_ad.len) > 0) {
2317 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2318 "len %d, flags %d\n",
2319 udf_rw16(c_ad.loc.part_num),
2320 udf_rw32(c_ad.loc.lb_num),
2321 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2322 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2323
2324 error = udf_append_adslot(udf_node, slot, &c_ad);
2325 if (error)
2326 goto errorout; /* panic? */
2327 ;
2328 }
2329
2330 inflen -= size_diff;
2331 objsize -= size_diff;
2332 if (fe) {
2333 fe->inf_len = udf_rw64(inflen);
2334 } else {
2335 efe->inf_len = udf_rw64(inflen);
2336 efe->obj_size = udf_rw64(objsize);
2337 }
2338 error = 0;
2339
2340 /* set new size for uvm */
2341 uvm_vnp_setsize(vp, new_size);
2342
2343 errorout:
2344 free(node_ad_cpy, M_UDFMNT);
2345 UDF_UNLOCK_NODE(udf_node, 0);
2346
2347 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2348 KASSERT(new_inflen == orig_inflen - size_diff);
2349
2350 return error;
2351 }
2352
2353