udf_allocation.c revision 1.6 1 /* $NetBSD: udf_allocation.c,v 1.6 2008/06/26 13:28:45 reinoud Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 #include <sys/cdefs.h>
30 #ifndef lint
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.6 2008/06/26 13:28:45 reinoud Exp $");
32 #endif /* not lint */
33
34
35 #if defined(_KERNEL_OPT)
36 #include "opt_quota.h"
37 #include "opt_compat_netbsd.h"
38 #endif
39
40 /* TODO strip */
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
44 #include <sys/namei.h>
45 #include <sys/proc.h>
46 #include <sys/kernel.h>
47 #include <sys/vnode.h>
48 #include <miscfs/genfs/genfs_node.h>
49 #include <sys/mount.h>
50 #include <sys/buf.h>
51 #include <sys/file.h>
52 #include <sys/device.h>
53 #include <sys/disklabel.h>
54 #include <sys/ioctl.h>
55 #include <sys/malloc.h>
56 #include <sys/dirent.h>
57 #include <sys/stat.h>
58 #include <sys/conf.h>
59 #include <sys/kauth.h>
60 #include <sys/kthread.h>
61 #include <dev/clock_subr.h>
62
63 #include <fs/udf/ecma167-udf.h>
64 #include <fs/udf/udf_mount.h>
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_udf.h"
68 #endif
69
70 #include "udf.h"
71 #include "udf_subr.h"
72 #include "udf_bswap.h"
73
74
75 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
76
77 static void udf_record_allocation_in_node(struct udf_mount *ump,
78 struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
79 struct long_ad *node_ad_cpy);
80
/*
 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
 * since actions are most likely sequential and thus seeking doesn't need
 * searching for the same or adjacent position again.
 */
87
88 /* --------------------------------------------------------------------- */
89 //#ifdef DEBUG
90 #if 1
91 #if 1
92 static void
93 udf_node_dump(struct udf_node *udf_node) {
94 struct file_entry *fe;
95 struct extfile_entry *efe;
96 struct icb_tag *icbtag;
97 struct short_ad *short_ad;
98 struct long_ad *long_ad;
99 uint64_t inflen;
100 uint32_t icbflags, addr_type, max_l_ad;
101 uint32_t len, lb_num;
102 uint8_t *data_pos;
103 int part_num;
104 int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags;
105
106 if ((udf_verbose & UDF_DEBUG_ADWLK) == 0)
107 return;
108
109 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
110
111 fe = udf_node->fe;
112 efe = udf_node->efe;
113 if (fe) {
114 icbtag = &fe->icbtag;
115 inflen = udf_rw64(fe->inf_len);
116 dscr_size = sizeof(struct file_entry) -1;
117 l_ea = udf_rw32(fe->l_ea);
118 l_ad = udf_rw32(fe->l_ad);
119 data_pos = (uint8_t *) fe + dscr_size + l_ea;
120 } else {
121 icbtag = &efe->icbtag;
122 inflen = udf_rw64(efe->inf_len);
123 dscr_size = sizeof(struct extfile_entry) -1;
124 l_ea = udf_rw32(efe->l_ea);
125 l_ad = udf_rw32(efe->l_ad);
126 data_pos = (uint8_t *) efe + dscr_size + l_ea;
127 }
128 max_l_ad = lb_size - dscr_size - l_ea;
129
130 icbflags = udf_rw16(icbtag->flags);
131 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
132
133 printf("udf_node_dump:\n");
134 printf("\tudf_node %p\n", udf_node);
135
136 if (addr_type == UDF_ICB_INTERN_ALLOC) {
137 printf("\t\tIntern alloc, len = %"PRIu64"\n", inflen);
138 return;
139 }
140
141 printf("\t\tInflen = %"PRIu64"\n", inflen);
142 printf("\t\tl_ad = %d\n", l_ad);
143
144 if (addr_type == UDF_ICB_SHORT_ALLOC) {
145 adlen = sizeof(struct short_ad);
146 } else {
147 adlen = sizeof(struct long_ad);
148 }
149
150 printf("\t\t");
151 for (ad_off = 0; ad_off < max_l_ad-adlen; ad_off += adlen) {
152 if (addr_type == UDF_ICB_SHORT_ALLOC) {
153 short_ad = (struct short_ad *) (data_pos + ad_off);
154 len = udf_rw32(short_ad->len);
155 lb_num = udf_rw32(short_ad->lb_num);
156 part_num = -1;
157 flags = UDF_EXT_FLAGS(len);
158 len = UDF_EXT_LEN(len);
159 } else {
160 long_ad = (struct long_ad *) (data_pos + ad_off);
161 len = udf_rw32(long_ad->len);
162 lb_num = udf_rw32(long_ad->loc.lb_num);
163 part_num = udf_rw16(long_ad->loc.part_num);
164 flags = UDF_EXT_FLAGS(len);
165 len = UDF_EXT_LEN(len);
166 }
167 printf("[");
168 if (part_num >= 0)
169 printf("part %d, ", part_num);
170 printf("lb_num %d, len %d", lb_num, len);
171 if (flags)
172 printf(", flags %d", flags);
173 printf("] ");
174 if (ad_off + adlen == l_ad)
175 printf("\n\t\tl_ad END\n\t\t");
176 }
177 printf("\n");
178 }
179 #else
180 #define udf_node_dump(a)
181 #endif
182
/*
 * Debug aid: walk all allocation descriptors of the node and verify that
 * the inf_len and logblks_rec counters recorded in the (extended) file
 * entry match the totals found by walking.  The walked totals are
 * returned through *cnt_inflen and *cnt_logblksrec.
 */
static void
udf_node_sanity_check(struct udf_node *udf_node,
		uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad;
	uint64_t inflen, logblksrec;
	uint32_t icbflags, addr_type, max_l_ad;
	uint32_t len, lb_num;
	uint8_t *data_pos;
	int part_num;
	int adlen, ad_off, dscr_size, l_ea, l_ad, lb_size, flags, whole_lb;

	/* only lock mutex; we're not changing and its a debug checking func */
	mutex_enter(&udf_node->node_mutex);

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* a node is backed by either a file entry or an extended file entry */
	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
		logblksrec = udf_rw64(fe->logblks_rec);
		dscr_size = sizeof(struct file_entry) -1;
		l_ea = udf_rw32(fe->l_ea);
		l_ad = udf_rw32(fe->l_ad);
		data_pos = (uint8_t *) fe + dscr_size + l_ea;
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
		logblksrec = udf_rw64(efe->logblks_rec);
		dscr_size = sizeof(struct extfile_entry) -1;
		l_ea = udf_rw32(efe->l_ea);
		l_ad = udf_rw32(efe->l_ad);
		data_pos = (uint8_t *) efe + dscr_size + l_ea;
	}
	/* size of the allocation descriptor area inside this logical block */
	max_l_ad = lb_size - dscr_size - l_ea;
	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* reset counters */
	*cnt_inflen = 0;
	*cnt_logblksrec = 0;

	/* internal allocation: file data is embedded, so l_ad == inf_len */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		KASSERT(l_ad <= max_l_ad);
		KASSERT(l_ad == inflen);
		*cnt_inflen = inflen;
		mutex_exit(&udf_node->node_mutex);
		return;
	}

	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else {
		adlen = sizeof(struct long_ad);
	}

	/* start counting */
	whole_lb = 1;
	for (ad_off = 0; ad_off < l_ad; ad_off += adlen) {
		/* every extent but the last must end on a block boundary */
		KASSERT(whole_lb == 1);
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + ad_off);
			len = udf_rw32(short_ad->len);
			lb_num = udf_rw32(short_ad->lb_num);
			part_num = -1;	/* short_ads carry no partition */
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		} else {
			long_ad = (struct long_ad *) (data_pos + ad_off);
			len = udf_rw32(long_ad->len);
			lb_num = udf_rw32(long_ad->loc.lb_num);
			part_num = udf_rw16(long_ad->loc.part_num);
			flags = UDF_EXT_FLAGS(len);
			len = UDF_EXT_LEN(len);
		}
		if (flags != UDF_EXT_REDIRECT) {
			*cnt_inflen += len;
			/* only backed extents count as recorded blocks */
			if (flags == UDF_EXT_ALLOCATED) {
				*cnt_logblksrec += (len + lb_size -1) / lb_size;
			}
		} else {
			/* a redirect extent spans exactly one logical block */
			KASSERT(len == lb_size);
		}
		whole_lb = ((len % lb_size) == 0);
	}
	/* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */

	/* walked totals must match the counters recorded in the node */
	KASSERT(*cnt_inflen == inflen);
	KASSERT(*cnt_logblksrec == logblksrec);

	mutex_exit(&udf_node->node_mutex);
	if (0)
		udf_node_dump(udf_node);
}
282 #else
283 #define udf_node_sanity_check(a, b, c)
284 #endif
285
286 /* --------------------------------------------------------------------- */
287
288 int
289 udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
290 uint32_t *lb_numres, uint32_t *extres)
291 {
292 struct part_desc *pdesc;
293 struct spare_map_entry *sme;
294 struct long_ad s_icb_loc;
295 uint64_t foffset, end_foffset;
296 uint32_t lb_size, len;
297 uint32_t lb_num, lb_rel, lb_packet;
298 uint32_t udf_rw32_lbmap, ext_offset;
299 uint16_t vpart;
300 int rel, part, error, eof, slot, flags;
301
302 assert(ump && icb_loc && lb_numres);
303
304 vpart = udf_rw16(icb_loc->loc.part_num);
305 lb_num = udf_rw32(icb_loc->loc.lb_num);
306 if (vpart > UDF_VTOP_RAWPART)
307 return EINVAL;
308
309 translate_again:
310 part = ump->vtop[vpart];
311 pdesc = ump->partitions[part];
312
313 switch (ump->vtop_tp[vpart]) {
314 case UDF_VTOP_TYPE_RAW :
315 /* 1:1 to the end of the device */
316 *lb_numres = lb_num;
317 *extres = INT_MAX;
318 return 0;
319 case UDF_VTOP_TYPE_PHYS :
320 /* transform into its disc logical block */
321 if (lb_num > udf_rw32(pdesc->part_len))
322 return EINVAL;
323 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
324
325 /* extent from here to the end of the partition */
326 *extres = udf_rw32(pdesc->part_len) - lb_num;
327 return 0;
328 case UDF_VTOP_TYPE_VIRT :
329 /* only maps one logical block, lookup in VAT */
330 if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
331 return EINVAL;
332
333 /* lookup in virtual allocation table file */
334 mutex_enter(&ump->allocate_mutex);
335 error = udf_vat_read(ump->vat_node,
336 (uint8_t *) &udf_rw32_lbmap, 4,
337 ump->vat_offset + lb_num * 4);
338 mutex_exit(&ump->allocate_mutex);
339
340 if (error)
341 return error;
342
343 lb_num = udf_rw32(udf_rw32_lbmap);
344
345 /* transform into its disc logical block */
346 if (lb_num > udf_rw32(pdesc->part_len))
347 return EINVAL;
348 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
349
350 /* just one logical block */
351 *extres = 1;
352 return 0;
353 case UDF_VTOP_TYPE_SPARABLE :
354 /* check if the packet containing the lb_num is remapped */
355 lb_packet = lb_num / ump->sparable_packet_size;
356 lb_rel = lb_num % ump->sparable_packet_size;
357
358 for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
359 sme = &ump->sparing_table->entries[rel];
360 if (lb_packet == udf_rw32(sme->org)) {
361 /* NOTE maps to absolute disc logical block! */
362 *lb_numres = udf_rw32(sme->map) + lb_rel;
363 *extres = ump->sparable_packet_size - lb_rel;
364 return 0;
365 }
366 }
367
368 /* transform into its disc logical block */
369 if (lb_num > udf_rw32(pdesc->part_len))
370 return EINVAL;
371 *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
372
373 /* rest of block */
374 *extres = ump->sparable_packet_size - lb_rel;
375 return 0;
376 case UDF_VTOP_TYPE_META :
377 /* we have to look into the file's allocation descriptors */
378
379 /* use metadatafile allocation mutex */
380 lb_size = udf_rw32(ump->logical_vol->lb_size);
381
382 UDF_LOCK_NODE(ump->metadata_node, 0);
383
384 /* get first overlapping extent */
385 foffset = 0;
386 slot = 0;
387 for (;;) {
388 udf_get_adslot(ump->metadata_node,
389 slot, &s_icb_loc, &eof);
390 DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
391 "len = %d, lb_num = %d, part = %d\n",
392 slot, eof,
393 UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
394 UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
395 udf_rw32(s_icb_loc.loc.lb_num),
396 udf_rw16(s_icb_loc.loc.part_num)));
397 if (eof) {
398 DPRINTF(TRANSLATE,
399 ("Meta partition translation "
400 "failed: can't seek location\n"));
401 UDF_UNLOCK_NODE(ump->metadata_node, 0);
402 return EINVAL;
403 }
404 len = udf_rw32(s_icb_loc.len);
405 flags = UDF_EXT_FLAGS(len);
406 len = UDF_EXT_LEN(len);
407
408 if (flags == UDF_EXT_REDIRECT) {
409 slot++;
410 continue;
411 }
412
413 end_foffset = foffset + len;
414
415 if (end_foffset > lb_num * lb_size)
416 break; /* found */
417 foffset = end_foffset;
418 slot++;
419 }
420 /* found overlapping slot */
421 ext_offset = lb_num * lb_size - foffset;
422
423 /* process extent offset */
424 lb_num = udf_rw32(s_icb_loc.loc.lb_num);
425 vpart = udf_rw16(s_icb_loc.loc.part_num);
426 lb_num += (ext_offset + lb_size -1) / lb_size;
427 len -= ext_offset;
428 ext_offset = 0;
429
430 flags = UDF_EXT_FLAGS(s_icb_loc.len);
431
432 UDF_UNLOCK_NODE(ump->metadata_node, 0);
433 if (flags != UDF_EXT_ALLOCATED) {
434 DPRINTF(TRANSLATE, ("Metadata partition translation "
435 "failed: not allocated\n"));
436 return EINVAL;
437 }
438
439 /*
440 * vpart and lb_num are updated, translate again since we
441 * might be mapped on sparable media
442 */
443 goto translate_again;
444 default:
445 printf("UDF vtop translation scheme %d unimplemented yet\n",
446 ump->vtop_tp[vpart]);
447 }
448
449 return EINVAL;
450 }
451
452 /* --------------------------------------------------------------------- */
453
/*
 * Translate an extent (in logical blocks) into logical block numbers; used
 * for read and write operations. DOESN'T check extents.
 */
458
/*
 * Translate num_lb logical blocks of a file, starting at file block `from`,
 * into absolute disc block numbers written to map[].  Internally-allocated
 * nodes get the single marker UDF_TRANS_INTERN; blocks in free or
 * allocated-but-not-used extents get UDF_TRANS_ZERO.  Returns 0 on
 * success, ENOENT/EINVAL on bad node or range, or a udf_translate_vtop()
 * error.
 */
int
udf_translate_file_extent(struct udf_node *udf_node,
		          uint32_t from, uint32_t num_lb,
			  uint64_t *map)
{
	struct udf_mount *ump;
	struct icb_tag *icbtag;
	struct long_ad t_ad, s_ad;
	uint64_t transsec;
	uint64_t foffset, end_foffset;
	uint32_t transsec32;
	uint32_t lb_size;
	uint32_t ext_offset;
	uint32_t lb_num, len;
	uint32_t overlap, translen;
	uint16_t vpart_num;
	int eof, error, flags;
	int slot, addr_type, icbflags;

	if (!udf_node)
		return ENOENT;

	KASSERT(num_lb > 0);

	UDF_LOCK_NODE(udf_node, 0);

	/* initialise derivative vars */
	ump = udf_node->ump;
	lb_size = udf_rw32(ump->logical_vol->lb_size);

	if (udf_node->fe) {
		icbtag = &udf_node->fe->icbtag;
	} else {
		icbtag = &udf_node->efe->icbtag;
	}
	icbflags  = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* intern alloc: data is embedded in the node, nothing to map */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		*map = UDF_TRANS_INTERN;
		UDF_UNLOCK_NODE(udf_node, 0);
		return 0;
	}

	/* find first overlapping extent */
	foffset = 0;
	slot = 0;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: can't seek location\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}
		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		/* redirect extents only link descriptor blocks; skip them */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;

		if (end_foffset > from * lb_size)
			break;	/* found */
		foffset = end_foffset;
		slot++;
	}
	/* found overlapping slot; byte offset of `from` inside the extent */
	ext_offset = from * lb_size - foffset;

	/* translate extents until num_lb blocks are mapped */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
			"lb_num = %d, part = %d\n", slot, eof,
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			udf_rw32(s_ad.loc.lb_num),
			udf_rw16(s_ad.loc.part_num)));
		if (eof) {
			DPRINTF(TRANSLATE,
				("Translate file extent "
				 "failed: past eof\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			return EINVAL;
		}

		len   = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len   = UDF_EXT_LEN(len);

		lb_num    = udf_rw32(s_ad.loc.lb_num);
		vpart_num = udf_rw16(s_ad.loc.part_num);

		end_foffset = foffset + len;

		/* process extent, don't forget to advance on ext_offset! */
		lb_num  += (ext_offset + lb_size -1) / lb_size;
		overlap  = (len - ext_offset + lb_size -1) / lb_size;
		ext_offset = 0;	/* only the first extent has an offset */

		/*
		 * Note that the while() below is necessary since the extent
		 * that udf_translate_vtop() returns doesn't have to span the
		 * whole extent being processed here.
		 */

		overlap = MIN(overlap, num_lb);
		while (overlap && (flags != UDF_EXT_REDIRECT)) {
			switch (flags) {
			case UDF_EXT_FREE :
			case UDF_EXT_ALLOCATED_BUT_NOT_USED :
				/* no backing store: hand out zero markers */
				transsec = UDF_TRANS_ZERO;
				translen = overlap;
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++;
					overlap--; num_lb--; translen--;
				}
				break;
			case UDF_EXT_ALLOCATED :
				/* translate the virtual address to disc */
				t_ad.loc.lb_num   = udf_rw32(lb_num);
				t_ad.loc.part_num = udf_rw16(vpart_num);
				error = udf_translate_vtop(ump,
						&t_ad, &transsec32, &translen);
				transsec = transsec32;
				if (error) {
					UDF_UNLOCK_NODE(udf_node, 0);
					return error;
				}
				/* fill in as far as the translation spans */
				while (overlap && num_lb && translen) {
					*map++ = transsec;
					lb_num++; transsec++;
					overlap--; num_lb--; translen--;
				}
				break;
			default:
				DPRINTF(TRANSLATE,
					("Translate file extent "
					 "failed: bad flags %x\n", flags));
				UDF_UNLOCK_NODE(udf_node, 0);
				return EINVAL;
			}
		}
		if (num_lb == 0)
			break;

		/* redirects occupy no file space; don't advance foffset */
		if (flags != UDF_EXT_REDIRECT)
			foffset = end_foffset;
		slot++;
	}
	UDF_UNLOCK_NODE(udf_node, 0);

	return 0;
}
626
627 /* --------------------------------------------------------------------- */
628
/*
 * Search the VAT file for a free entry (0xffffffff), extending the VAT by
 * one entry when none is found.  The chosen entry is pre-marked with the
 * initialiser value 0xfffffffe so it won't be handed out twice, and its
 * VAT slot number is returned in *lbnumres.  Always returns 0.
 */
static int
udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
{
	uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
	uint8_t *blob;
	int entry, chunk, found, error;

	KASSERT(ump);
	KASSERT(ump->logical_vol);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);

	/* TODO static allocation of search chunk */

	/* start at the lowest position known to be free */
	lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
	found = 0;
	error = 0;
	entry = 0;
	do {
		/* never read past the end of the VAT */
		chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
		if (chunk <= 0)
			break;
		/* load in chunk */
		error = udf_vat_read(ump->vat_node, blob, chunk,
				ump->vat_offset + lb_num * 4);

		if (error)
			break;

		/* search this chunk for a free (all-ones) entry */
		for (entry=0; entry < chunk /4; entry++, lb_num++) {
			udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
			lb_map = udf_rw32(udf_rw32_lbmap);
			if (lb_map == 0xffffffff) {
				found = 1;
				break;
			}
		}
	} while (!found);
	if (error) {
		printf("udf_search_free_vatloc: error reading in vat chunk "
			"(lb %d, size %d)\n", lb_num, chunk);
	}
	/*
	 * NOTE(review): a VAT read error is only logged; the code below then
	 * falls through to extend the VAT and still returns 0.  Confirm this
	 * fallback is intended rather than propagating `error`.
	 */

	if (!found) {
		/* extend VAT */
		DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
		lb_num = ump->vat_entries;
		ump->vat_entries++;
	}

	/* mark entry with initialiser just in case */
	lb_map = udf_rw32(0xfffffffe);
	udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
		ump->vat_offset + lb_num *4);
	ump->vat_last_free_lb = lb_num;

	free(blob, M_UDFTEMP);
	*lbnumres = lb_num;
	return 0;
}
691
692
/*
 * Allocate up to *num_lb logical blocks from a free-space bitmap, where a
 * set bit marks a free block (udf_bitmap_free sets bits back).  Allocated
 * logical block numbers are appended to lmappos and the corresponding
 * physical block numbers (logical + ptov) to pmappos.  *num_lb is
 * decremented per allocated block; a nonzero remainder on return means
 * the bitmap ran out of free space.
 */
static void
udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
	uint32_t ptov, uint32_t *num_lb, uint64_t *pmappos, uint64_t *lmappos)
{
	uint32_t offset, lb_num, bit;
	int32_t diff;
	uint8_t *bpos;
	int pass;

	if (!ismetadata) {
		/* heuristic to keep the two pointers not too close */
		diff = bitmap->data_pos - bitmap->metadata_pos;
		if ((diff >= 0) && (diff < 1024))
			bitmap->data_pos = bitmap->metadata_pos + 1024;
	}
	/* resume at the remembered position, rounded down to a whole byte */
	offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
	offset &= ~7;
	/* two passes: current position to the end, then wrap to the start */
	for (pass = 0; pass < 2; pass++) {
		if (offset >= bitmap->max_offset)
			offset = 0;

		while (offset < bitmap->max_offset) {
			if (*num_lb == 0)
				break;

			/* find first free block in this byte (free == set) */
			bpos = bitmap->bits + offset/8;
			bit = ffs(*bpos);	/* 0 when the byte is full */
			if (bit == 0) {
				offset += 8;
				continue;
			}
			/* claim the block by clearing its free bit */
			*bpos &= ~(1 << (bit-1));
			lb_num = offset + bit-1;
			*lmappos++ = lb_num;
			*pmappos++ = lb_num + ptov;
			*num_lb = *num_lb - 1;
			/* offset stays put: re-scan byte for more free bits */
			// offset = (offset & ~7);
		}
	}

	/* remember where we stopped for the next allocation */
	if (ismetadata) {
		bitmap->metadata_pos = offset;
	} else {
		bitmap->data_pos = offset;
	}
}
740
741
742 static void
743 udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
744 {
745 uint32_t offset;
746 uint32_t bit, bitval;
747 uint8_t *bpos;
748
749 offset = lb_num;
750
751 /* starter bits */
752 bpos = bitmap->bits + offset/8;
753 bit = offset % 8;
754 while ((bit != 0) && (num_lb > 0)) {
755 bitval = (1 << bit);
756 KASSERT((*bpos & bitval) == 0);
757 *bpos |= bitval;
758 offset++; num_lb--;
759 bit = (bit + 1) % 8;
760 }
761 if (num_lb == 0)
762 return;
763
764 /* whole bytes */
765 KASSERT(bit == 0);
766 bpos = bitmap->bits + offset / 8;
767 while (num_lb >= 8) {
768 KASSERT((*bpos == 0));
769 *bpos = 255;
770 offset += 8; num_lb -= 8;
771 bpos++;
772 }
773
774 /* stop bits */
775 KASSERT(num_lb < 8);
776 bit = 0;
777 while (num_lb > 0) {
778 bitval = (1 << bit);
779 KASSERT((*bpos & bitval) == 0);
780 *bpos |= bitval;
781 offset++; num_lb--;
782 bit = (bit + 1) % 8;
783 }
784 }
785
786
/* allocate a contiguous sequence of sectornumbers */
/*
 * Allocate num_lb logical blocks using the given allocation scheme
 * (VAT, sequential, or space-map bitmap).  The selected virtual partition
 * is returned in *alloc_partp; logical block numbers go into lmapping and
 * the corresponding physical ones into pmapping.  Returns 0 or ENOSPC.
 * Runs under the mount's allocate_mutex.
 */
static int
udf_allocate_space(struct udf_mount *ump, int ismetadata, int alloc_type,
	int num_lb, uint16_t *alloc_partp,
	uint64_t *lmapping, uint64_t *pmapping)
{
	struct mmc_trackinfo *alloc_track, *other_track;
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint64_t *lmappos, *pmappos;
	uint32_t ptov, lb_num, *freepos, free_lbs;
	int lb_size, alloc_num_lb;
	int alloc_part;
	int error;

	mutex_enter(&ump->allocate_mutex);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size == ump->discinfo.sector_size);

	/* pick the partition/track pair for metadata or file data */
	if (ismetadata) {
		alloc_part  = ump->metadata_part;
		alloc_track = &ump->metadata_track;
		other_track = &ump->data_track;
	} else {
		alloc_part  = ump->data_part;
		alloc_track = &ump->data_track;
		other_track = &ump->metadata_track;
	}

	*alloc_partp = alloc_part;

	error = 0;
	/* XXX check disc space */

	pdesc = ump->partitions[ump->vtop[alloc_part]];
	lmappos = lmapping;
	pmappos = pmapping;

	switch (alloc_type) {
	case UDF_ALLOC_VAT :
		/* search empty slot in VAT file */
		KASSERT(num_lb == 1);
		error = udf_search_free_vatloc(ump, &lb_num);
		if (!error) {
			*lmappos = lb_num;
			*pmappos = 0;	/* will get late-allocated */
		}
		break;
	case UDF_ALLOC_SEQUENTIAL :
		/* sequential allocation on recordable media */
		/* calculate offset from physical base partition */
		ptov = udf_rw32(pdesc->start_loc);

		/* hand out consecutive blocks at the track's write pointer */
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			*pmappos++ = alloc_track->next_writable;
			*lmappos++ = alloc_track->next_writable - ptov;
			alloc_track->next_writable++;
			alloc_track->free_blocks--;
		}
		/* keep both views in sync when they share a track */
		if (alloc_track->tracknr == other_track->tracknr)
			memcpy(other_track, alloc_track,
				sizeof(struct mmc_trackinfo));
		break;
	case UDF_ALLOC_SPACEMAP :
		ptov = udf_rw32(pdesc->start_loc);

		/* allocate on unallocated bits page */
		alloc_num_lb = num_lb;
		bitmap = &ump->part_unalloc_bits[alloc_part];
		udf_bitmap_allocate(bitmap, ismetadata, ptov, &alloc_num_lb,
				pmappos, lmappos);
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		/* nonzero remainder means the bitmap ran out of space */
		if (alloc_num_lb) {
			/* TODO convert freed to unalloc and try again */
			/* free allocated piece for now */
			lmappos = lmapping;
			for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
				udf_bitmap_free(bitmap, *lmappos++, 1);
			}
			error = ENOSPC;
		}
		if (!error) {
			/* adjust freecount */
			lvid = ump->logvol_integrity;
			freepos = &lvid->tables[0] + alloc_part;
			free_lbs = udf_rw32(*freepos);
			*freepos = udf_rw32(free_lbs - num_lb);
		}
		break;
	case UDF_ALLOC_METABITMAP :
	case UDF_ALLOC_METASEQUENTIAL :
	case UDF_ALLOC_RELAXEDSEQUENTIAL :
		printf("ALERT: udf_allocate_space : allocation %d "
			"not implemented yet!\n", alloc_type);
		/* TODO implement, doesn't have to be contiguous */
		error = ENOSPC;
		break;
	}

#ifdef DEBUG
	if (udf_verbose & UDF_DEBUG_ALLOC) {
		lmappos = lmapping;
		pmappos = pmapping;
		printf("udf_allocate_space, mapping l->p:\n");
		for (lb_num = 0; lb_num < num_lb; lb_num++) {
			printf("\t%"PRIu64" -> %"PRIu64"\n",
				*lmappos++, *pmappos++);
		}
	}
#endif
	mutex_exit(&ump->allocate_mutex);

	return error;
}
903
904 /* --------------------------------------------------------------------- */
905
/*
 * Return num_lb previously allocated blocks, starting at virtual block
 * lb_num in virtual partition vpart_num, back to free space.  Depending
 * on the partition type this either sets bits in the freed/unallocated
 * bitmap (and bumps the free count) or clears the VAT entry.  Runs under
 * the mount's allocate_mutex.
 */
void
udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
	uint16_t vpart_num, uint32_t num_lb)
{
	struct udf_bitmap *bitmap;
	struct part_desc *pdesc;
	struct logvol_int_desc *lvid;
	uint32_t ptov, lb_map, udf_rw32_lbmap;
	uint32_t *freepos, free_lbs;
	int phys_part;
	int error;

	DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
			"part %d + %d sect\n", lb_num, vpart_num, num_lb));

	mutex_enter(&ump->allocate_mutex);

	/* get partition backing up this vpart_num */
	pdesc = ump->partitions[ump->vtop[vpart_num]];

	switch (ump->vtop_tp[vpart_num]) {
	case UDF_VTOP_TYPE_PHYS :
	case UDF_VTOP_TYPE_SPARABLE :
		/* free space to freed or unallocated space bitmap */
		ptov      = udf_rw32(pdesc->start_loc);
		phys_part = ump->vtop[vpart_num];

		/* first try freed space bitmap */
		bitmap = &ump->part_freed_bits[phys_part];

		/* if not defined, use unallocated bitmap */
		if (bitmap->bits == NULL)
			bitmap = &ump->part_unalloc_bits[phys_part];

		/* if no bitmaps are defined, bail out */
		if (bitmap->bits == NULL)
			break;

		/* free bits if its defined */
		KASSERT(bitmap->bits);
		/* remember that the bitmaps need writing out on close */
		ump->lvclose |= UDF_WRITE_PART_BITMAPS;
		udf_bitmap_free(bitmap, lb_num, num_lb);

		/* adjust freecount */
		lvid = ump->logvol_integrity;
		freepos = &lvid->tables[0] + vpart_num;
		free_lbs = udf_rw32(*freepos);
		*freepos = udf_rw32(free_lbs + num_lb);
		break;
	case UDF_VTOP_TYPE_VIRT :
		/* free this VAT entry */
		KASSERT(num_lb == 1);

		/* mark the entry free again (all-ones) */
		lb_map = 0xffffffff;
		udf_rw32_lbmap = udf_rw32(lb_map);
		error = udf_vat_write(ump->vat_node,
			(uint8_t *) &udf_rw32_lbmap, 4,
			ump->vat_offset + lb_num * 4);
		KASSERT(error == 0);
		/* remember the lowest free slot to speed up future searches */
		ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
		break;
	case UDF_VTOP_TYPE_META :
		/* free space in the metadata bitmap */
	default:
		printf("ALERT: udf_free_allocated_space : allocation %d "
			"not implemented yet!\n", ump->vtop_tp[vpart_num]);
		break;
	}

	mutex_exit(&ump->allocate_mutex);
}
977
978 /* --------------------------------------------------------------------- */
979
980 int
981 udf_pre_allocate_space(struct udf_mount *ump, int udf_c_type, int num_lb,
982 uint16_t *alloc_partp, uint64_t *lmapping, uint64_t *pmapping)
983 {
984 int ismetadata, alloc_type;
985
986 ismetadata = (udf_c_type == UDF_C_NODE);
987 alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;
988
989 #ifdef DIAGNOSTIC
990 if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
991 panic("udf_pre_allocate_space: bad c_type on VAT!\n");
992 }
993 #endif
994
995 /* reserve size for VAT allocated data */
996 if (alloc_type == UDF_ALLOC_VAT) {
997 mutex_enter(&ump->allocate_mutex);
998 ump->uncomitted_lb += num_lb;
999 mutex_exit(&ump->allocate_mutex);
1000 }
1001
1002 return udf_allocate_space(ump, ismetadata, alloc_type,
1003 num_lb, alloc_partp, lmapping, pmapping);
1004 }
1005
1006 /* --------------------------------------------------------------------- */
1007
1008 /*
1009 * Allocate a buf on disc for direct write out. The space doesn't have to be
1010 * contiguous as the caller takes care of this.
1011 */
1012
/*
 * Allocate disc space for a buffer at write-out time.  For each sector in
 * the buf a disc sector is allocated and recorded in lmapping/pmapping;
 * b_blkno is set to the first physical block.  User data and FID buffers
 * additionally get their new location recorded in their node.  The space
 * doesn't have to be contiguous as the caller takes care of this.
 */
void
udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
	uint64_t *lmapping, uint64_t *pmapping, struct long_ad *node_ad_cpy)
{
	struct udf_node *udf_node = VTOI(buf->b_vp);
	uint16_t vpart_num;
	int lb_size, blks, udf_c_type;
	int ismetadata, alloc_type;
	int num_lb;
	int error, s;

	/*
	 * for each sector in the buf, allocate a sector on disc and record
	 * its position in the provided mapping array.
	 *
	 * If its userdata or FIDs, record its location in its node.
	 */

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	num_lb = (buf->b_bcount + lb_size -1) / lb_size;
	blks = lb_size / DEV_BSIZE;	/* device blocks per logical block */
	udf_c_type = buf->b_udf_c_type;

	KASSERT(lb_size == ump->discinfo.sector_size);

	ismetadata = (udf_c_type == UDF_C_NODE);
	alloc_type = ismetadata? ump->meta_alloc : ump->data_alloc;

#ifdef DIAGNOSTIC
	if ((alloc_type == UDF_ALLOC_VAT) && (udf_c_type != UDF_C_NODE)) {
		panic("udf_late_allocate_buf: bad c_type on VAT!\n");
	}
#endif

	if (udf_c_type == UDF_C_NODE) {
		/* if not VAT, its allready allocated */
		if (alloc_type != UDF_ALLOC_VAT)
			return;

		/* allocate sequential */
		alloc_type = UDF_ALLOC_SEQUENTIAL;
	}

	error = udf_allocate_space(ump, ismetadata, alloc_type,
			num_lb, &vpart_num, lmapping, pmapping);
	if (error) {
		/* ARGH! we've not done our accounting right! */
		panic("UDF disc allocation accounting gone wrong");
	}

	/* commit our sector count: undo the pre-allocation reservation */
	mutex_enter(&ump->allocate_mutex);
	if (num_lb > ump->uncomitted_lb) {
		ump->uncomitted_lb = 0;
	} else {
		ump->uncomitted_lb -= num_lb;
	}
	mutex_exit(&ump->allocate_mutex);

	/* point the buf at its new physical location (device block units) */
	buf->b_blkno = (*pmapping) * blks;

	/* If its userdata or FIDs, record its allocation in its node. */
	if ((udf_c_type == UDF_C_USERDATA) || (udf_c_type == UDF_C_FIDS)) {
		udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
			node_ad_cpy);
		/* decrement our outstanding bufs counter */
		s = splbio();
		udf_node->outstanding_bufs--;
		splx(s);
	}
}
1084
1085 /* --------------------------------------------------------------------- */
1086
1087 /*
1088 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1089 * possible (anymore); a2 returns the rest piece.
1090 */
1091
1092 static int
1093 udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1094 {
1095 uint32_t max_len, merge_len;
1096 uint32_t a1_len, a2_len;
1097 uint32_t a1_flags, a2_flags;
1098 uint32_t a1_lbnum, a2_lbnum;
1099 uint16_t a1_part, a2_part;
1100
1101 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1102
1103 a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1104 a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1105 a1_lbnum = udf_rw32(a1->loc.lb_num);
1106 a1_part = udf_rw16(a1->loc.part_num);
1107
1108 a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1109 a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1110 a2_lbnum = udf_rw32(a2->loc.lb_num);
1111 a2_part = udf_rw16(a2->loc.part_num);
1112
1113 /* defines same space */
1114 if (a1_flags != a2_flags)
1115 return 1;
1116
1117 if (a1_flags != UDF_EXT_FREE) {
1118 /* the same partition */
1119 if (a1_part != a2_part)
1120 return 1;
1121
1122 /* a2 is successor of a1 */
1123 if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1124 return 1;
1125 }
1126
1127 /* merge as most from a2 if possible */
1128 merge_len = MIN(a2_len, max_len - a1_len);
1129 a1_len += merge_len;
1130 a2_len -= merge_len;
1131 a2_lbnum += merge_len/lb_size;
1132
1133 a1->len = udf_rw32(a1_len | a1_flags);
1134 a2->len = udf_rw32(a2_len | a2_flags);
1135 a2->loc.lb_num = udf_rw32(a2_lbnum);
1136
1137 if (a2_len > 0)
1138 return 1;
1139
1140 /* there is space over to merge */
1141 return 0;
1142 }
1143
1144 /* --------------------------------------------------------------------- */
1145
1146 static void
1147 udf_wipe_adslots(struct udf_node *udf_node)
1148 {
1149 struct file_entry *fe;
1150 struct extfile_entry *efe;
1151 struct alloc_ext_entry *ext;
1152 uint64_t inflen, objsize;
1153 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1154 uint8_t *data_pos;
1155 int extnr;
1156
1157 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1158
1159 fe = udf_node->fe;
1160 efe = udf_node->efe;
1161 if (fe) {
1162 inflen = udf_rw64(fe->inf_len);
1163 objsize = inflen;
1164 dscr_size = sizeof(struct file_entry) -1;
1165 l_ea = udf_rw32(fe->l_ea);
1166 l_ad = udf_rw32(fe->l_ad);
1167 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1168 } else {
1169 inflen = udf_rw64(efe->inf_len);
1170 objsize = udf_rw64(efe->obj_size);
1171 dscr_size = sizeof(struct extfile_entry) -1;
1172 l_ea = udf_rw32(efe->l_ea);
1173 l_ad = udf_rw32(efe->l_ad);
1174 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1175 }
1176 max_l_ad = lb_size - dscr_size - l_ea;
1177
1178 /* wipe fe/efe */
1179 memset(data_pos, 0, max_l_ad);
1180 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1181 if (fe) {
1182 fe->l_ad = udf_rw32(0);
1183 fe->logblks_rec = udf_rw64(0);
1184 fe->tag.desc_crc_len = udf_rw32(crclen);
1185 } else {
1186 efe->l_ad = udf_rw32(0);
1187 efe->logblks_rec = udf_rw64(0);
1188 efe->tag.desc_crc_len = udf_rw32(crclen);
1189 }
1190
1191 /* wipe all allocation extent entries */
1192 for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1193 ext = udf_node->ext[extnr];
1194 dscr_size = sizeof(struct alloc_ext_entry) -1;
1195 max_l_ad = lb_size - dscr_size;
1196 memset(data_pos, 0, max_l_ad);
1197 ext->l_ad = udf_rw32(0);
1198
1199 crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1200 ext->tag.desc_crc_len = udf_rw32(crclen);
1201 }
1202 }
1203
1204 /* --------------------------------------------------------------------- */
1205
1206 void
1207 udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1208 int *eof) {
1209 struct file_entry *fe;
1210 struct extfile_entry *efe;
1211 struct alloc_ext_entry *ext;
1212 struct icb_tag *icbtag;
1213 struct short_ad *short_ad;
1214 struct long_ad *long_ad;
1215 uint32_t offset;
1216 uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad;
1217 uint8_t *data_pos;
1218 int icbflags, addr_type, adlen, extnr;
1219
1220 /* determine what descriptor we are in */
1221 lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1222
1223 fe = udf_node->fe;
1224 efe = udf_node->efe;
1225 if (fe) {
1226 icbtag = &fe->icbtag;
1227 dscr_size = sizeof(struct file_entry) -1;
1228 l_ea = udf_rw32(fe->l_ea);
1229 l_ad = udf_rw32(fe->l_ad);
1230 data_pos = (uint8_t *) fe + dscr_size + l_ea;
1231 } else {
1232 icbtag = &efe->icbtag;
1233 dscr_size = sizeof(struct extfile_entry) -1;
1234 l_ea = udf_rw32(efe->l_ea);
1235 l_ad = udf_rw32(efe->l_ad);
1236 data_pos = (uint8_t *) efe + dscr_size + l_ea;
1237 }
1238 max_l_ad = lb_size - dscr_size - l_ea;
1239
1240 icbflags = udf_rw16(icbtag->flags);
1241 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1242
1243 /* just in case we're called on an intern, its EOF */
1244 if (addr_type == UDF_ICB_INTERN_ALLOC) {
1245 memset(icb, 0, sizeof(struct long_ad));
1246 *eof = 1;
1247 return;
1248 }
1249
1250 adlen = 0;
1251 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1252 adlen = sizeof(struct short_ad);
1253 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1254 adlen = sizeof(struct long_ad);
1255 }
1256
1257 /* if offset too big, we go to the allocation extensions */
1258 offset = slot * adlen;
1259 extnr = -1;
1260 while (offset >= max_l_ad) {
1261 extnr++;
1262 offset -= max_l_ad;
1263 ext = udf_node->ext[extnr];
1264 dscr_size = sizeof(struct alloc_ext_entry) -1;
1265 l_ad = udf_rw32(ext->l_ad);
1266 max_l_ad = lb_size - dscr_size;
1267 data_pos = (uint8_t *) ext + dscr_size;
1268 if (extnr > udf_node->num_extensions) {
1269 l_ad = 0; /* force EOF */
1270 break;
1271 }
1272 }
1273
1274 *eof = (offset >= l_ad) || (l_ad == 0);
1275 if (*eof) {
1276 memset(icb, 0, sizeof(struct long_ad));
1277 return;
1278 }
1279
1280 /* get the element */
1281 if (addr_type == UDF_ICB_SHORT_ALLOC) {
1282 short_ad = (struct short_ad *) (data_pos + offset);
1283 icb->len = short_ad->len;
1284 icb->loc.part_num = udf_node->loc.loc.part_num;
1285 icb->loc.lb_num = short_ad->lb_num;
1286 } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1287 long_ad = (struct long_ad *) (data_pos + offset);
1288 *icb = *long_ad;
1289 }
1290 }
1291
1292 /* --------------------------------------------------------------------- */
1293
/*
 * Write allocation descriptor `icb' into logical slot `slot' of the node,
 * either overwriting an existing descriptor or appending a new one at the
 * end. Adjusts the node's logblks_rec count and the containing descriptor's
 * l_ad and CRC length. Returns EFBIG when the node is too fragmented or an
 * allocation extent would be needed (not implemented yet).
 */
int
udf_append_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb) {
	union dscrptr *dscr;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct alloc_ext_entry *ext;
	struct icb_tag *icbtag;
	struct short_ad *short_ad;
	struct long_ad *long_ad, o_icb;
	uint64_t logblks_rec, *logblks_rec_p;
	uint32_t offset, rest, len;
	uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
	uint8_t *data_pos;
	int icbflags, addr_type, adlen, extnr;

	/* determine what descriptor we are in */
	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		dscr = (union dscrptr *) fe;
		dscr_size = sizeof(struct file_entry) -1;

		l_ea = udf_rw32(fe->l_ea);
		l_ad_p = &fe->l_ad;
		logblks_rec_p = &fe->logblks_rec;
	} else {
		icbtag = &efe->icbtag;
		dscr = (union dscrptr *) efe;
		dscr_size = sizeof(struct extfile_entry) -1;

		l_ea = udf_rw32(efe->l_ea);
		l_ad_p = &efe->l_ad;
		logblks_rec_p = &efe->logblks_rec;
	}
	data_pos = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* just in case we're called on an intern, its EOF */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
	}

	adlen = 0;
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		adlen = sizeof(struct short_ad);
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		adlen = sizeof(struct long_ad);
	}

	/* if offset too big, we go to the allocation extensions */
	offset = slot * adlen;
	extnr = 0;
	/*
	 * NOTE(review): udf_get_adslot() walks its extensions with
	 * `offset >= max_l_ad' while this loop uses `>'; this looks like an
	 * off-by-one inconsistency in the slot numbering -- confirm.
	 * NOTE(review): extnr is not bounds-checked before the ext[] read;
	 * the EFBIG test below only fires after the loop has already
	 * dereferenced the entries.
	 */
	while (offset > max_l_ad) {
		offset -= max_l_ad;
		ext = udf_node->ext[extnr];
		dscr = (union dscrptr *) ext;
		dscr_size = sizeof(struct alloc_ext_entry) -1;

		KASSERT(ext != NULL);
		l_ad_p = &ext->l_ad;
		max_l_ad = lb_size - dscr_size;
		data_pos = (uint8_t *) dscr + dscr_size;

		extnr++;
	}
	/* offset is offset within the current (E)FE/AED */
	l_ad = udf_rw32(*l_ad_p);
	crclen = udf_rw32(dscr->tag.desc_crc_len);
	/* logblks_rec_p still points into the fe/efe even when we ended up
	 * in an AED: only the (e)fe records the logical blocks count */
	logblks_rec = udf_rw64(*logblks_rec_p);

	if (extnr > udf_node->num_extensions)
		return EFBIG; /* too fragmented */

	/* overwriting old piece? */
	if (offset < l_ad) {
		/* overwrite entry; compensate for the old element */
		if (addr_type == UDF_ICB_SHORT_ALLOC) {
			short_ad = (struct short_ad *) (data_pos + offset);
			o_icb.len = short_ad->len;
			o_icb.loc.part_num = udf_rw16(0); /* ignore */
			o_icb.loc.lb_num = short_ad->lb_num;
		} else if (addr_type == UDF_ICB_LONG_ALLOC) {
			long_ad = (struct long_ad *) (data_pos + offset);
			o_icb = *long_ad;
		} else {
			panic("Invalid address type in udf_append_adslot\n");
		}

		len = udf_rw32(o_icb.len);
		if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
			/* adjust counts: the old extent is being replaced */
			len = UDF_EXT_LEN(len);
			logblks_rec -= (len + lb_size -1) / lb_size;
		}
	}

	/* calculate rest space in this descriptor */
	rest = max_l_ad - offset;
	if (rest <= adlen) {
		/* create redirect and link new allocation extension */
		printf("udf_append_to_adslot: can't create allocation extention yet\n");
		return EFBIG;
	}

	/* write out the element */
	if (addr_type == UDF_ICB_SHORT_ALLOC) {
		short_ad = (struct short_ad *) (data_pos + offset);
		short_ad->len = icb->len;
		short_ad->lb_num = icb->loc.lb_num;
	} else if (addr_type == UDF_ICB_LONG_ALLOC) {
		long_ad = (struct long_ad *) (data_pos + offset);
		*long_ad = *icb;
	}

	/* adjust logblks recorded count */
	/* NOTE(review): icb->len is the on-disc (LE) value; UDF_EXT_FLAGS /
	 * UDF_EXT_LEN are applied here without udf_rw32() unlike elsewhere
	 * in this file -- harmless on little-endian, confirm for BE. */
	if (UDF_EXT_FLAGS(icb->len) == UDF_EXT_ALLOCATED)
		logblks_rec += (UDF_EXT_LEN(icb->len) + lb_size -1) / lb_size;
	*logblks_rec_p = udf_rw64(logblks_rec);

	/* adjust l_ad and crclen when needed */
	if (offset >= l_ad) {
		l_ad += adlen;
		crclen += adlen;
		dscr->tag.desc_crc_len = udf_rw32(crclen);
		*l_ad_p = udf_rw32(l_ad);
	}

	return 0;
}
1429
1430 /* --------------------------------------------------------------------- */
1431
1432 /*
1433 * Adjust the node's allocation descriptors to reflect the new mapping; do
1434 * take note that we might glue to existing allocation descriptors.
1435 *
 * XXX Note there can only be one allocation being recorded per mount; maybe
 * do explicit allocation in the schedule thread?
1438 */
1439
/*
 * Rewrite the node's allocation descriptors so that the file range covered
 * by `buf' points at the freshly allocated blocks in `mapping' (logical
 * block numbers within virtual partition `vpart_num'). Old allocated blocks
 * in the overlapped range are freed. `node_ad_cpy' is caller-provided
 * scratch space used to stage the new descriptor list before it is written
 * back (wipe + merge + append).
 */
static void
udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
	uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
{
	struct vnode *vp = buf->b_vp;
	struct udf_node *udf_node = VTOI(vp);
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad s_ad, c_ad;
	uint64_t inflen, from, till;
	uint64_t foffset, end_foffset, restart_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t num_lb, len, flags, lb_num;
	uint32_t run_start;
	uint32_t slot_offset;
	uint32_t skip_len, skipped;
	int addr_type, icbflags;
	int udf_c_type = buf->b_udf_c_type;
	int lb_size, run_length, eof;
	int slot, cpy_slot, cpy_slots, restart_slot;
	int error;

	DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	/* sanity check ... should be panic ? */
	if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
		return;

	lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);

	/* do the job */
	UDF_LOCK_NODE(udf_node, 0);	/* XXX can deadlock ? */

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		icbtag = &fe->icbtag;
		inflen = udf_rw64(fe->inf_len);
	} else {
		icbtag = &efe->icbtag;
		inflen = udf_rw64(efe->inf_len);
	}

	/* do check if `till' is not past file information length */
	from = buf->b_lblkno * lb_size;
	till = MIN(inflen, from + buf->b_resid);

	num_lb = (till - from + lb_size -1) / lb_size;

	DPRINTF(ALLOC, ("record allocation from = %"PRIu64" + %d\n", from, buf->b_bcount));

	icbflags = udf_rw16(icbtag->flags);
	addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	/* intern-allocated nodes keep their data in the fe/efe itself */
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		/* nothing to do */
		/* XXX clean up rest of node? just in case? */
		UDF_UNLOCK_NODE(udf_node, 0);
		return;
	}

	slot = 0;
	cpy_slot = 0;
	foffset = 0;

	/* 1) copy till first overlap piece to the rewrite buffer */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: encountered EOF\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		/* redirects (AED links) are skipped; they carry no data */
		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		end_foffset = foffset + len;
		if (end_foffset > from)
			break;	/* found */

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		foffset = end_foffset;
		slot++;
	}
	/* remember where the overlap starts; step 4 restarts from here */
	restart_slot = slot;
	restart_foffset = foffset;

	/* 2) trunc overlapping slot at overlap and copy it */
	slot_offset = from - foffset;
	if (slot_offset > 0) {
		DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
				slot_offset, flags >> 30, flags));

		s_ad.len = udf_rw32(slot_offset | flags);
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
			"-> stack\n",
			udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}
	foffset += slot_offset;

	/* 3) insert new mappings, coalescing consecutive block runs */
	memset(&s_ad, 0, sizeof(struct long_ad));
	lb_num = 0;
	for (lb_num = 0; lb_num < num_lb; lb_num++) {
		run_start = mapping[lb_num];
		run_length = 1;
		/* extend the run while the next mapping is contiguous (or a
		 * duplicate of the current block) */
		while (lb_num < num_lb-1) {
			if (mapping[lb_num+1] != mapping[lb_num]+1)
				if (mapping[lb_num+1] != mapping[lb_num])
					break;
			run_length++;
			lb_num++;
		}
		/* insert slot for this mapping */
		len = run_length * lb_size;

		/* bounds checking */
		if (foffset + len > till)
			len = till - foffset;
		KASSERT(foffset + len <= inflen);

		s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
		s_ad.loc.part_num = udf_rw16(vpart_num);
		s_ad.loc.lb_num = udf_rw32(run_start);

		foffset += len;

		/* paranoia */
		if (len == 0) {
			DPRINTF(WRITE,
				("Record allocation in node "
				 "failed: insert failed\n"));
			UDF_UNLOCK_NODE(udf_node, 0);
			buf->b_error = EINVAL;
			return;
		}
		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
				"flags %d -> stack\n",
			udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
	}

	/* 4) pop replaced length: walk the old slots over [from, till),
	 *    freeing the allocated blocks they covered */
	slot = restart_slot;
	foffset = restart_foffset;

	skip_len = till - foffset;	/* relative to start of slot */
	slot_offset = from - foffset;
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);
		lb_num = udf_rw32(s_ad.loc.lb_num);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		DPRINTF(ALLOC, ("\t4i: got slot %d, skip_len %d, vp %d, "
			"lb %d, len %d, flags %d\n",
			slot, skip_len, udf_rw16(s_ad.loc.part_num),
			udf_rw32(s_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(s_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		skipped = MIN(len, skip_len);
		if (flags != UDF_EXT_FREE) {
			if (slot_offset) {
				/* skip these blocks first: they were kept by
				 * the truncated piece pushed in step 2 */
				num_lb = (slot_offset + lb_size-1) / lb_size;
				len -= slot_offset;
				skip_len -= slot_offset;
				foffset += slot_offset;
				lb_num += num_lb;
				skipped -= slot_offset;
				slot_offset = 0;
			}
			/* free space from current position till `skipped' */
			num_lb = (skipped + lb_size-1) / lb_size;
			udf_free_allocated_space(ump, lb_num,
				udf_rw16(s_ad.loc.part_num), num_lb);
			lb_num += num_lb;
		}
		len -= skipped;
		skip_len -= skipped;
		foffset += skipped;

		if (len) {
			KASSERT(skipped % lb_size == 0);

			/* we arrived at our point, push remainder */
			s_ad.len = udf_rw32(len | flags);
			s_ad.loc.lb_num = udf_rw32(lb_num);
			node_ad_cpy[cpy_slot++] = s_ad;
			foffset += len;
			slot++;

			DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
				"-> stack\n",
				udf_rw16(s_ad.loc.part_num),
				udf_rw32(s_ad.loc.lb_num),
				UDF_EXT_LEN(udf_rw32(s_ad.len)),
				UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
			break;
		}
		slot++;
	}

	/* 5) copy remainder */
	for (;;) {
		udf_get_adslot(udf_node, slot, &s_ad, &eof);
		if (eof)
			break;

		len = udf_rw32(s_ad.len);
		flags = UDF_EXT_FLAGS(len);
		len = UDF_EXT_LEN(len);

		if (flags == UDF_EXT_REDIRECT) {
			slot++;
			continue;
		}

		node_ad_cpy[cpy_slot++] = s_ad;

		DPRINTF(ALLOC, ("\t5: insert new mapping "
			"vp %d lb %d, len %d, flags %d "
			"-> stack\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		slot++;
	}

	/* 6) reset node descriptors */
	udf_wipe_adslots(udf_node);

	/* 7) copy back extents; merge when possible. Recounting on the fly */
	cpy_slots = cpy_slot;

	c_ad = node_ad_cpy[0];
	slot = 0;
	DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
		"lb %d, len %d, flags %d\n",
	udf_rw16(c_ad.loc.part_num),
	udf_rw32(c_ad.loc.lb_num),
	UDF_EXT_LEN(udf_rw32(c_ad.len)),
	UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

	for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
		s_ad = node_ad_cpy[cpy_slot];

		DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
			"lb %d, len %d, flags %d\n",
		udf_rw16(s_ad.loc.part_num),
		udf_rw32(s_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(s_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));

		/* see if we can merge */
		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore) */
			DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
				"len %d, flags %d\n",
			udf_rw16(c_ad.loc.part_num),
			udf_rw32(c_ad.loc.lb_num),
			UDF_EXT_LEN(udf_rw32(c_ad.len)),
			UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error) {
				buf->b_error = error;
				goto out;
			}
			c_ad = s_ad;
			slot++;
		}
	}

	/* 8) push rest slot (if any) */
	/* NOTE(review): c_ad.len is the on-disc (LE) value; UDF_EXT_LEN is
	 * applied without udf_rw32() here -- harmless on LE, confirm for BE */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
			"len %d, flags %d\n",
		udf_rw16(c_ad.loc.part_num),
		udf_rw32(c_ad.loc.lb_num),
		UDF_EXT_LEN(udf_rw32(c_ad.len)),
		UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));

		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error) {
			buf->b_error = error;
			goto out;
		}
	}

out:
	/* the node's descriptors should now be sane */
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);

	KASSERT(orig_inflen == new_inflen);
	KASSERT(new_lbrec >= orig_lbrec);

	return;
}
1780
1781 /* --------------------------------------------------------------------- */
1782
/*
 * Grow the node to `new_size' bytes. Three cases:
 *  - intern-allocated and the new size still fits the descriptor area:
 *    just bump inf_len/obj_size/l_ad in place;
 *  - intern-allocated but too big: evacuate the embedded data, convert the
 *    node to long_ad allocation and re-write the data through vn_rdwr();
 *  - already extern-allocated: extend the last extent to a block boundary
 *    and append UDF_EXT_FREE extents (in max_len chunks) for the rest.
 * Returns 0 on success or an errno value.
 */
int
udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
{
	union dscrptr *dscr;
	struct vnode *vp = udf_node->vnode;
	struct udf_mount *ump = udf_node->ump;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct icb_tag *icbtag;
	struct long_ad c_ad, s_ad;
	uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
	uint64_t foffset, end_foffset;
	uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
	uint32_t lb_size, dscr_size, crclen, lastblock_grow;
	uint32_t len, flags, max_len;
	uint32_t max_l_ad, l_ad, l_ea;
	uint8_t *data_pos, *evacuated_data;
	int icbflags, addr_type;
	int slot, cpy_slot;
	int eof, error;

	DPRINTF(ALLOC, ("udf_grow_node\n"));
	udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);

	UDF_LOCK_NODE(udf_node, 0);
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* longest extent we may record, rounded down to a whole lb_size */
	max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);

	fe = udf_node->fe;
	efe = udf_node->efe;
	if (fe) {
		dscr = (union dscrptr *) fe;
		icbtag  = &fe->icbtag;
		inflen  = udf_rw64(fe->inf_len);
		objsize = inflen;
		dscr_size  = sizeof(struct file_entry) -1;
		l_ea       = udf_rw32(fe->l_ea);
		l_ad       = udf_rw32(fe->l_ad);
	} else {
		dscr = (union dscrptr *) efe;
		icbtag  = &efe->icbtag;
		inflen  = udf_rw64(efe->inf_len);
		objsize = udf_rw64(efe->obj_size);
		dscr_size  = sizeof(struct extfile_entry) -1;
		l_ea       = udf_rw32(efe->l_ea);
		l_ad       = udf_rw32(efe->l_ad);
	}
	data_pos  = (uint8_t *) dscr + dscr_size + l_ea;
	max_l_ad = lb_size - dscr_size - l_ea;

	icbflags   = udf_rw16(icbtag->flags);
	addr_type  = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;

	old_size  = inflen;
	size_diff = new_size - old_size;

	DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));

	evacuated_data = NULL;
	if (addr_type == UDF_ICB_INTERN_ALLOC) {
		if (l_ad + size_diff <= max_l_ad) {
			/* only reflect size change directly in the node */
			inflen  += size_diff;
			objsize += size_diff;
			l_ad    += size_diff;
			/* for intern alloc the data itself is CRC covered */
			crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
			if (fe) {
				fe->inf_len   = udf_rw64(inflen);
				fe->l_ad      = udf_rw32(l_ad);
				fe->tag.desc_crc_len = udf_rw32(crclen);
			} else {
				efe->inf_len  = udf_rw64(inflen);
				efe->obj_size = udf_rw64(objsize);
				efe->l_ad     = udf_rw32(l_ad);
				efe->tag.desc_crc_len = udf_rw32(crclen);
			}
			error = 0;

			/* set new size for uvm */
			uvm_vnp_setsize(vp, old_size);
			uvm_vnp_setwritesize(vp, new_size);

#if 0
			/* zero append space in buffer */
			uvm_vnp_zerorange(vp, old_size, new_size - old_size);
#endif
	
			/* unlock */
			UDF_UNLOCK_NODE(udf_node, 0);

			udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
			KASSERT(new_inflen == orig_inflen + size_diff);
			KASSERT(new_lbrec == orig_lbrec);
			KASSERT(new_lbrec == 0);
			return 0;
		}

		DPRINTF(ALLOC, ("\tCONVERT from internal\n"));

		if (old_size > 0) {
			/* allocate some space and copy in the stuff to keep */
			evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
			memset(evacuated_data, 0, lb_size);

			/* node is locked, so safe to exit mutex */
			UDF_UNLOCK_NODE(udf_node, 0);

			/* read in using the `normal' vn_rdwr() */
			error = vn_rdwr(UIO_READ, udf_node->vnode,
					evacuated_data, old_size, 0, 
					UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
					FSCRED, NULL, NULL);

			/* enter again */
			UDF_LOCK_NODE(udf_node, 0);
		}

		/* convert to a normal alloc */
		/* XXX HOWTO selecting allocation method ? */
		icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
		icbflags |=  UDF_ICB_LONG_ALLOC;	/* XXX or SHORT_ALLOC */
		icbtag->flags = udf_rw16(icbflags);

		/* wipe old descriptor space */
		udf_wipe_adslots(udf_node);

		/* the evacuated data will be written back via a free extent
		 * covering the old size */
		memset(&c_ad, 0, sizeof(struct long_ad));
		c_ad.len          = udf_rw32(old_size | UDF_EXT_FREE);
		c_ad.loc.part_num = udf_rw16(0); /* not relevant */
		c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */

		slot = 0;
	} else {
		/* goto the last entry (if any) */
		slot     = 0;
		cpy_slot = 0;
		foffset  = 0;
		memset(&c_ad, 0, sizeof(struct long_ad));
		for (;;) {
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
			if (eof)
				break;

			len   = udf_rw32(c_ad.len);
			flags = UDF_EXT_FLAGS(len);
			len   = UDF_EXT_LEN(len);

			end_foffset = foffset + len;
			if (flags != UDF_EXT_REDIRECT)
				foffset = end_foffset;

			slot++;
		}
		/* at end of adslots */

		/* special case if the old size was zero, then there is no last slot */
		if (old_size == 0) {
			c_ad.len          = udf_rw32(0 | UDF_EXT_FREE);
			c_ad.loc.part_num = udf_rw16(0); /* not relevant */
			c_ad.loc.lb_num   = udf_rw32(0); /* not relevant */
		} else {
			/* refetch last slot */
			slot--;
			udf_get_adslot(udf_node, slot, &c_ad, &eof);
		}
	}

	/*
	 * If the length of the last slot is not a multiple of lb_size, adjust
	 * length so that it is; don't forget to adjust `append_len'! relevant for
	 * extending existing files
	 */
	len   = udf_rw32(c_ad.len);
	flags = UDF_EXT_FLAGS(len);
	len   = UDF_EXT_LEN(len);

	lastblock_grow = 0;
	if (len % lb_size > 0) {
		lastblock_grow = lb_size - (len % lb_size);
		lastblock_grow = MIN(size_diff, lastblock_grow);
		len += lastblock_grow;
		c_ad.len = udf_rw32(len | flags);

		/* TODO zero appened space in buffer! */
		/* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
	}
	memset(&s_ad, 0, sizeof(struct long_ad));

	/* size_diff can be bigger than allowed, so grow in chunks */
	append_len = size_diff - lastblock_grow;
	while (append_len > 0) {
		chunk = MIN(append_len, max_len);
		s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
		s_ad.loc.part_num = udf_rw16(0);
		s_ad.loc.lb_num   = udf_rw32(0);

		if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
			/* not mergable (anymore); write out c_ad and continue
			 * accumulating in the (partially merged) s_ad */
			error = udf_append_adslot(udf_node, slot, &c_ad);
			if (error)
				goto errorout;
			slot++;
			c_ad = s_ad;
			memset(&s_ad, 0, sizeof(struct long_ad));
		}
		append_len -= chunk;
	}

	/* if there is a rest piece in the accumulator, append it */
	/* NOTE(review): c_ad.len/s_ad.len are LE on-disc values; UDF_EXT_LEN
	 * is applied without udf_rw32() here -- harmless on LE, confirm BE */
	if (UDF_EXT_LEN(c_ad.len) > 0) {
		error = udf_append_adslot(udf_node, slot, &c_ad);
		if (error)
			goto errorout;
		slot++;
	}

	/* if there is a rest piece that didn't fit, append it */
	if (UDF_EXT_LEN(s_ad.len) > 0) {
		error = udf_append_adslot(udf_node, slot, &s_ad);
		if (error)
			goto errorout;
		slot++;
	}

	inflen  += size_diff;
	objsize += size_diff;
	if (fe) {
		fe->inf_len   = udf_rw64(inflen);
	} else {
		efe->inf_len  = udf_rw64(inflen);
		efe->obj_size = udf_rw64(objsize);
	}
	error = 0;

	if (evacuated_data) {
		/* set new write size for uvm */
		uvm_vnp_setwritesize(vp, old_size);

		/* write out evacuated data */
		error = vn_rdwr(UIO_WRITE, udf_node->vnode,
				evacuated_data, old_size, 0, 
				UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
				FSCRED, NULL, NULL);
		uvm_vnp_setsize(vp, old_size);
	}

errorout:
	if (evacuated_data)
		free(evacuated_data, M_UDFTEMP);
	UDF_UNLOCK_NODE(udf_node, 0);

	udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
	KASSERT(new_inflen == orig_inflen + size_diff);
	KASSERT(new_lbrec == orig_lbrec);

	return error;
}
2040
2041 /* --------------------------------------------------------------------- */
2042
2043 int
2044 udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2045 {
2046 struct vnode *vp = udf_node->vnode;
2047 struct udf_mount *ump = udf_node->ump;
2048 struct file_entry *fe;
2049 struct extfile_entry *efe;
2050 struct icb_tag *icbtag;
2051 struct long_ad c_ad, s_ad, *node_ad_cpy;
2052 uint64_t size_diff, old_size, inflen, objsize;
2053 uint64_t foffset, end_foffset;
2054 uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2055 uint32_t lb_size, dscr_size, crclen;
2056 uint32_t slot_offset;
2057 uint32_t len, flags, max_len;
2058 uint32_t num_lb, lb_num;
2059 uint32_t max_l_ad, l_ad, l_ea;
2060 uint16_t vpart_num;
2061 uint8_t *data_pos;
2062 int icbflags, addr_type;
2063 int slot, cpy_slot, cpy_slots;
2064 int eof, error;
2065
2066 DPRINTF(ALLOC, ("udf_shrink_node\n"));
2067 udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2068
2069 UDF_LOCK_NODE(udf_node, 0);
2070 lb_size = udf_rw32(ump->logical_vol->lb_size);
2071 max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2072
2073 /* do the work */
2074 fe = udf_node->fe;
2075 efe = udf_node->efe;
2076 if (fe) {
2077 icbtag = &fe->icbtag;
2078 inflen = udf_rw64(fe->inf_len);
2079 objsize = inflen;
2080 dscr_size = sizeof(struct file_entry) -1;
2081 l_ea = udf_rw32(fe->l_ea);
2082 l_ad = udf_rw32(fe->l_ad);
2083 data_pos = (uint8_t *) fe + dscr_size + l_ea;
2084 } else {
2085 icbtag = &efe->icbtag;
2086 inflen = udf_rw64(efe->inf_len);
2087 objsize = udf_rw64(efe->obj_size);
2088 dscr_size = sizeof(struct extfile_entry) -1;
2089 l_ea = udf_rw32(efe->l_ea);
2090 l_ad = udf_rw32(efe->l_ad);
2091 data_pos = (uint8_t *) efe + dscr_size + l_ea;
2092 }
2093 max_l_ad = lb_size - dscr_size - l_ea;
2094
2095 icbflags = udf_rw16(icbtag->flags);
2096 addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2097
2098 old_size = inflen;
2099 size_diff = old_size - new_size;
2100
2101 DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2102
2103 /* shrink the node to its new size */
2104 if (addr_type == UDF_ICB_INTERN_ALLOC) {
2105 /* only reflect size change directly in the node */
2106 KASSERT(new_size <= max_l_ad);
2107 inflen -= size_diff;
2108 objsize -= size_diff;
2109 l_ad -= size_diff;
2110 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2111 if (fe) {
2112 fe->inf_len = udf_rw64(inflen);
2113 fe->l_ad = udf_rw32(l_ad);
2114 fe->tag.desc_crc_len = udf_rw32(crclen);
2115 } else {
2116 efe->inf_len = udf_rw64(inflen);
2117 efe->obj_size = udf_rw64(objsize);
2118 efe->l_ad = udf_rw32(l_ad);
2119 efe->tag.desc_crc_len = udf_rw32(crclen);
2120 }
2121 error = 0;
2122 /* TODO zero appened space in buffer! */
2123 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2124
2125 /* set new size for uvm */
2126 uvm_vnp_setsize(vp, new_size);
2127 UDF_UNLOCK_NODE(udf_node, 0);
2128
2129 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2130 KASSERT(new_inflen == orig_inflen - size_diff);
2131 KASSERT(new_lbrec == orig_lbrec);
2132 KASSERT(new_lbrec == 0);
2133
2134 return 0;
2135 }
2136
2137 /* setup node cleanup extents copy space */
2138 node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2139 M_UDFMNT, M_WAITOK);
2140 memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
2141
2142 /*
2143 * Shrink the node by releasing the allocations and truncate the last
2144 * allocation to the new size. If the new size fits into the
2145 * allocation descriptor itself, transform it into an
2146 * UDF_ICB_INTERN_ALLOC.
2147 */
2148 slot = 0;
2149 cpy_slot = 0;
2150 foffset = 0;
2151
2152 /* 1) copy till first overlap piece to the rewrite buffer */
2153 for (;;) {
2154 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2155 if (eof) {
2156 DPRINTF(WRITE,
2157 ("Shrink node failed: "
2158 "encountered EOF\n"));
2159 error = EINVAL;
2160 goto errorout; /* panic? */
2161 }
2162 len = udf_rw32(s_ad.len);
2163 flags = UDF_EXT_FLAGS(len);
2164 len = UDF_EXT_LEN(len);
2165
2166 if (flags == UDF_EXT_REDIRECT) {
2167 slot++;
2168 continue;
2169 }
2170
2171 end_foffset = foffset + len;
2172 if (end_foffset > new_size)
2173 break; /* found */
2174
2175 node_ad_cpy[cpy_slot++] = s_ad;
2176
2177 DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2178 "-> stack\n",
2179 udf_rw16(s_ad.loc.part_num),
2180 udf_rw32(s_ad.loc.lb_num),
2181 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2182 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2183
2184 foffset = end_foffset;
2185 slot++;
2186 }
2187 slot_offset = new_size - foffset;
2188
2189 /* 2) trunc overlapping slot at overlap and copy it */
2190 if (slot_offset > 0) {
2191 lb_num = udf_rw32(s_ad.loc.lb_num);
2192 vpart_num = udf_rw16(s_ad.loc.part_num);
2193
2194 if (flags == UDF_EXT_ALLOCATED) {
2195 lb_num += (slot_offset + lb_size -1) / lb_size;
2196 num_lb = (len - slot_offset + lb_size - 1) / lb_size;
2197
2198 udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
2199 }
2200
2201 s_ad.len = udf_rw32(slot_offset | flags);
2202 node_ad_cpy[cpy_slot++] = s_ad;
2203 slot++;
2204
2205 DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2206 "-> stack\n",
2207 udf_rw16(s_ad.loc.part_num),
2208 udf_rw32(s_ad.loc.lb_num),
2209 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2210 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2211 }
2212
2213 /* 3) delete remainder */
2214 for (;;) {
2215 udf_get_adslot(udf_node, slot, &s_ad, &eof);
2216 if (eof)
2217 break;
2218
2219 len = udf_rw32(s_ad.len);
2220 flags = UDF_EXT_FLAGS(len);
2221 len = UDF_EXT_LEN(len);
2222
2223 if (flags == UDF_EXT_REDIRECT) {
2224 slot++;
2225 continue;
2226 }
2227
2228 DPRINTF(ALLOC, ("\t3: delete remainder "
2229 "vp %d lb %d, len %d, flags %d\n",
2230 udf_rw16(s_ad.loc.part_num),
2231 udf_rw32(s_ad.loc.lb_num),
2232 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2233 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2234
2235 if (flags == UDF_EXT_ALLOCATED) {
2236 lb_num = udf_rw32(s_ad.loc.lb_num);
2237 vpart_num = udf_rw16(s_ad.loc.part_num);
2238 num_lb = (len + lb_size - 1) / lb_size;
2239
2240 udf_free_allocated_space(ump, lb_num, vpart_num,
2241 num_lb);
2242 }
2243
2244 slot++;
2245 }
2246
2247 /* 4) if it will fit into the descriptor then convert */
2248 if (new_size < max_l_ad) {
2249 /*
2250 * resque/evacuate old piece by reading it in, and convert it
2251 * to internal alloc.
2252 */
2253 if (new_size == 0) {
2254 /* XXX/TODO only for zero sizing now */
2255 udf_wipe_adslots(udf_node);
2256
2257 icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2258 icbflags |= UDF_ICB_INTERN_ALLOC;
2259 icbtag->flags = udf_rw16(icbflags);
2260
2261 inflen -= size_diff; KASSERT(inflen == 0);
2262 objsize -= size_diff;
2263 l_ad = new_size;
2264 crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2265 if (fe) {
2266 fe->inf_len = udf_rw64(inflen);
2267 fe->l_ad = udf_rw32(l_ad);
2268 fe->tag.desc_crc_len = udf_rw32(crclen);
2269 } else {
2270 efe->inf_len = udf_rw64(inflen);
2271 efe->obj_size = udf_rw64(objsize);
2272 efe->l_ad = udf_rw32(l_ad);
2273 efe->tag.desc_crc_len = udf_rw32(crclen);
2274 }
2275 /* eventually copy in evacuated piece */
2276 /* set new size for uvm */
2277 uvm_vnp_setsize(vp, new_size);
2278
2279 free(node_ad_cpy, M_UDFMNT);
2280 UDF_UNLOCK_NODE(udf_node, 0);
2281
2282 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2283 KASSERT(new_inflen == orig_inflen - size_diff);
2284 KASSERT(new_inflen == 0);
2285 KASSERT(new_lbrec == 0);
2286
2287 return 0;
2288 }
2289
2290 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2291 }
2292
2293 /* 5) reset node descriptors */
2294 udf_wipe_adslots(udf_node);
2295
2296 /* 6) copy back extents; merge when possible. Recounting on the fly */
2297 cpy_slots = cpy_slot;
2298
2299 c_ad = node_ad_cpy[0];
2300 slot = 0;
2301 for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2302 s_ad = node_ad_cpy[cpy_slot];
2303
2304 DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
2305 "lb %d, len %d, flags %d\n",
2306 udf_rw16(s_ad.loc.part_num),
2307 udf_rw32(s_ad.loc.lb_num),
2308 UDF_EXT_LEN(udf_rw32(s_ad.len)),
2309 UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2310
2311 /* see if we can merge */
2312 if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2313 /* not mergable (anymore) */
2314 DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
2315 "len %d, flags %d\n",
2316 udf_rw16(c_ad.loc.part_num),
2317 udf_rw32(c_ad.loc.lb_num),
2318 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2319 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2320
2321 error = udf_append_adslot(udf_node, slot, &c_ad);
2322 if (error)
2323 goto errorout; /* panic? */
2324 c_ad = s_ad;
2325 slot++;
2326 }
2327 }
2328
2329 /* 7) push rest slot (if any) */
2330 if (UDF_EXT_LEN(c_ad.len) > 0) {
2331 DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
2332 "len %d, flags %d\n",
2333 udf_rw16(c_ad.loc.part_num),
2334 udf_rw32(c_ad.loc.lb_num),
2335 UDF_EXT_LEN(udf_rw32(c_ad.len)),
2336 UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2337
2338 error = udf_append_adslot(udf_node, slot, &c_ad);
2339 if (error)
2340 goto errorout; /* panic? */
2341 ;
2342 }
2343
2344 inflen -= size_diff;
2345 objsize -= size_diff;
2346 if (fe) {
2347 fe->inf_len = udf_rw64(inflen);
2348 } else {
2349 efe->inf_len = udf_rw64(inflen);
2350 efe->obj_size = udf_rw64(objsize);
2351 }
2352 error = 0;
2353
2354 /* set new size for uvm */
2355 uvm_vnp_setsize(vp, new_size);
2356
2357 errorout:
2358 free(node_ad_cpy, M_UDFMNT);
2359 UDF_UNLOCK_NODE(udf_node, 0);
2360
2361 udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2362 KASSERT(new_inflen == orig_inflen - size_diff);
2363
2364 return error;
2365 }
2366
2367