udf_allocation.c revision 1.1.2.6

1 1.1.2.6 yamt /* $NetBSD: udf_allocation.c,v 1.1.2.6 2010/03/11 15:04:14 yamt Exp $ */
2 1.1.2.2 yamt
3 1.1.2.2 yamt /*
4 1.1.2.2 yamt * Copyright (c) 2006, 2008 Reinoud Zandijk
5 1.1.2.2 yamt * All rights reserved.
6 1.1.2.2 yamt *
7 1.1.2.2 yamt * Redistribution and use in source and binary forms, with or without
8 1.1.2.2 yamt * modification, are permitted provided that the following conditions
9 1.1.2.2 yamt * are met:
10 1.1.2.2 yamt * 1. Redistributions of source code must retain the above copyright
11 1.1.2.2 yamt * notice, this list of conditions and the following disclaimer.
12 1.1.2.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
13 1.1.2.2 yamt * notice, this list of conditions and the following disclaimer in the
14 1.1.2.2 yamt * documentation and/or other materials provided with the distribution.
15 1.1.2.2 yamt *
16 1.1.2.2 yamt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 1.1.2.2 yamt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 1.1.2.2 yamt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 1.1.2.2 yamt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 1.1.2.2 yamt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 1.1.2.2 yamt * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 1.1.2.2 yamt * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 1.1.2.2 yamt * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 1.1.2.2 yamt * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 1.1.2.2 yamt * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 1.1.2.2 yamt *
27 1.1.2.2 yamt */
28 1.1.2.2 yamt
29 1.1.2.2 yamt #include <sys/cdefs.h>
30 1.1.2.2 yamt #ifndef lint
31 1.1.2.6 yamt __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.1.2.6 2010/03/11 15:04:14 yamt Exp $");
32 1.1.2.2 yamt #endif /* not lint */
33 1.1.2.2 yamt
34 1.1.2.2 yamt
35 1.1.2.2 yamt #if defined(_KERNEL_OPT)
36 1.1.2.2 yamt #include "opt_compat_netbsd.h"
37 1.1.2.2 yamt #endif
38 1.1.2.2 yamt
39 1.1.2.2 yamt /* TODO strip */
40 1.1.2.2 yamt #include <sys/param.h>
41 1.1.2.2 yamt #include <sys/systm.h>
42 1.1.2.2 yamt #include <sys/sysctl.h>
43 1.1.2.2 yamt #include <sys/namei.h>
44 1.1.2.2 yamt #include <sys/proc.h>
45 1.1.2.2 yamt #include <sys/kernel.h>
46 1.1.2.2 yamt #include <sys/vnode.h>
47 1.1.2.2 yamt #include <miscfs/genfs/genfs_node.h>
48 1.1.2.2 yamt #include <sys/mount.h>
49 1.1.2.2 yamt #include <sys/buf.h>
50 1.1.2.2 yamt #include <sys/file.h>
51 1.1.2.2 yamt #include <sys/device.h>
52 1.1.2.2 yamt #include <sys/disklabel.h>
53 1.1.2.2 yamt #include <sys/ioctl.h>
54 1.1.2.2 yamt #include <sys/malloc.h>
55 1.1.2.2 yamt #include <sys/dirent.h>
56 1.1.2.2 yamt #include <sys/stat.h>
57 1.1.2.2 yamt #include <sys/conf.h>
58 1.1.2.2 yamt #include <sys/kauth.h>
59 1.1.2.2 yamt #include <sys/kthread.h>
60 1.1.2.2 yamt #include <dev/clock_subr.h>
61 1.1.2.2 yamt
62 1.1.2.2 yamt #include <fs/udf/ecma167-udf.h>
63 1.1.2.2 yamt #include <fs/udf/udf_mount.h>
64 1.1.2.2 yamt
65 1.1.2.2 yamt #include "udf.h"
66 1.1.2.2 yamt #include "udf_subr.h"
67 1.1.2.2 yamt #include "udf_bswap.h"
68 1.1.2.2 yamt
69 1.1.2.2 yamt
70 1.1.2.2 yamt #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
71 1.1.2.2 yamt
72 1.1.2.2 yamt static void udf_record_allocation_in_node(struct udf_mount *ump,
73 1.1.2.2 yamt struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
74 1.1.2.2 yamt struct long_ad *node_ad_cpy);
75 1.1.2.2 yamt
76 1.1.2.6 yamt static void udf_collect_free_space_for_vpart(struct udf_mount *ump,
77 1.1.2.6 yamt uint16_t vpart_num, uint32_t num_lb);
78 1.1.2.6 yamt
79 1.1.2.6 yamt static void udf_wipe_adslots(struct udf_node *udf_node);
80 1.1.2.6 yamt static void udf_count_alloc_exts(struct udf_node *udf_node);
81 1.1.2.6 yamt
82 1.1.2.2 yamt /*
83 1.1.2.2 yamt * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
84 1.1.2.2 yamt * this will hopefully/likely reduce O(n log(n)) to O(1) for most functionality
85 1.1.2.2 yamt * since actions are most likely sequential and thus seeking doesn't require
86 1.1.2.2 yamt * searching for the same or an adjacent position again.
87 1.1.2.2 yamt */
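/*
 * Illustrative sketch only (not code from this revision): such a per-node
 * walker could cache the last visited allocation-descriptor slot and the
 * file offset at which it starts, e.g.
 *
 *	struct udf_extwalk {
 *		int      slot;      -- last udf_get_adslot() slot visited
 *		uint64_t foffset;   -- file offset at which that slot starts
 *	};
 *
 * so a sequential walk can resume where the previous one left off instead
 * of rescanning from slot 0; the struct and field names here are
 * hypothetical.
 */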
88 1.1.2.2 yamt
89 1.1.2.2 yamt /* --------------------------------------------------------------------- */
90 1.1.2.3 yamt
91 1.1.2.3 yamt #if 0
92 1.1.2.2 yamt #if 1
93 1.1.2.2 yamt static void
94 1.1.2.2 yamt udf_node_dump(struct udf_node *udf_node) {
95 1.1.2.2 yamt struct file_entry *fe;
96 1.1.2.2 yamt struct extfile_entry *efe;
97 1.1.2.2 yamt struct icb_tag *icbtag;
98 1.1.2.3 yamt struct long_ad s_ad;
99 1.1.2.2 yamt uint64_t inflen;
100 1.1.2.3 yamt uint32_t icbflags, addr_type;
101 1.1.2.2 yamt uint32_t len, lb_num;
102 1.1.2.3 yamt uint32_t flags;
103 1.1.2.2 yamt int part_num;
104 1.1.2.3 yamt int lb_size, eof, slot;
105 1.1.2.2 yamt
106 1.1.2.3 yamt if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
107 1.1.2.2 yamt return;
108 1.1.2.2 yamt
109 1.1.2.2 yamt lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
110 1.1.2.2 yamt
111 1.1.2.2 yamt fe = udf_node->fe;
112 1.1.2.2 yamt efe = udf_node->efe;
113 1.1.2.2 yamt if (fe) {
114 1.1.2.2 yamt icbtag = &fe->icbtag;
115 1.1.2.2 yamt inflen = udf_rw64(fe->inf_len);
116 1.1.2.2 yamt } else {
117 1.1.2.2 yamt icbtag = &efe->icbtag;
118 1.1.2.2 yamt inflen = udf_rw64(efe->inf_len);
119 1.1.2.2 yamt }
120 1.1.2.2 yamt
121 1.1.2.2 yamt icbflags = udf_rw16(icbtag->flags);
122 1.1.2.2 yamt addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
123 1.1.2.2 yamt
124 1.1.2.3 yamt printf("udf_node_dump %p :\n", udf_node);
125 1.1.2.2 yamt
126 1.1.2.2 yamt if (addr_type == UDF_ICB_INTERN_ALLOC) {
127 1.1.2.3 yamt printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
128 1.1.2.2 yamt return;
129 1.1.2.2 yamt }
130 1.1.2.2 yamt
131 1.1.2.3 yamt printf("\tInflen = %"PRIu64"\n", inflen);
132 1.1.2.3 yamt printf("\t\t");
133 1.1.2.2 yamt
134 1.1.2.3 yamt slot = 0;
135 1.1.2.3 yamt for (;;) {
136 1.1.2.3 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
137 1.1.2.3 yamt if (eof)
138 1.1.2.3 yamt break;
139 1.1.2.3 yamt part_num = udf_rw16(s_ad.loc.part_num);
140 1.1.2.3 yamt lb_num = udf_rw32(s_ad.loc.lb_num);
141 1.1.2.3 yamt len = udf_rw32(s_ad.len);
142 1.1.2.3 yamt flags = UDF_EXT_FLAGS(len);
143 1.1.2.3 yamt len = UDF_EXT_LEN(len);
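/*
 * Per ECMA-167 the 32-bit extent 'len' field packs a 2-bit extent type in
 * its two top bits and a 30-bit byte length below it; the UDF_EXT_FLAGS()
 * and UDF_EXT_LEN() macros above split those parts (hence the flags>>30
 * print below).
 */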
144 1.1.2.2 yamt
145 1.1.2.2 yamt printf("[");
146 1.1.2.2 yamt if (part_num >= 0)
147 1.1.2.2 yamt printf("part %d, ", part_num);
148 1.1.2.2 yamt printf("lb_num %d, len %d", lb_num, len);
149 1.1.2.2 yamt if (flags)
150 1.1.2.3 yamt printf(", flags %d", flags>>30);
151 1.1.2.2 yamt printf("] ");
152 1.1.2.3 yamt
153 1.1.2.3 yamt if (flags == UDF_EXT_REDIRECT) {
154 1.1.2.3 yamt printf("\n\textent END\n\tallocation extent\n\t\t");
155 1.1.2.3 yamt }
156 1.1.2.3 yamt
157 1.1.2.3 yamt slot++;
158 1.1.2.2 yamt }
159 1.1.2.3 yamt printf("\n\tl_ad END\n\n");
160 1.1.2.2 yamt }
161 1.1.2.2 yamt #else
162 1.1.2.2 yamt #define udf_node_dump(a)
163 1.1.2.2 yamt #endif
164 1.1.2.2 yamt
165 1.1.2.3 yamt
166 1.1.2.3 yamt static void
167 1.1.2.3 yamt udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
168 1.1.2.3 yamt uint32_t lb_num, uint32_t num_lb)
169 1.1.2.3 yamt {
170 1.1.2.3 yamt struct udf_bitmap *bitmap;
171 1.1.2.3 yamt struct part_desc *pdesc;
172 1.1.2.3 yamt uint32_t ptov;
173 1.1.2.3 yamt uint32_t bitval;
174 1.1.2.3 yamt uint8_t *bpos;
175 1.1.2.3 yamt int bit;
176 1.1.2.3 yamt int phys_part;
177 1.1.2.3 yamt int ok;
178 1.1.2.3 yamt
179 1.1.2.3 yamt DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
180 1.1.2.3 yamt "part %d + %d sect\n", lb_num, vpart_num, num_lb));
181 1.1.2.3 yamt
182 1.1.2.3 yamt /* get partition backing up this vpart_num */
183 1.1.2.3 yamt pdesc = ump->partitions[ump->vtop[vpart_num]];
184 1.1.2.3 yamt
185 1.1.2.3 yamt switch (ump->vtop_tp[vpart_num]) {
186 1.1.2.3 yamt case UDF_VTOP_TYPE_PHYS :
187 1.1.2.3 yamt case UDF_VTOP_TYPE_SPARABLE :
188 1.1.2.3 yamt /* allocation state is tracked in the unallocated space bitmap */
189 1.1.2.3 yamt ptov = udf_rw32(pdesc->start_loc);
190 1.1.2.3 yamt phys_part = ump->vtop[vpart_num];
191 1.1.2.3 yamt
192 1.1.2.3 yamt /* use unallocated bitmap */
193 1.1.2.3 yamt bitmap = &ump->part_unalloc_bits[phys_part];
194 1.1.2.3 yamt
195 1.1.2.3 yamt /* if no bitmaps are defined, bail out */
196 1.1.2.3 yamt if (bitmap->bits == NULL)
197 1.1.2.3 yamt break;
198 1.1.2.3 yamt
199 1.1.2.3 yamt /* check bits */
200 1.1.2.3 yamt KASSERT(bitmap->bits);
201 1.1.2.3 yamt ok = 1;
202 1.1.2.3 yamt bpos = bitmap->bits + lb_num/8;
203 1.1.2.3 yamt bit = lb_num % 8;
204 1.1.2.3 yamt while (num_lb > 0) {
205 1.1.2.3 yamt bitval = (1 << bit);
206 1.1.2.3 yamt DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
207 1.1.2.3 yamt lb_num, bpos, bit));
208 1.1.2.3 yamt KASSERT(bitmap->bits + lb_num/8 == bpos);
209 1.1.2.3 yamt if (*bpos & bitval) {
210 1.1.2.3 yamt printf("\tlb_num %d is NOT marked busy\n",
211 1.1.2.3 yamt lb_num);
212 1.1.2.3 yamt ok = 0;
213 1.1.2.3 yamt }
214 1.1.2.3 yamt lb_num++; num_lb--;
215 1.1.2.3 yamt bit = (bit + 1) % 8;
216 1.1.2.3 yamt if (bit == 0)
217 1.1.2.3 yamt bpos++;
218 1.1.2.3 yamt }
219 1.1.2.3 yamt if (!ok) {
220 1.1.2.3 yamt /* KASSERT(0); */
221 1.1.2.3 yamt }
222 1.1.2.3 yamt
223 1.1.2.3 yamt break;
224 1.1.2.3 yamt case UDF_VTOP_TYPE_VIRT :
225 1.1.2.3 yamt /* TODO check space */
226 1.1.2.3 yamt KASSERT(num_lb == 1);
227 1.1.2.3 yamt break;
228 1.1.2.3 yamt case UDF_VTOP_TYPE_META :
229 1.1.2.3 yamt /* TODO check space in the metadata bitmap */
230 1.1.2.3 yamt default:
231 1.1.2.3 yamt /* not implemented */
232 1.1.2.3 yamt break;
233 1.1.2.3 yamt }
234 1.1.2.3 yamt }
235 1.1.2.3 yamt
236 1.1.2.3 yamt
237 1.1.2.2 yamt static void
238 1.1.2.2 yamt udf_node_sanity_check(struct udf_node *udf_node,
239 1.1.2.3 yamt uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
240 1.1.2.3 yamt {
241 1.1.2.3 yamt union dscrptr *dscr;
242 1.1.2.2 yamt struct file_entry *fe;
243 1.1.2.2 yamt struct extfile_entry *efe;
244 1.1.2.2 yamt struct icb_tag *icbtag;
245 1.1.2.3 yamt struct long_ad s_ad;
246 1.1.2.2 yamt uint64_t inflen, logblksrec;
247 1.1.2.3 yamt uint32_t icbflags, addr_type;
248 1.1.2.3 yamt uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
249 1.1.2.3 yamt uint16_t part_num;
250 1.1.2.3 yamt uint8_t *data_pos;
251 1.1.2.3 yamt int dscr_size, lb_size, flags, whole_lb;
252 1.1.2.3 yamt int i, slot, eof;
253 1.1.2.3 yamt
254 1.1.2.3 yamt // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
255 1.1.2.2 yamt
256 1.1.2.3 yamt if (1)
257 1.1.2.3 yamt udf_node_dump(udf_node);
258 1.1.2.2 yamt
259 1.1.2.2 yamt lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
260 1.1.2.2 yamt
261 1.1.2.2 yamt fe = udf_node->fe;
262 1.1.2.2 yamt efe = udf_node->efe;
263 1.1.2.2 yamt if (fe) {
264 1.1.2.3 yamt dscr = (union dscrptr *) fe;
265 1.1.2.3 yamt icbtag = &fe->icbtag;
266 1.1.2.3 yamt inflen = udf_rw64(fe->inf_len);
267 1.1.2.2 yamt dscr_size = sizeof(struct file_entry) -1;
268 1.1.2.3 yamt logblksrec = udf_rw64(fe->logblks_rec);
269 1.1.2.2 yamt l_ad = udf_rw32(fe->l_ad);
270 1.1.2.3 yamt l_ea = udf_rw32(fe->l_ea);
271 1.1.2.2 yamt } else {
272 1.1.2.3 yamt dscr = (union dscrptr *) efe;
273 1.1.2.3 yamt icbtag = &efe->icbtag;
274 1.1.2.3 yamt inflen = udf_rw64(efe->inf_len);
275 1.1.2.2 yamt dscr_size = sizeof(struct extfile_entry) -1;
276 1.1.2.3 yamt logblksrec = udf_rw64(efe->logblks_rec);
277 1.1.2.2 yamt l_ad = udf_rw32(efe->l_ad);
278 1.1.2.3 yamt l_ea = udf_rw32(efe->l_ea);
279 1.1.2.2 yamt }
280 1.1.2.3 yamt data_pos = (uint8_t *) dscr + dscr_size + l_ea;
281 1.1.2.3 yamt max_l_ad = lb_size - dscr_size - l_ea;
282 1.1.2.2 yamt icbflags = udf_rw16(icbtag->flags);
283 1.1.2.2 yamt addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
284 1.1.2.2 yamt
285 1.1.2.3 yamt /* check if tail is zero */
286 1.1.2.3 yamt DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
287 1.1.2.3 yamt for (i = l_ad; i < max_l_ad; i++) {
288 1.1.2.3 yamt if (data_pos[i] != 0)
289 1.1.2.3 yamt printf( "sanity_check: violation: node byte %d "
290 1.1.2.3 yamt "has value %d\n", i, data_pos[i]);
291 1.1.2.3 yamt }
292 1.1.2.3 yamt
293 1.1.2.2 yamt /* reset counters */
294 1.1.2.2 yamt *cnt_inflen = 0;
295 1.1.2.2 yamt *cnt_logblksrec = 0;
296 1.1.2.2 yamt
297 1.1.2.2 yamt if (addr_type == UDF_ICB_INTERN_ALLOC) {
298 1.1.2.2 yamt KASSERT(l_ad <= max_l_ad);
299 1.1.2.2 yamt KASSERT(l_ad == inflen);
300 1.1.2.2 yamt *cnt_inflen = inflen;
301 1.1.2.2 yamt return;
302 1.1.2.2 yamt }
303 1.1.2.2 yamt
304 1.1.2.2 yamt /* start counting */
305 1.1.2.2 yamt whole_lb = 1;
306 1.1.2.3 yamt slot = 0;
307 1.1.2.3 yamt for (;;) {
308 1.1.2.3 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
309 1.1.2.3 yamt if (eof)
310 1.1.2.3 yamt break;
311 1.1.2.2 yamt KASSERT(whole_lb == 1);
312 1.1.2.3 yamt
313 1.1.2.3 yamt part_num = udf_rw16(s_ad.loc.part_num);
314 1.1.2.3 yamt lb_num = udf_rw32(s_ad.loc.lb_num);
315 1.1.2.3 yamt len = udf_rw32(s_ad.len);
316 1.1.2.3 yamt flags = UDF_EXT_FLAGS(len);
317 1.1.2.3 yamt len = UDF_EXT_LEN(len);
318 1.1.2.3 yamt
319 1.1.2.3 yamt if (flags != UDF_EXT_REDIRECT) {
320 1.1.2.3 yamt *cnt_inflen += len;
321 1.1.2.3 yamt if (flags == UDF_EXT_ALLOCATED) {
322 1.1.2.3 yamt *cnt_logblksrec += (len + lb_size -1) / lb_size;
323 1.1.2.3 yamt }
324 1.1.2.2 yamt } else {
325 1.1.2.3 yamt KASSERT(len == lb_size);
326 1.1.2.2 yamt }
327 1.1.2.3 yamt /* check allocation */
328 1.1.2.3 yamt if (flags == UDF_EXT_ALLOCATED)
329 1.1.2.3 yamt udf_assert_allocated(udf_node->ump, part_num, lb_num,
330 1.1.2.3 yamt (len + lb_size - 1) / lb_size);
331 1.1.2.3 yamt
332 1.1.2.3 yamt /* check whole lb */
333 1.1.2.2 yamt whole_lb = ((len % lb_size) == 0);
334 1.1.2.3 yamt
335 1.1.2.3 yamt slot++;
336 1.1.2.2 yamt }
337 1.1.2.2 yamt /* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */
338 1.1.2.2 yamt
339 1.1.2.2 yamt KASSERT(*cnt_inflen == inflen);
340 1.1.2.2 yamt KASSERT(*cnt_logblksrec == logblksrec);
341 1.1.2.2 yamt
342 1.1.2.3 yamt // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
343 1.1.2.2 yamt }
344 1.1.2.2 yamt #else
345 1.1.2.3 yamt static void
346 1.1.2.3 yamt udf_node_sanity_check(struct udf_node *udf_node,
347 1.1.2.3 yamt uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
348 1.1.2.3 yamt struct file_entry *fe;
349 1.1.2.3 yamt struct extfile_entry *efe;
350 1.1.2.3 yamt struct icb_tag *icbtag;
351 1.1.2.3 yamt uint64_t inflen, logblksrec;
352 1.1.2.3 yamt int dscr_size, lb_size;
353 1.1.2.3 yamt
354 1.1.2.3 yamt lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
355 1.1.2.3 yamt
356 1.1.2.3 yamt fe = udf_node->fe;
357 1.1.2.3 yamt efe = udf_node->efe;
358 1.1.2.3 yamt if (fe) {
359 1.1.2.3 yamt icbtag = &fe->icbtag;
360 1.1.2.3 yamt inflen = udf_rw64(fe->inf_len);
361 1.1.2.3 yamt dscr_size = sizeof(struct file_entry) -1;
362 1.1.2.3 yamt logblksrec = udf_rw64(fe->logblks_rec);
363 1.1.2.3 yamt } else {
364 1.1.2.3 yamt icbtag = &efe->icbtag;
365 1.1.2.3 yamt inflen = udf_rw64(efe->inf_len);
366 1.1.2.3 yamt dscr_size = sizeof(struct extfile_entry) -1;
367 1.1.2.3 yamt logblksrec = udf_rw64(efe->logblks_rec);
368 1.1.2.3 yamt }
369 1.1.2.3 yamt *cnt_logblksrec = logblksrec;
370 1.1.2.3 yamt *cnt_inflen = inflen;
371 1.1.2.3 yamt }
372 1.1.2.2 yamt #endif
373 1.1.2.2 yamt
374 1.1.2.2 yamt /* --------------------------------------------------------------------- */
375 1.1.2.2 yamt
376 1.1.2.5 yamt void
377 1.1.2.5 yamt udf_calc_freespace(struct udf_mount *ump, uint64_t *sizeblks, uint64_t *freeblks)
378 1.1.2.5 yamt {
379 1.1.2.5 yamt struct logvol_int_desc *lvid;
380 1.1.2.5 yamt uint32_t *pos1, *pos2;
381 1.1.2.5 yamt int vpart, num_vpart;
382 1.1.2.5 yamt
383 1.1.2.5 yamt lvid = ump->logvol_integrity;
384 1.1.2.5 yamt *freeblks = *sizeblks = 0;
385 1.1.2.5 yamt
386 1.1.2.5 yamt /*
387 1.1.2.5 yamt * Sequential media report free space directly (CD/DVD/BD-R); for the
388 1.1.2.5 yamt * other media we need the logical volume integrity.
389 1.1.2.5 yamt *
390 1.1.2.5 yamt * We sum all free space up here regardless of type.
391 1.1.2.5 yamt */
392 1.1.2.5 yamt
393 1.1.2.5 yamt KASSERT(lvid);
394 1.1.2.5 yamt num_vpart = udf_rw32(lvid->num_part);
395 1.1.2.5 yamt
396 1.1.2.5 yamt if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
397 1.1.2.5 yamt /* use track info directly, summing both if there are two tracks open */
398 1.1.2.5 yamt /* XXX assumption at most two tracks open */
399 1.1.2.5 yamt *freeblks = ump->data_track.free_blocks;
400 1.1.2.5 yamt if (ump->data_track.tracknr != ump->metadata_track.tracknr)
401 1.1.2.5 yamt *freeblks += ump->metadata_track.free_blocks;
402 1.1.2.5 yamt *sizeblks = ump->discinfo.last_possible_lba;
403 1.1.2.5 yamt } else {
404 1.1.2.5 yamt /* free and used space for mountpoint based on logvol integrity */
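/*
 * Layout note: lvid->tables[] starts with num_vpart free-space entries
 * followed by num_vpart size entries (ECMA-167 Free Space Table and Size
 * Table); a value of 0xffffffff (-1) means "not available", which is why
 * such entries are skipped below.
 */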
405 1.1.2.5 yamt for (vpart = 0; vpart < num_vpart; vpart++) {
406 1.1.2.5 yamt pos1 = &lvid->tables[0] + vpart;
407 1.1.2.5 yamt pos2 = &lvid->tables[0] + num_vpart + vpart;
408 1.1.2.5 yamt if (udf_rw32(*pos1) != (uint32_t) -1) {
409 1.1.2.5 yamt *freeblks += udf_rw32(*pos1);
410 1.1.2.5 yamt *sizeblks += udf_rw32(*pos2);
411 1.1.2.5 yamt }
412 1.1.2.5 yamt }
413 1.1.2.5 yamt }
414 1.1.2.5 yamt /* adjust for accounted uncommitted blocks */
415 1.1.2.5 yamt for (vpart = 0; vpart < num_vpart; vpart++)
416 1.1.2.5 yamt *freeblks -= ump->uncommitted_lbs[vpart];
417 1.1.2.5 yamt
418 1.1.2.5 yamt if (*freeblks > UDF_DISC_SLACK) {
419 1.1.2.5 yamt *freeblks -= UDF_DISC_SLACK;
420 1.1.2.5 yamt } else {
421 1.1.2.5 yamt *freeblks = 0;
422 1.1.2.5 yamt }
423 1.1.2.5 yamt }
424 1.1.2.5 yamt
425 1.1.2.5 yamt
426 1.1.2.5 yamt static void
427 1.1.2.5 yamt udf_calc_vpart_freespace(struct udf_mount *ump, uint16_t vpart_num, uint64_t *freeblks)
428 1.1.2.5 yamt {
429 1.1.2.5 yamt struct logvol_int_desc *lvid;
430 1.1.2.5 yamt uint32_t *pos1;
431 1.1.2.5 yamt
432 1.1.2.5 yamt lvid = ump->logvol_integrity;
433 1.1.2.5 yamt *freeblks = 0;
434 1.1.2.5 yamt
435 1.1.2.5 yamt /*
436 1.1.2.5 yamt * Sequential media report free space directly (CD/DVD/BD-R); for the
437 1.1.2.5 yamt * other media we need the logical volume integrity.
438 1.1.2.5 yamt *
439 1.1.2.5 yamt * We sum all free space up here regardless of type.
440 1.1.2.5 yamt */
441 1.1.2.5 yamt
442 1.1.2.5 yamt KASSERT(lvid);
443 1.1.2.5 yamt if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
444 1.1.2.5 yamt /* XXX assumption at most two tracks open */
445 1.1.2.5 yamt if (vpart_num == ump->data_part) {
446 1.1.2.5 yamt *freeblks = ump->data_track.free_blocks;
447 1.1.2.5 yamt } else {
448 1.1.2.5 yamt *freeblks = ump->metadata_track.free_blocks;
449 1.1.2.5 yamt }
450 1.1.2.5 yamt } else {
451 1.1.2.5 yamt /* free and used space for mountpoint based on logvol integrity */
452 1.1.2.5 yamt pos1 = &lvid->tables[0] + vpart_num;
453 1.1.2.5 yamt if (udf_rw32(*pos1) != (uint32_t) -1)
454 1.1.2.5 yamt *freeblks += udf_rw32(*pos1);
455 1.1.2.5 yamt }
456 1.1.2.5 yamt
457 1.1.2.5 yamt /* adjust for accounted uncommitted blocks */
458 1.1.2.6 yamt if (*freeblks > ump->uncommitted_lbs[vpart_num]) {
459 1.1.2.6 yamt *freeblks -= ump->uncommitted_lbs[vpart_num];
460 1.1.2.6 yamt } else {
461 1.1.2.6 yamt *freeblks = 0;
462 1.1.2.6 yamt }
463 1.1.2.5 yamt }
464 1.1.2.5 yamt
465 1.1.2.5 yamt /* --------------------------------------------------------------------- */
466 1.1.2.5 yamt
467 1.1.2.2 yamt int
468 1.1.2.2 yamt udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
469 1.1.2.2 yamt uint32_t *lb_numres, uint32_t *extres)
470 1.1.2.2 yamt {
471 1.1.2.2 yamt struct part_desc *pdesc;
472 1.1.2.2 yamt struct spare_map_entry *sme;
473 1.1.2.2 yamt struct long_ad s_icb_loc;
474 1.1.2.2 yamt uint64_t foffset, end_foffset;
475 1.1.2.2 yamt uint32_t lb_size, len;
476 1.1.2.2 yamt uint32_t lb_num, lb_rel, lb_packet;
477 1.1.2.2 yamt uint32_t udf_rw32_lbmap, ext_offset;
478 1.1.2.2 yamt uint16_t vpart;
479 1.1.2.2 yamt int rel, part, error, eof, slot, flags;
480 1.1.2.2 yamt
481 1.1.2.2 yamt assert(ump && icb_loc && lb_numres);
482 1.1.2.2 yamt
483 1.1.2.2 yamt vpart = udf_rw16(icb_loc->loc.part_num);
484 1.1.2.2 yamt lb_num = udf_rw32(icb_loc->loc.lb_num);
485 1.1.2.2 yamt if (vpart > UDF_VTOP_RAWPART)
486 1.1.2.2 yamt return EINVAL;
487 1.1.2.2 yamt
488 1.1.2.2 yamt translate_again:
489 1.1.2.2 yamt part = ump->vtop[vpart];
490 1.1.2.2 yamt pdesc = ump->partitions[part];
491 1.1.2.2 yamt
492 1.1.2.2 yamt switch (ump->vtop_tp[vpart]) {
493 1.1.2.2 yamt case UDF_VTOP_TYPE_RAW :
494 1.1.2.2 yamt /* 1:1 to the end of the device */
495 1.1.2.2 yamt *lb_numres = lb_num;
496 1.1.2.2 yamt *extres = INT_MAX;
497 1.1.2.2 yamt return 0;
498 1.1.2.2 yamt case UDF_VTOP_TYPE_PHYS :
499 1.1.2.2 yamt /* transform into its disc logical block */
500 1.1.2.2 yamt if (lb_num > udf_rw32(pdesc->part_len))
501 1.1.2.2 yamt return EINVAL;
502 1.1.2.2 yamt *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
503 1.1.2.2 yamt
504 1.1.2.2 yamt /* extent from here to the end of the partition */
505 1.1.2.2 yamt *extres = udf_rw32(pdesc->part_len) - lb_num;
506 1.1.2.2 yamt return 0;
507 1.1.2.2 yamt case UDF_VTOP_TYPE_VIRT :
508 1.1.2.2 yamt /* only maps one logical block, lookup in VAT */
509 1.1.2.2 yamt if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
510 1.1.2.2 yamt return EINVAL;
511 1.1.2.2 yamt
512 1.1.2.2 yamt /* lookup in virtual allocation table file */
513 1.1.2.2 yamt mutex_enter(&ump->allocate_mutex);
514 1.1.2.2 yamt error = udf_vat_read(ump->vat_node,
515 1.1.2.2 yamt (uint8_t *) &udf_rw32_lbmap, 4,
516 1.1.2.2 yamt ump->vat_offset + lb_num * 4);
517 1.1.2.2 yamt mutex_exit(&ump->allocate_mutex);
518 1.1.2.2 yamt
519 1.1.2.2 yamt if (error)
520 1.1.2.2 yamt return error;
521 1.1.2.2 yamt
522 1.1.2.2 yamt lb_num = udf_rw32(udf_rw32_lbmap);
523 1.1.2.2 yamt
524 1.1.2.2 yamt /* transform into its disc logical block */
525 1.1.2.2 yamt if (lb_num > udf_rw32(pdesc->part_len))
526 1.1.2.2 yamt return EINVAL;
527 1.1.2.2 yamt *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
528 1.1.2.2 yamt
529 1.1.2.2 yamt /* just one logical block */
530 1.1.2.2 yamt *extres = 1;
531 1.1.2.2 yamt return 0;
532 1.1.2.2 yamt case UDF_VTOP_TYPE_SPARABLE :
533 1.1.2.2 yamt /* check if the packet containing the lb_num is remapped */
534 1.1.2.2 yamt lb_packet = lb_num / ump->sparable_packet_size;
535 1.1.2.2 yamt lb_rel = lb_num % ump->sparable_packet_size;
536 1.1.2.2 yamt
537 1.1.2.2 yamt for (rel = 0; rel < udf_rw16(ump->sparing_table->rt_l); rel++) {
538 1.1.2.2 yamt sme = &ump->sparing_table->entries[rel];
539 1.1.2.2 yamt if (lb_packet == udf_rw32(sme->org)) {
540 1.1.2.2 yamt /* NOTE maps to absolute disc logical block! */
541 1.1.2.2 yamt *lb_numres = udf_rw32(sme->map) + lb_rel;
542 1.1.2.2 yamt *extres = ump->sparable_packet_size - lb_rel;
543 1.1.2.2 yamt return 0;
544 1.1.2.2 yamt }
545 1.1.2.2 yamt }
546 1.1.2.2 yamt
547 1.1.2.2 yamt /* transform into its disc logical block */
548 1.1.2.2 yamt if (lb_num > udf_rw32(pdesc->part_len))
549 1.1.2.2 yamt return EINVAL;
550 1.1.2.2 yamt *lb_numres = lb_num + udf_rw32(pdesc->start_loc);
551 1.1.2.2 yamt
552 1.1.2.2 yamt /* rest of block */
553 1.1.2.2 yamt *extres = ump->sparable_packet_size - lb_rel;
554 1.1.2.2 yamt return 0;
555 1.1.2.2 yamt case UDF_VTOP_TYPE_META :
556 1.1.2.2 yamt /* we have to look into the file's allocation descriptors */
557 1.1.2.2 yamt
558 1.1.2.2 yamt /* use metadatafile allocation mutex */
559 1.1.2.2 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
560 1.1.2.2 yamt
561 1.1.2.2 yamt UDF_LOCK_NODE(ump->metadata_node, 0);
562 1.1.2.2 yamt
563 1.1.2.2 yamt /* get first overlapping extent */
564 1.1.2.2 yamt foffset = 0;
565 1.1.2.2 yamt slot = 0;
566 1.1.2.2 yamt for (;;) {
567 1.1.2.2 yamt udf_get_adslot(ump->metadata_node,
568 1.1.2.2 yamt slot, &s_icb_loc, &eof);
569 1.1.2.3 yamt DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
570 1.1.2.3 yamt "len = %d, lb_num = %d, part = %d\n",
571 1.1.2.3 yamt slot, eof,
572 1.1.2.3 yamt UDF_EXT_FLAGS(udf_rw32(s_icb_loc.len)),
573 1.1.2.3 yamt UDF_EXT_LEN(udf_rw32(s_icb_loc.len)),
574 1.1.2.3 yamt udf_rw32(s_icb_loc.loc.lb_num),
575 1.1.2.3 yamt udf_rw16(s_icb_loc.loc.part_num)));
576 1.1.2.2 yamt if (eof) {
577 1.1.2.2 yamt DPRINTF(TRANSLATE,
578 1.1.2.2 yamt ("Meta partition translation "
579 1.1.2.2 yamt "failed: can't seek location\n"));
580 1.1.2.2 yamt UDF_UNLOCK_NODE(ump->metadata_node, 0);
581 1.1.2.2 yamt return EINVAL;
582 1.1.2.2 yamt }
583 1.1.2.2 yamt len = udf_rw32(s_icb_loc.len);
584 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
585 1.1.2.2 yamt len = UDF_EXT_LEN(len);
586 1.1.2.2 yamt
587 1.1.2.3 yamt if (flags == UDF_EXT_REDIRECT) {
588 1.1.2.3 yamt slot++;
589 1.1.2.3 yamt continue;
590 1.1.2.3 yamt }
591 1.1.2.3 yamt
592 1.1.2.2 yamt end_foffset = foffset + len;
593 1.1.2.2 yamt
594 1.1.2.2 yamt if (end_foffset > lb_num * lb_size)
595 1.1.2.2 yamt break; /* found */
596 1.1.2.3 yamt foffset = end_foffset;
597 1.1.2.2 yamt slot++;
598 1.1.2.2 yamt }
599 1.1.2.2 yamt /* found overlapping slot */
600 1.1.2.2 yamt ext_offset = lb_num * lb_size - foffset;
601 1.1.2.2 yamt
602 1.1.2.2 yamt /* process extent offset */
603 1.1.2.2 yamt lb_num = udf_rw32(s_icb_loc.loc.lb_num);
604 1.1.2.2 yamt vpart = udf_rw16(s_icb_loc.loc.part_num);
605 1.1.2.2 yamt lb_num += (ext_offset + lb_size -1) / lb_size;
606 1.1.2.2 yamt ext_offset = 0;
607 1.1.2.2 yamt
608 1.1.2.2 yamt UDF_UNLOCK_NODE(ump->metadata_node, 0);
609 1.1.2.2 yamt if (flags != UDF_EXT_ALLOCATED) {
610 1.1.2.2 yamt DPRINTF(TRANSLATE, ("Metadata partition translation "
611 1.1.2.2 yamt "failed: not allocated\n"));
612 1.1.2.2 yamt return EINVAL;
613 1.1.2.2 yamt }
614 1.1.2.2 yamt
615 1.1.2.2 yamt /*
616 1.1.2.2 yamt * vpart and lb_num are updated, translate again since we
617 1.1.2.2 yamt * might be mapped on sparable media
618 1.1.2.2 yamt */
619 1.1.2.2 yamt goto translate_again;
620 1.1.2.2 yamt default:
621 1.1.2.2 yamt printf("UDF vtop translation scheme %d unimplemented yet\n",
622 1.1.2.2 yamt ump->vtop_tp[vpart]);
623 1.1.2.2 yamt }
624 1.1.2.2 yamt
625 1.1.2.2 yamt return EINVAL;
626 1.1.2.2 yamt }
627 1.1.2.2 yamt
628 1.1.2.3 yamt
629 1.1.2.3 yamt /* XXX provisional primitive braindead version */
630 1.1.2.3 yamt /* TODO use ext_res */
631 1.1.2.3 yamt void
632 1.1.2.3 yamt udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
633 1.1.2.3 yamt uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
634 1.1.2.3 yamt {
635 1.1.2.3 yamt struct long_ad loc;
636 1.1.2.3 yamt uint32_t lb_numres, ext_res;
637 1.1.2.3 yamt int sector;
638 1.1.2.3 yamt
639 1.1.2.3 yamt for (sector = 0; sector < sectors; sector++) {
640 1.1.2.3 yamt memset(&loc, 0, sizeof(struct long_ad));
641 1.1.2.3 yamt loc.loc.part_num = udf_rw16(vpart_num);
642 1.1.2.3 yamt loc.loc.lb_num = udf_rw32(*lmapping);
643 1.1.2.3 yamt udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
644 1.1.2.3 yamt *pmapping = lb_numres;
645 1.1.2.3 yamt lmapping++; pmapping++;
646 1.1.2.3 yamt }
647 1.1.2.3 yamt }
648 1.1.2.3 yamt
649 1.1.2.3 yamt
650 1.1.2.2 yamt /* --------------------------------------------------------------------- */
651 1.1.2.2 yamt
652 1.1.2.2 yamt /*
653 1.1.2.2 yamt * Translate an extent (in logical_blocks) into logical block numbers; used
654 1.1.2.2 yamt * for read and write operations. DOESN'T check extents.
655 1.1.2.2 yamt */
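/*
 * Sketch of the result, based on the code below: each map[] entry is either
 * a physical sector number or one of the sentinels UDF_TRANS_INTERN (data
 * embedded in the node itself) or UDF_TRANS_ZERO (free or
 * allocated-but-not-recorded extent, i.e. no backing sector).
 */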
656 1.1.2.2 yamt
657 1.1.2.2 yamt int
658 1.1.2.2 yamt udf_translate_file_extent(struct udf_node *udf_node,
659 1.1.2.2 yamt uint32_t from, uint32_t num_lb,
660 1.1.2.2 yamt uint64_t *map)
661 1.1.2.2 yamt {
662 1.1.2.2 yamt struct udf_mount *ump;
663 1.1.2.2 yamt struct icb_tag *icbtag;
664 1.1.2.2 yamt struct long_ad t_ad, s_ad;
665 1.1.2.2 yamt uint64_t transsec;
666 1.1.2.2 yamt uint64_t foffset, end_foffset;
667 1.1.2.2 yamt uint32_t transsec32;
668 1.1.2.2 yamt uint32_t lb_size;
669 1.1.2.2 yamt uint32_t ext_offset;
670 1.1.2.2 yamt uint32_t lb_num, len;
671 1.1.2.2 yamt uint32_t overlap, translen;
672 1.1.2.2 yamt uint16_t vpart_num;
673 1.1.2.2 yamt int eof, error, flags;
674 1.1.2.2 yamt int slot, addr_type, icbflags;
675 1.1.2.2 yamt
676 1.1.2.2 yamt if (!udf_node)
677 1.1.2.2 yamt return ENOENT;
678 1.1.2.2 yamt
679 1.1.2.2 yamt KASSERT(num_lb > 0);
680 1.1.2.2 yamt
681 1.1.2.2 yamt UDF_LOCK_NODE(udf_node, 0);
682 1.1.2.2 yamt
683 1.1.2.2 yamt /* initialise derivative vars */
684 1.1.2.2 yamt ump = udf_node->ump;
685 1.1.2.2 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
686 1.1.2.2 yamt
687 1.1.2.2 yamt if (udf_node->fe) {
688 1.1.2.2 yamt icbtag = &udf_node->fe->icbtag;
689 1.1.2.2 yamt } else {
690 1.1.2.2 yamt icbtag = &udf_node->efe->icbtag;
691 1.1.2.2 yamt }
692 1.1.2.2 yamt icbflags = udf_rw16(icbtag->flags);
693 1.1.2.2 yamt addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
694 1.1.2.2 yamt
695 1.1.2.2 yamt /* do the work */
696 1.1.2.2 yamt if (addr_type == UDF_ICB_INTERN_ALLOC) {
697 1.1.2.2 yamt *map = UDF_TRANS_INTERN;
698 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
699 1.1.2.2 yamt return 0;
700 1.1.2.2 yamt }
701 1.1.2.2 yamt
702 1.1.2.2 yamt /* find first overlapping extent */
703 1.1.2.2 yamt foffset = 0;
704 1.1.2.2 yamt slot = 0;
705 1.1.2.2 yamt for (;;) {
706 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
707 1.1.2.3 yamt DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
708 1.1.2.3 yamt "lb_num = %d, part = %d\n", slot, eof,
709 1.1.2.3 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
710 1.1.2.3 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
711 1.1.2.3 yamt udf_rw32(s_ad.loc.lb_num),
712 1.1.2.3 yamt udf_rw16(s_ad.loc.part_num)));
713 1.1.2.2 yamt if (eof) {
714 1.1.2.2 yamt DPRINTF(TRANSLATE,
715 1.1.2.2 yamt ("Translate file extent "
716 1.1.2.2 yamt "failed: can't seek location\n"));
717 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
718 1.1.2.2 yamt return EINVAL;
719 1.1.2.2 yamt }
720 1.1.2.2 yamt len = udf_rw32(s_ad.len);
721 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
722 1.1.2.2 yamt len = UDF_EXT_LEN(len);
723 1.1.2.2 yamt lb_num = udf_rw32(s_ad.loc.lb_num);
724 1.1.2.2 yamt
725 1.1.2.2 yamt if (flags == UDF_EXT_REDIRECT) {
726 1.1.2.2 yamt slot++;
727 1.1.2.2 yamt continue;
728 1.1.2.2 yamt }
729 1.1.2.2 yamt
730 1.1.2.2 yamt end_foffset = foffset + len;
731 1.1.2.2 yamt
732 1.1.2.2 yamt if (end_foffset > from * lb_size)
733 1.1.2.2 yamt break; /* found */
734 1.1.2.2 yamt foffset = end_foffset;
735 1.1.2.2 yamt slot++;
736 1.1.2.2 yamt }
737 1.1.2.2 yamt /* found overlapping slot */
738 1.1.2.2 yamt ext_offset = from * lb_size - foffset;
739 1.1.2.2 yamt
740 1.1.2.2 yamt for (;;) {
741 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
742 1.1.2.3 yamt DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
743 1.1.2.3 yamt "lb_num = %d, part = %d\n", slot, eof,
744 1.1.2.3 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)),
745 1.1.2.3 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
746 1.1.2.3 yamt udf_rw32(s_ad.loc.lb_num),
747 1.1.2.3 yamt udf_rw16(s_ad.loc.part_num)));
748 1.1.2.2 yamt if (eof) {
749 1.1.2.2 yamt DPRINTF(TRANSLATE,
750 1.1.2.2 yamt ("Translate file extent "
751 1.1.2.2 yamt "failed: past eof\n"));
752 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
753 1.1.2.2 yamt return EINVAL;
754 1.1.2.2 yamt }
755 1.1.2.2 yamt
756 1.1.2.2 yamt len = udf_rw32(s_ad.len);
757 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
758 1.1.2.2 yamt len = UDF_EXT_LEN(len);
759 1.1.2.2 yamt
760 1.1.2.2 yamt lb_num = udf_rw32(s_ad.loc.lb_num);
761 1.1.2.2 yamt vpart_num = udf_rw16(s_ad.loc.part_num);
762 1.1.2.2 yamt
763 1.1.2.2 yamt end_foffset = foffset + len;
764 1.1.2.2 yamt
765 1.1.2.2 yamt /* process extent, don't forget to advance on ext_offset! */
766 1.1.2.2 yamt lb_num += (ext_offset + lb_size -1) / lb_size;
767 1.1.2.2 yamt overlap = (len - ext_offset + lb_size -1) / lb_size;
768 1.1.2.2 yamt ext_offset = 0;
769 1.1.2.2 yamt
770 1.1.2.2 yamt /*
771 1.1.2.2 yamt * note that the while(){} is necessary because the extent that
772 1.1.2.2 yamt * udf_translate_vtop() returns doesn't have to span the
773 1.1.2.2 yamt * whole extent.
774 1.1.2.2 yamt */
775 1.1.2.2 yamt
776 1.1.2.2 yamt overlap = MIN(overlap, num_lb);
777 1.1.2.3 yamt while (overlap && (flags != UDF_EXT_REDIRECT)) {
778 1.1.2.2 yamt switch (flags) {
779 1.1.2.2 yamt case UDF_EXT_FREE :
780 1.1.2.2 yamt case UDF_EXT_ALLOCATED_BUT_NOT_USED :
781 1.1.2.2 yamt transsec = UDF_TRANS_ZERO;
782 1.1.2.2 yamt translen = overlap;
783 1.1.2.2 yamt while (overlap && num_lb && translen) {
784 1.1.2.2 yamt *map++ = transsec;
785 1.1.2.2 yamt lb_num++;
786 1.1.2.2 yamt overlap--; num_lb--; translen--;
787 1.1.2.2 yamt }
788 1.1.2.2 yamt break;
789 1.1.2.2 yamt case UDF_EXT_ALLOCATED :
790 1.1.2.2 yamt t_ad.loc.lb_num = udf_rw32(lb_num);
791 1.1.2.2 yamt t_ad.loc.part_num = udf_rw16(vpart_num);
792 1.1.2.2 yamt error = udf_translate_vtop(ump,
793 1.1.2.2 yamt &t_ad, &transsec32, &translen);
794 1.1.2.2 yamt transsec = transsec32;
795 1.1.2.2 yamt if (error) {
796 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
797 1.1.2.2 yamt return error;
798 1.1.2.2 yamt }
799 1.1.2.2 yamt while (overlap && num_lb && translen) {
800 1.1.2.2 yamt *map++ = transsec;
801 1.1.2.2 yamt lb_num++; transsec++;
802 1.1.2.2 yamt overlap--; num_lb--; translen--;
803 1.1.2.2 yamt }
804 1.1.2.2 yamt break;
805 1.1.2.3 yamt default:
806 1.1.2.3 yamt DPRINTF(TRANSLATE,
807 1.1.2.3 yamt ("Translate file extent "
808 1.1.2.3 yamt "failed: bad flags %x\n", flags));
809 1.1.2.3 yamt UDF_UNLOCK_NODE(udf_node, 0);
810 1.1.2.3 yamt return EINVAL;
811 1.1.2.2 yamt }
812 1.1.2.2 yamt }
813 1.1.2.2 yamt if (num_lb == 0)
814 1.1.2.2 yamt break;
815 1.1.2.2 yamt
816 1.1.2.2 yamt if (flags != UDF_EXT_REDIRECT)
817 1.1.2.2 yamt foffset = end_foffset;
818 1.1.2.2 yamt slot++;
819 1.1.2.2 yamt }
820 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
821 1.1.2.2 yamt
822 1.1.2.2 yamt return 0;
823 1.1.2.2 yamt }
824 1.1.2.2 yamt
825 1.1.2.2 yamt /* --------------------------------------------------------------------- */
826 1.1.2.2 yamt
827 1.1.2.2 yamt static int
828 1.1.2.2 yamt udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
829 1.1.2.2 yamt {
830 1.1.2.2 yamt uint32_t lb_size, lb_num, lb_map, udf_rw32_lbmap;
831 1.1.2.2 yamt uint8_t *blob;
832 1.1.2.2 yamt int entry, chunk, found, error;
833 1.1.2.2 yamt
834 1.1.2.2 yamt KASSERT(ump);
835 1.1.2.2 yamt KASSERT(ump->logical_vol);
836 1.1.2.2 yamt
837 1.1.2.2 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
838 1.1.2.2 yamt blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
839 1.1.2.2 yamt
840 1.1.2.2 yamt /* TODO static allocation of search chunk */
841 1.1.2.2 yamt
842 1.1.2.2 yamt lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
843 1.1.2.2 yamt found = 0;
844 1.1.2.2 yamt error = 0;
845 1.1.2.2 yamt entry = 0;
846 1.1.2.2 yamt do {
847 1.1.2.2 yamt chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
848 1.1.2.2 yamt if (chunk <= 0)
849 1.1.2.2 yamt break;
850 1.1.2.2 yamt /* load in chunk */
851 1.1.2.2 yamt error = udf_vat_read(ump->vat_node, blob, chunk,
852 1.1.2.2 yamt ump->vat_offset + lb_num * 4);
853 1.1.2.2 yamt
854 1.1.2.2 yamt if (error)
855 1.1.2.2 yamt break;
856 1.1.2.2 yamt
857 1.1.2.2 yamt /* search this chunk */
858 1.1.2.2 yamt for (entry=0; entry < chunk /4; entry++, lb_num++) {
859 1.1.2.2 yamt udf_rw32_lbmap = *((uint32_t *) (blob + entry * 4));
860 1.1.2.2 yamt lb_map = udf_rw32(udf_rw32_lbmap);
861 1.1.2.2 yamt if (lb_map == 0xffffffff) {
862 1.1.2.2 yamt found = 1;
863 1.1.2.2 yamt break;
864 1.1.2.2 yamt }
865 1.1.2.2 yamt }
866 1.1.2.2 yamt } while (!found);
867 1.1.2.2 yamt if (error) {
868 1.1.2.2 yamt printf("udf_search_free_vatloc: error reading in vat chunk "
869 1.1.2.2 yamt "(lb %d, size %d)\n", lb_num, chunk);
870 1.1.2.2 yamt }
871 1.1.2.2 yamt
872 1.1.2.2 yamt if (!found) {
873 1.1.2.2 yamt /* extend VAT */
874 1.1.2.2 yamt DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
875 1.1.2.2 yamt lb_num = ump->vat_entries;
876 1.1.2.2 yamt ump->vat_entries++;
877 1.1.2.2 yamt }
878 1.1.2.2 yamt
879 1.1.2.2 yamt /* mark entry with initialiser just in case */
880 1.1.2.2 yamt lb_map = udf_rw32(0xfffffffe);
881 1.1.2.2 yamt udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
882 1.1.2.2 yamt ump->vat_offset + lb_num *4);
883 1.1.2.2 yamt ump->vat_last_free_lb = lb_num;
884 1.1.2.2 yamt
885 1.1.2.2 yamt free(blob, M_UDFTEMP);
886 1.1.2.2 yamt *lbnumres = lb_num;
887 1.1.2.2 yamt return 0;
888 1.1.2.2 yamt }
889 1.1.2.2 yamt
890 1.1.2.2 yamt
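/*
 * In these space bitmaps a set bit means the logical block is free:
 * udf_bitmap_allocate() below clears bits for blocks it hands out and
 * udf_bitmap_free() sets them again (udf_assert_allocated() above relies
 * on the same convention).
 */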
891 1.1.2.2 yamt static void
892 1.1.2.2 yamt udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
893 1.1.2.3 yamt uint32_t *num_lb, uint64_t *lmappos)
894 1.1.2.2 yamt {
895 1.1.2.2 yamt uint32_t offset, lb_num, bit;
896 1.1.2.2 yamt int32_t diff;
897 1.1.2.2 yamt uint8_t *bpos;
898 1.1.2.2 yamt int pass;
899 1.1.2.2 yamt
900 1.1.2.2 yamt if (!ismetadata) {
901 1.1.2.2 yamt /* heuristic to keep the two pointers not too close */
902 1.1.2.2 yamt diff = bitmap->data_pos - bitmap->metadata_pos;
903 1.1.2.2 yamt if ((diff >= 0) && (diff < 1024))
904 1.1.2.2 yamt bitmap->data_pos = bitmap->metadata_pos + 1024;
905 1.1.2.2 yamt }
906 1.1.2.2 yamt offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
907 1.1.2.2 yamt offset &= ~7;
908 1.1.2.2 yamt for (pass = 0; pass < 2; pass++) {
909 1.1.2.2 yamt if (offset >= bitmap->max_offset)
910 1.1.2.2 yamt offset = 0;
911 1.1.2.2 yamt
912 1.1.2.2 yamt while (offset < bitmap->max_offset) {
913 1.1.2.2 yamt if (*num_lb == 0)
914 1.1.2.2 yamt break;
915 1.1.2.2 yamt
916 1.1.2.2 yamt /* use first bit not set */
917 1.1.2.2 yamt bpos = bitmap->bits + offset/8;
918 1.1.2.3 yamt bit = ffs(*bpos); /* returns 0 or 1..8 */
919 1.1.2.2 yamt if (bit == 0) {
920 1.1.2.2 yamt offset += 8;
921 1.1.2.2 yamt continue;
922 1.1.2.2 yamt }
923 1.1.2.3 yamt
924 1.1.2.3 yamt /* check for ffs overshoot */
925 1.1.2.3 yamt if (offset + bit-1 >= bitmap->max_offset) {
926 1.1.2.3 yamt offset = bitmap->max_offset;
927 1.1.2.3 yamt break;
928 1.1.2.3 yamt }
929 1.1.2.3 yamt
930 1.1.2.3 yamt DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
931 1.1.2.3 yamt offset + bit -1, bpos, bit-1));
932 1.1.2.2 yamt *bpos &= ~(1 << (bit-1));
933 1.1.2.2 yamt lb_num = offset + bit-1;
934 1.1.2.2 yamt *lmappos++ = lb_num;
935 1.1.2.2 yamt *num_lb = *num_lb - 1;
936 1.1.2.2 yamt // offset = (offset & ~7);
937 1.1.2.2 yamt }
938 1.1.2.2 yamt }
939 1.1.2.2 yamt
940 1.1.2.2 yamt if (ismetadata) {
941 1.1.2.2 yamt bitmap->metadata_pos = offset;
942 1.1.2.2 yamt } else {
943 1.1.2.2 yamt bitmap->data_pos = offset;
944 1.1.2.2 yamt }
945 1.1.2.2 yamt }
946 1.1.2.2 yamt
947 1.1.2.2 yamt
948 1.1.2.2 yamt static void
949 1.1.2.2 yamt udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
950 1.1.2.2 yamt {
951 1.1.2.2 yamt uint32_t offset;
952 1.1.2.2 yamt uint32_t bit, bitval;
953 1.1.2.2 yamt uint8_t *bpos;
954 1.1.2.2 yamt
955 1.1.2.2 yamt offset = lb_num;
956 1.1.2.2 yamt
957 1.1.2.2 yamt /* starter bits */
958 1.1.2.2 yamt bpos = bitmap->bits + offset/8;
959 1.1.2.2 yamt bit = offset % 8;
960 1.1.2.2 yamt while ((bit != 0) && (num_lb > 0)) {
961 1.1.2.2 yamt bitval = (1 << bit);
962 1.1.2.2 yamt KASSERT((*bpos & bitval) == 0);
963 1.1.2.3 yamt DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
964 1.1.2.3 yamt offset, bpos, bit));
965 1.1.2.2 yamt *bpos |= bitval;
966 1.1.2.2 yamt offset++; num_lb--;
967 1.1.2.2 yamt bit = (bit + 1) % 8;
968 1.1.2.2 yamt }
969 1.1.2.2 yamt if (num_lb == 0)
970 1.1.2.2 yamt return;
971 1.1.2.2 yamt
972 1.1.2.2 yamt /* whole bytes */
973 1.1.2.2 yamt KASSERT(bit == 0);
974 1.1.2.2 yamt bpos = bitmap->bits + offset / 8;
975 1.1.2.2 yamt while (num_lb >= 8) {
976 1.1.2.2 yamt KASSERT((*bpos == 0));
977 1.1.2.3 yamt DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
978 1.1.2.2 yamt *bpos = 255;
979 1.1.2.2 yamt offset += 8; num_lb -= 8;
980 1.1.2.2 yamt bpos++;
981 1.1.2.2 yamt }
982 1.1.2.2 yamt
983 1.1.2.2 yamt /* stop bits */
984 1.1.2.2 yamt KASSERT(num_lb < 8);
985 1.1.2.2 yamt bit = 0;
986 1.1.2.2 yamt while (num_lb > 0) {
987 1.1.2.2 yamt bitval = (1 << bit);
988 1.1.2.2 yamt KASSERT((*bpos & bitval) == 0);
989 1.1.2.3 yamt DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
990 1.1.2.3 yamt offset, bpos, bit));
991 1.1.2.2 yamt *bpos |= bitval;
992 1.1.2.2 yamt offset++; num_lb--;
993 1.1.2.2 yamt bit = (bit + 1) % 8;
994 1.1.2.2 yamt }
995 1.1.2.2 yamt }
996 1.1.2.2 yamt
997 1.1.2.6 yamt
998 1.1.2.6 yamt static uint32_t
999 1.1.2.6 yamt udf_bitmap_check_trunc_free(struct udf_bitmap *bitmap, uint32_t to_trunc)
1000 1.1.2.6 yamt {
1001 1.1.2.6 yamt uint32_t seq_free, offset;
1002 1.1.2.6 yamt uint8_t *bpos;
1003 1.1.2.6 yamt uint8_t bit, bitval;
1004 1.1.2.6 yamt
1005 1.1.2.6 yamt DPRINTF(RESERVE, ("\ttrying to trunc %d bits from bitmap\n", to_trunc));
1006 1.1.2.6 yamt offset = bitmap->max_offset - to_trunc;
1007 1.1.2.6 yamt
1008 1.1.2.6 yamt /* starter bits (if any) */
1009 1.1.2.6 yamt bpos = bitmap->bits + offset/8;
1010 1.1.2.6 yamt bit = offset % 8;
1011 1.1.2.6 yamt seq_free = 0;
1012 1.1.2.6 yamt while (to_trunc > 0) {
1013 1.1.2.6 yamt seq_free++;
1014 1.1.2.6 yamt bitval = (1 << bit);
1015 1.1.2.6 yamt if (!(*bpos & bitval))
1016 1.1.2.6 yamt seq_free = 0;
1017 1.1.2.6 yamt offset++; to_trunc--;
1018 1.1.2.6 yamt bit++;
1019 1.1.2.6 yamt if (bit == 8) {
1020 1.1.2.6 yamt bpos++;
1021 1.1.2.6 yamt bit = 0;
1022 1.1.2.6 yamt }
1023 1.1.2.6 yamt }
1024 1.1.2.6 yamt
1025 1.1.2.6 yamt DPRINTF(RESERVE, ("\tfound %d sequential free bits in bitmap\n", seq_free));
1026 1.1.2.6 yamt return seq_free;
1027 1.1.2.6 yamt }
1028 1.1.2.6 yamt
1029 1.1.2.5 yamt /* --------------------------------------------------------------------- */
1030 1.1.2.2 yamt
1031 1.1.2.5 yamt /*
1032 1.1.2.5 yamt * We check for overall disc space with a margin to prevent critical
1033 1.1.2.5 yamt * conditions. If disc space is low we try to force a sync() to improve our
1034 1.1.2.5 yamt * estimates. When confronted with a meta-data partition size shortage we
1035 1.1.2.5 yamt * check whether that partition can be extended and we extend it when
1036 1.1.2.5 yamt * needed.
1037 1.1.2.5 yamt *
1038 1.1.2.5 yamt * A 2nd strategy we could use when disc space is getting low on a disc
1039 1.1.2.5 yamt * formatted with a meta-data partition is to see if there are sparse areas in
1040 1.1.2.5 yamt * the meta-data partition and free blocks there for extra data.
1041 1.1.2.5 yamt */
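/*
 * Rough flow of the helpers below: udf_reserve_space() bumps the
 * uncommitted_lbs counters (per mount and per node), udf_allocate_space()
 * later commits real blocks and credits the reservation back through
 * udf_do_unreserve_space(), and udf_cleanup_reservation() drops whatever
 * reservation a node still holds when it is cleaned up.
 */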
1042 1.1.2.5 yamt
1043 1.1.2.5 yamt void
1044 1.1.2.5 yamt udf_do_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
1045 1.1.2.5 yamt uint16_t vpart_num, uint32_t num_lb)
1046 1.1.2.5 yamt {
1047 1.1.2.5 yamt ump->uncommitted_lbs[vpart_num] += num_lb;
1048 1.1.2.5 yamt if (udf_node)
1049 1.1.2.5 yamt udf_node->uncommitted_lbs += num_lb;
1050 1.1.2.5 yamt }
1051 1.1.2.5 yamt
1052 1.1.2.5 yamt
1053 1.1.2.5 yamt void
1054 1.1.2.5 yamt udf_do_unreserve_space(struct udf_mount *ump, struct udf_node *udf_node,
1055 1.1.2.5 yamt uint16_t vpart_num, uint32_t num_lb)
1056 1.1.2.5 yamt {
1057 1.1.2.5 yamt ump->uncommitted_lbs[vpart_num] -= num_lb;
1058 1.1.2.5 yamt if (ump->uncommitted_lbs[vpart_num] < 0) {
1059 1.1.2.5 yamt DPRINTF(RESERVE, ("UDF: underflow on partition reservation, "
1060 1.1.2.5 yamt "part %d: %d\n", vpart_num,
1061 1.1.2.5 yamt ump->uncommitted_lbs[vpart_num]));
1062 1.1.2.5 yamt ump->uncommitted_lbs[vpart_num] = 0;
1063 1.1.2.5 yamt }
1064 1.1.2.5 yamt if (udf_node) {
1065 1.1.2.5 yamt udf_node->uncommitted_lbs -= num_lb;
1066 1.1.2.5 yamt if (udf_node->uncommitted_lbs < 0) {
1067 1.1.2.5 yamt DPRINTF(RESERVE, ("UDF: underflow of node "
1068 1.1.2.5 yamt "reservation : %d\n",
1069 1.1.2.5 yamt udf_node->uncommitted_lbs));
1070 1.1.2.5 yamt udf_node->uncommitted_lbs = 0;
1071 1.1.2.5 yamt }
1072 1.1.2.5 yamt }
1073 1.1.2.5 yamt }
1074 1.1.2.5 yamt
1075 1.1.2.5 yamt
1076 1.1.2.5 yamt int
1077 1.1.2.5 yamt udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
1078 1.1.2.5 yamt int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail)
1079 1.1.2.5 yamt {
1080 1.1.2.5 yamt uint64_t freeblks;
1081 1.1.2.5 yamt uint64_t slack;
1082 1.1.2.5 yamt int i, error;
1083 1.1.2.5 yamt
1084 1.1.2.5 yamt slack = 0;
1085 1.1.2.5 yamt if (can_fail)
1086 1.1.2.5 yamt slack = UDF_DISC_SLACK;
1087 1.1.2.5 yamt
1088 1.1.2.5 yamt error = 0;
1089 1.1.2.5 yamt mutex_enter(&ump->allocate_mutex);
1090 1.1.2.5 yamt
1091 1.1.2.5 yamt /* check if there is enough space available */
1092 1.1.2.6 yamt for (i = 0; i < 3; i++) { /* XXX arbitrary number */
1093 1.1.2.5 yamt udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
1094 1.1.2.5 yamt if (num_lb + slack < freeblks)
1095 1.1.2.5 yamt break;
1096 1.1.2.5 yamt /* issue SYNC */
1097 1.1.2.5 yamt DPRINTF(RESERVE, ("udf_reserve_space: issuing sync\n"));
1098 1.1.2.5 yamt mutex_exit(&ump->allocate_mutex);
1099 1.1.2.5 yamt udf_do_sync(ump, FSCRED, 0);
1100 1.1.2.5 yamt mutex_enter(&mntvnode_lock);
1101 1.1.2.6 yamt /* 1/8 second wait */
1102 1.1.2.5 yamt cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
1103 1.1.2.6 yamt hz/8);
1104 1.1.2.5 yamt mutex_exit(&mntvnode_lock);
1105 1.1.2.5 yamt mutex_enter(&ump->allocate_mutex);
1106 1.1.2.5 yamt }
1107 1.1.2.5 yamt
1108 1.1.2.5 yamt /* check if there is enough space available now */
1109 1.1.2.5 yamt udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
1110 1.1.2.5 yamt if (num_lb + slack >= freeblks) {
1111 1.1.2.6 yamt DPRINTF(RESERVE, ("udf_reserve_space: try to redistribute "
1112 1.1.2.6 yamt "partition space\n"));
1113 1.1.2.6 yamt DPRINTF(RESERVE, ("\tvpart %d, type %d is full\n",
1114 1.1.2.6 yamt vpart_num, ump->vtop_alloc[vpart_num]));
1115 1.1.2.6 yamt /* Try to redistribute space if possible */
1116 1.1.2.6 yamt udf_collect_free_space_for_vpart(ump, vpart_num, num_lb + slack);
1117 1.1.2.5 yamt }
1118 1.1.2.5 yamt
1119 1.1.2.5 yamt /* check if there is enough space available now */
1120 1.1.2.5 yamt udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
1121 1.1.2.5 yamt if (num_lb + slack <= freeblks) {
1122 1.1.2.5 yamt udf_do_reserve_space(ump, udf_node, vpart_num, num_lb);
1123 1.1.2.5 yamt } else {
1124 1.1.2.5 yamt DPRINTF(RESERVE, ("udf_reserve_space: out of disc space\n"));
1125 1.1.2.5 yamt error = ENOSPC;
1126 1.1.2.5 yamt }
1127 1.1.2.5 yamt
1128 1.1.2.5 yamt mutex_exit(&ump->allocate_mutex);
1129 1.1.2.5 yamt return error;
1130 1.1.2.5 yamt }
1131 1.1.2.5 yamt
1132 1.1.2.5 yamt
1133 1.1.2.5 yamt void
1134 1.1.2.5 yamt udf_cleanup_reservation(struct udf_node *udf_node)
1135 1.1.2.5 yamt {
1136 1.1.2.5 yamt struct udf_mount *ump = udf_node->ump;
1137 1.1.2.5 yamt int vpart_num;
1138 1.1.2.5 yamt
1139 1.1.2.5 yamt mutex_enter(&ump->allocate_mutex);
1140 1.1.2.5 yamt
1141 1.1.2.5 yamt /* compensate for overlapping blocks */
1142 1.1.2.5 yamt DPRINTF(RESERVE, ("UDF: overlapped %d blocks in count\n", udf_node->uncommitted_lbs));
1143 1.1.2.5 yamt
1144 1.1.2.5 yamt vpart_num = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
1145 1.1.2.5 yamt udf_do_unreserve_space(ump, udf_node, vpart_num, udf_node->uncommitted_lbs);
1146 1.1.2.5 yamt
1147 1.1.2.5 yamt DPRINTF(RESERVE, ("\ttotal now %d\n", ump->uncommitted_lbs[vpart_num]));
1148 1.1.2.5 yamt
1149 1.1.2.5 yamt /* sanity */
1150 1.1.2.5 yamt if (ump->uncommitted_lbs[vpart_num] < 0)
1151 1.1.2.5 yamt ump->uncommitted_lbs[vpart_num] = 0;
1152 1.1.2.5 yamt
1153 1.1.2.5 yamt mutex_exit(&ump->allocate_mutex);
1154 1.1.2.5 yamt }
1155 1.1.2.5 yamt
1156 1.1.2.5 yamt /* --------------------------------------------------------------------- */
1157 1.1.2.5 yamt
1158 1.1.2.5 yamt /*
1159 1.1.2.5 yamt * Allocate an extent of given length on given virt. partition. It doesn't
1160 1.1.2.5 yamt * have to be one stretch.
1161 1.1.2.5 yamt */
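/*
 * Which strategy is used depends on ump->vtop_alloc[vpart_num]:
 * UDF_ALLOC_VAT picks a free slot in the VAT, UDF_ALLOC_SEQUENTIAL hands
 * out the next writable sectors of the backing track, and
 * UDF_ALLOC_SPACEMAP / UDF_ALLOC_METABITMAP search the (metadata)
 * unallocated-space bitmaps.
 */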
1162 1.1.2.5 yamt
1163 1.1.2.5 yamt int
1164 1.1.2.5 yamt udf_allocate_space(struct udf_mount *ump, struct udf_node *udf_node,
1165 1.1.2.5 yamt int udf_c_type, uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
1166 1.1.2.2 yamt {
1167 1.1.2.2 yamt struct mmc_trackinfo *alloc_track, *other_track;
1168 1.1.2.2 yamt struct udf_bitmap *bitmap;
1169 1.1.2.2 yamt struct part_desc *pdesc;
1170 1.1.2.2 yamt struct logvol_int_desc *lvid;
1171 1.1.2.3 yamt uint64_t *lmappos;
1172 1.1.2.2 yamt uint32_t ptov, lb_num, *freepos, free_lbs;
1173 1.1.2.2 yamt int lb_size, alloc_num_lb;
1174 1.1.2.3 yamt int alloc_type, error;
1175 1.1.2.3 yamt int is_node;
1176 1.1.2.2 yamt
1177 1.1.2.3 yamt DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
1178 1.1.2.3 yamt udf_c_type, vpart_num, num_lb));
1179 1.1.2.2 yamt mutex_enter(&ump->allocate_mutex);
1180 1.1.2.3 yamt
1181 1.1.2.2 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
1182 1.1.2.2 yamt KASSERT(lb_size == ump->discinfo.sector_size);
1183 1.1.2.2 yamt
1184 1.1.2.3 yamt alloc_type = ump->vtop_alloc[vpart_num];
1185 1.1.2.3 yamt is_node = (udf_c_type == UDF_C_NODE);
1186 1.1.2.2 yamt
1187 1.1.2.2 yamt lmappos = lmapping;
1188 1.1.2.3 yamt error = 0;
1189 1.1.2.2 yamt switch (alloc_type) {
1190 1.1.2.2 yamt case UDF_ALLOC_VAT :
1191 1.1.2.2 yamt /* search empty slot in VAT file */
1192 1.1.2.2 yamt KASSERT(num_lb == 1);
1193 1.1.2.2 yamt error = udf_search_free_vatloc(ump, &lb_num);
1194 1.1.2.5 yamt if (!error) {
1195 1.1.2.2 yamt *lmappos = lb_num;
1196 1.1.2.5 yamt
1197 1.1.2.5 yamt /* reserve on the backing sequential partition since
1198 1.1.2.5 yamt * that partition is credited back later */
1199 1.1.2.5 yamt udf_do_reserve_space(ump, udf_node,
1200 1.1.2.5 yamt ump->vtop[vpart_num], num_lb);
1201 1.1.2.5 yamt }
1202 1.1.2.2 yamt break;
1203 1.1.2.2 yamt case UDF_ALLOC_SEQUENTIAL :
1204 1.1.2.2 yamt /* sequential allocation on recordable media */
1205 1.1.2.3 yamt /* get partition backing up this vpart_num */
1206 1.1.2.3 yamt pdesc = ump->partitions[ump->vtop[vpart_num]];
1207 1.1.2.3 yamt
1208 1.1.2.2 yamt /* calculate offset from physical base partition */
1209 1.1.2.2 yamt ptov = udf_rw32(pdesc->start_loc);
1210 1.1.2.2 yamt
1211 1.1.2.3 yamt /* get our track descriptors */
1212 1.1.2.3 yamt if (vpart_num == ump->node_part) {
1213 1.1.2.3 yamt alloc_track = &ump->metadata_track;
1214 1.1.2.3 yamt other_track = &ump->data_track;
1215 1.1.2.3 yamt } else {
1216 1.1.2.3 yamt alloc_track = &ump->data_track;
1217 1.1.2.3 yamt other_track = &ump->metadata_track;
1218 1.1.2.3 yamt }
1219 1.1.2.3 yamt
1220 1.1.2.3 yamt /* allocate */
1221 1.1.2.2 yamt for (lb_num = 0; lb_num < num_lb; lb_num++) {
1222 1.1.2.2 yamt *lmappos++ = alloc_track->next_writable - ptov;
1223 1.1.2.2 yamt alloc_track->next_writable++;
1224 1.1.2.2 yamt alloc_track->free_blocks--;
1225 1.1.2.2 yamt }
1226 1.1.2.3 yamt
1227 1.1.2.3 yamt /* keep other track up-to-date */
1228 1.1.2.2 yamt if (alloc_track->tracknr == other_track->tracknr)
1229 1.1.2.2 yamt memcpy(other_track, alloc_track,
1230 1.1.2.2 yamt sizeof(struct mmc_trackinfo));
1231 1.1.2.2 yamt break;
1232 1.1.2.2 yamt case UDF_ALLOC_SPACEMAP :
1233 1.1.2.3 yamt /* try to allocate on unallocated bits */
1234 1.1.2.2 yamt alloc_num_lb = num_lb;
1235 1.1.2.3 yamt bitmap = &ump->part_unalloc_bits[vpart_num];
1236 1.1.2.3 yamt udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
1237 1.1.2.2 yamt ump->lvclose |= UDF_WRITE_PART_BITMAPS;
1238 1.1.2.3 yamt
1239 1.1.2.3 yamt /* have we allocated all? */
1240 1.1.2.2 yamt if (alloc_num_lb) {
1241 1.1.2.2 yamt /* TODO convert freed to unalloc and try again */
1242 1.1.2.2 yamt /* free allocated piece for now */
1243 1.1.2.2 yamt lmappos = lmapping;
1244 1.1.2.2 yamt for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
1245 1.1.2.2 yamt udf_bitmap_free(bitmap, *lmappos++, 1);
1246 1.1.2.2 yamt }
1247 1.1.2.2 yamt error = ENOSPC;
1248 1.1.2.2 yamt }
1249 1.1.2.2 yamt if (!error) {
1250 1.1.2.2 yamt /* adjust freecount */
1251 1.1.2.2 yamt lvid = ump->logvol_integrity;
1252 1.1.2.3 yamt freepos = &lvid->tables[0] + vpart_num;
1253 1.1.2.3 yamt free_lbs = udf_rw32(*freepos);
1254 1.1.2.3 yamt *freepos = udf_rw32(free_lbs - num_lb);
1255 1.1.2.3 yamt }
1256 1.1.2.3 yamt break;
1257 1.1.2.3 yamt case UDF_ALLOC_METABITMAP : /* UDF 2.50, 2.60 BluRay-RE */
1258 1.1.2.3 yamt /* allocate on metadata unallocated bits */
1259 1.1.2.3 yamt alloc_num_lb = num_lb;
1260 1.1.2.3 yamt bitmap = &ump->metadata_unalloc_bits;
1261 1.1.2.3 yamt udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
1262 1.1.2.3 yamt ump->lvclose |= UDF_WRITE_PART_BITMAPS;
1263 1.1.2.3 yamt
1264 1.1.2.3 yamt /* have we allocated all? */
1265 1.1.2.3 yamt if (alloc_num_lb) {
1266 1.1.2.3 yamt /* YIKES! TODO we need to extend the metadata partition */
1267 1.1.2.3 yamt /* free allocated piece for now */
1268 1.1.2.3 yamt lmappos = lmapping;
1269 1.1.2.3 yamt for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
1270 1.1.2.3 yamt udf_bitmap_free(bitmap, *lmappos++, 1);
1271 1.1.2.3 yamt }
1272 1.1.2.3 yamt error = ENOSPC;
1273 1.1.2.3 yamt }
1274 1.1.2.3 yamt if (!error) {
1275 1.1.2.3 yamt /* adjust freecount */
1276 1.1.2.3 yamt lvid = ump->logvol_integrity;
1277 1.1.2.3 yamt freepos = &lvid->tables[0] + vpart_num;
1278 1.1.2.2 yamt free_lbs = udf_rw32(*freepos);
1279 1.1.2.2 yamt *freepos = udf_rw32(free_lbs - num_lb);
1280 1.1.2.2 yamt }
1281 1.1.2.2 yamt break;
1282 1.1.2.3 yamt case UDF_ALLOC_METASEQUENTIAL : /* UDF 2.60 BluRay-R */
1283 1.1.2.3 yamt case UDF_ALLOC_RELAXEDSEQUENTIAL : /* UDF 2.50/~meta BluRay-R */
1284 1.1.2.2 yamt printf("ALERT: udf_allocate_space : allocation %d "
1285 1.1.2.2 yamt "not implemented yet!\n", alloc_type);
1286 1.1.2.2 yamt /* TODO implement, doesn't have to be contiguous */
1287 1.1.2.2 yamt error = ENOSPC;
1288 1.1.2.2 yamt break;
1289 1.1.2.2 yamt }
1290 1.1.2.2 yamt
1291 1.1.2.5 yamt if (!error) {
1292 1.1.2.5 yamt /* credit our partition since we have committed the space */
1293 1.1.2.5 yamt udf_do_unreserve_space(ump, udf_node, vpart_num, num_lb);
1294 1.1.2.5 yamt }
1295 1.1.2.5 yamt
1296 1.1.2.2 yamt #ifdef DEBUG
1297 1.1.2.2 yamt if (udf_verbose & UDF_DEBUG_ALLOC) {
1298 1.1.2.2 yamt lmappos = lmapping;
1299 1.1.2.3 yamt printf("udf_allocate_space, allocated logical lba :\n");
1300 1.1.2.2 yamt for (lb_num = 0; lb_num < num_lb; lb_num++) {
1301 1.1.2.5 yamt printf("%s %"PRIu64, (lb_num > 0)?",":"",
1302 1.1.2.3 yamt *lmappos++);
1303 1.1.2.2 yamt }
1304 1.1.2.3 yamt printf("\n");
1305 1.1.2.2 yamt }
1306 1.1.2.2 yamt #endif
1307 1.1.2.2 yamt mutex_exit(&ump->allocate_mutex);
1308 1.1.2.2 yamt
1309 1.1.2.2 yamt return error;
1310 1.1.2.2 yamt }
1311 1.1.2.2 yamt
1312 1.1.2.2 yamt /* --------------------------------------------------------------------- */
1313 1.1.2.2 yamt
1314 1.1.2.2 yamt void
1315 1.1.2.2 yamt udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
1316 1.1.2.2 yamt uint16_t vpart_num, uint32_t num_lb)
1317 1.1.2.2 yamt {
1318 1.1.2.2 yamt struct udf_bitmap *bitmap;
1319 1.1.2.2 yamt struct part_desc *pdesc;
1320 1.1.2.2 yamt struct logvol_int_desc *lvid;
1321 1.1.2.2 yamt uint32_t ptov, lb_map, udf_rw32_lbmap;
1322 1.1.2.2 yamt uint32_t *freepos, free_lbs;
1323 1.1.2.2 yamt int phys_part;
1324 1.1.2.2 yamt int error;
1325 1.1.2.2 yamt
1326 1.1.2.2 yamt DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
1327 1.1.2.2 yamt "part %d + %d sect\n", lb_num, vpart_num, num_lb));
1328 1.1.2.2 yamt
1329 1.1.2.3 yamt /* no use freeing zero length */
1330 1.1.2.3 yamt if (num_lb == 0)
1331 1.1.2.3 yamt return;
1332 1.1.2.3 yamt
1333 1.1.2.2 yamt mutex_enter(&ump->allocate_mutex);
1334 1.1.2.2 yamt
1335 1.1.2.2 yamt /* get partition backing up this vpart_num */
1336 1.1.2.2 yamt pdesc = ump->partitions[ump->vtop[vpart_num]];
1337 1.1.2.2 yamt
1338 1.1.2.2 yamt switch (ump->vtop_tp[vpart_num]) {
1339 1.1.2.2 yamt case UDF_VTOP_TYPE_PHYS :
1340 1.1.2.2 yamt case UDF_VTOP_TYPE_SPARABLE :
1341 1.1.2.2 yamt /* free space to freed or unallocated space bitmap */
1342 1.1.2.2 yamt ptov = udf_rw32(pdesc->start_loc);
1343 1.1.2.2 yamt phys_part = ump->vtop[vpart_num];
1344 1.1.2.2 yamt
1345 1.1.2.2 yamt /* first try freed space bitmap */
1346 1.1.2.2 yamt bitmap = &ump->part_freed_bits[phys_part];
1347 1.1.2.2 yamt
1348 1.1.2.2 yamt /* if not defined, use unallocated bitmap */
1349 1.1.2.2 yamt if (bitmap->bits == NULL)
1350 1.1.2.2 yamt bitmap = &ump->part_unalloc_bits[phys_part];
1351 1.1.2.2 yamt
1352 1.1.2.3 yamt /* if no bitmaps are defined, bail out; XXX OK? */
1353 1.1.2.2 yamt if (bitmap->bits == NULL)
1354 1.1.2.2 yamt break;
1355 1.1.2.2 yamt
1356 1.1.2.2 yamt /* free bits if its defined */
1357 1.1.2.2 yamt KASSERT(bitmap->bits);
1358 1.1.2.2 yamt ump->lvclose |= UDF_WRITE_PART_BITMAPS;
1359 1.1.2.2 yamt udf_bitmap_free(bitmap, lb_num, num_lb);
1360 1.1.2.2 yamt
1361 1.1.2.2 yamt /* adjust freecount */
1362 1.1.2.2 yamt lvid = ump->logvol_integrity;
1363 1.1.2.2 yamt freepos = &lvid->tables[0] + vpart_num;
1364 1.1.2.2 yamt free_lbs = udf_rw32(*freepos);
1365 1.1.2.2 yamt *freepos = udf_rw32(free_lbs + num_lb);
1366 1.1.2.2 yamt break;
1367 1.1.2.2 yamt case UDF_VTOP_TYPE_VIRT :
1368 1.1.2.2 yamt /* free this VAT entry */
1369 1.1.2.2 yamt KASSERT(num_lb == 1);
1370 1.1.2.2 yamt
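		/*
		 * Each VAT entry is a 32-bit virtual-to-physical block mapping;
		 * writing the all-ones value below marks the entry as unused so
		 * the virtual block can be handed out again on a later VAT
		 * rewrite.
		 */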
1371 1.1.2.2 yamt lb_map = 0xffffffff;
1372 1.1.2.2 yamt udf_rw32_lbmap = udf_rw32(lb_map);
1373 1.1.2.2 yamt error = udf_vat_write(ump->vat_node,
1374 1.1.2.2 yamt (uint8_t *) &udf_rw32_lbmap, 4,
1375 1.1.2.2 yamt ump->vat_offset + lb_num * 4);
1376 1.1.2.2 yamt KASSERT(error == 0);
1377 1.1.2.2 yamt ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
1378 1.1.2.2 yamt break;
1379 1.1.2.2 yamt case UDF_VTOP_TYPE_META :
1380 1.1.2.2 yamt /* free space in the metadata bitmap */
1381 1.1.2.3 yamt bitmap = &ump->metadata_unalloc_bits;
1382 1.1.2.3 yamt KASSERT(bitmap->bits);
1383 1.1.2.3 yamt
1384 1.1.2.3 yamt ump->lvclose |= UDF_WRITE_PART_BITMAPS;
1385 1.1.2.3 yamt udf_bitmap_free(bitmap, lb_num, num_lb);
1386 1.1.2.3 yamt
1387 1.1.2.3 yamt /* adjust freecount */
1388 1.1.2.3 yamt lvid = ump->logvol_integrity;
1389 1.1.2.3 yamt freepos = &lvid->tables[0] + vpart_num;
1390 1.1.2.3 yamt free_lbs = udf_rw32(*freepos);
1391 1.1.2.3 yamt *freepos = udf_rw32(free_lbs + num_lb);
1392 1.1.2.3 yamt break;
1393 1.1.2.2 yamt default:
1394 1.1.2.2 yamt printf("ALERT: udf_free_allocated_space : allocation %d "
1395 1.1.2.2 yamt "not implemented yet!\n", ump->vtop_tp[vpart_num]);
1396 1.1.2.2 yamt break;
1397 1.1.2.2 yamt }
1398 1.1.2.2 yamt
1399 1.1.2.2 yamt mutex_exit(&ump->allocate_mutex);
1400 1.1.2.2 yamt }
1401 1.1.2.2 yamt
1402 1.1.2.2 yamt /* --------------------------------------------------------------------- */
1403 1.1.2.2 yamt
1404 1.1.2.2 yamt /*
1405 1.1.2.6 yamt * Special function to synchronise the metadata-mirror file when the metadata
1406 1.1.2.6 yamt * file changes on resizing. When the metadata file is actually duplicated,
1407 1.1.2.6 yamt * this is a no-op since the two files describe different extents on the disc.
1408 1.1.2.6 yamt */
1409 1.1.2.6 yamt
1410 1.1.2.6 yamt void udf_synchronise_metadatamirror_node(struct udf_mount *ump)
1411 1.1.2.6 yamt {
1412 1.1.2.6 yamt struct udf_node *meta_node, *metamirror_node;
1413 1.1.2.6 yamt struct long_ad s_ad;
1414 1.1.2.6 yamt int slot, cpy_slot;
1415 1.1.2.6 yamt int error, eof;
1416 1.1.2.6 yamt
1417 1.1.2.6 yamt if (ump->metadata_flags & METADATA_DUPLICATED)
1418 1.1.2.6 yamt return;
1419 1.1.2.6 yamt
1420 1.1.2.6 yamt meta_node = ump->metadata_node;
1421 1.1.2.6 yamt metamirror_node = ump->metadatamirror_node;
1422 1.1.2.6 yamt
1423 1.1.2.6 yamt /* 1) wipe mirror node */
1424 1.1.2.6 yamt udf_wipe_adslots(metamirror_node);
1425 1.1.2.6 yamt
1426 1.1.2.6 yamt /* 2) copy all node descriptors from the meta_node */
1427 1.1.2.6 yamt slot = 0;
1428 1.1.2.6 yamt cpy_slot = 0;
1429 1.1.2.6 yamt for (;;) {
1430 1.1.2.6 yamt udf_get_adslot(meta_node, slot, &s_ad, &eof);
1431 1.1.2.6 yamt if (eof)
1432 1.1.2.6 yamt break;
1433 1.1.2.6 yamt error = udf_append_adslot(metamirror_node, &cpy_slot, &s_ad);
1434 1.1.2.6 yamt if (error) {
1435 1.1.2.6 yamt /* WTF, this shouldn't happen, what to do now? */
1436 1.1.2.6 yamt panic("udf_synchronise_metadatamirror_node failed!");
1437 1.1.2.6 yamt }
1438 1.1.2.6 yamt slot++;
1439 1.1.2.6 yamt }
1440 1.1.2.6 yamt
1441 1.1.2.6 yamt /* 3) adjust metamirror_node size */
1442 1.1.2.6 yamt if (meta_node->fe) {
1443 1.1.2.6 yamt KASSERT(metamirror_node->fe);
1444 1.1.2.6 yamt metamirror_node->fe->inf_len = meta_node->fe->inf_len;
1445 1.1.2.6 yamt } else {
1446 1.1.2.6 yamt KASSERT(meta_node->efe);
1447 1.1.2.6 yamt KASSERT(metamirror_node->efe);
1448 1.1.2.6 yamt metamirror_node->efe->inf_len = meta_node->efe->inf_len;
1449 1.1.2.6 yamt metamirror_node->efe->obj_size = meta_node->efe->obj_size;
1450 1.1.2.6 yamt }
1451 1.1.2.6 yamt
1452 1.1.2.6 yamt /* for sanity */
1453 1.1.2.6 yamt udf_count_alloc_exts(metamirror_node);
1454 1.1.2.6 yamt }
1455 1.1.2.6 yamt
1456 1.1.2.6 yamt /* --------------------------------------------------------------------- */
1457 1.1.2.6 yamt
1458 1.1.2.6 yamt /*
1459 1.1.2.6 yamt * When faced with an out-of-space condition while there is still space
1460 1.1.2.6 yamt * available on other partitions, try to redistribute it. This is only defined
1461 1.1.2.6 yamt * for media using Metadata partitions.
1462 1.1.2.6 yamt *
1463 1.1.2.6 yamt * There are two formats to deal with. Either it's a `normal' metadata
1464 1.1.2.6 yamt * partition, in which case we can move blocks between the metadata bitmap and
1465 1.1.2.6 yamt * its companion data spacemap, OR it's a UDF 2.60 formatted BluRay-R disc with
1466 1.1.2.6 yamt * POW and a metadata partition.
1467 1.1.2.6 yamt */
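/*
 * The entry point below is udf_collect_free_space_for_vpart(); shrinking the
 * metadata partition is first attempted by truncating its unused tail
 * (udf_trunc_metadatapart) and, for whatever cannot be truncated, by
 * sparsifying it (udf_sparsify_metadatapart, not implemented yet).
 */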
1468 1.1.2.6 yamt
1469 1.1.2.6 yamt static uint32_t
1470 1.1.2.6 yamt udf_trunc_metadatapart(struct udf_mount *ump, uint32_t num_lb)
1471 1.1.2.6 yamt {
1472 1.1.2.6 yamt struct udf_node *bitmap_node;
1473 1.1.2.6 yamt struct udf_bitmap *bitmap;
1474 1.1.2.6 yamt struct space_bitmap_desc *sbd, *new_sbd;
1475 1.1.2.6 yamt struct logvol_int_desc *lvid;
1476 1.1.2.6 yamt uint64_t inf_len;
1477 1.1.2.6 yamt uint64_t meta_free_lbs, data_free_lbs;
1478 1.1.2.6 yamt uint32_t *freepos, *sizepos;
1479 1.1.2.6 yamt uint32_t unit, lb_size, to_trunc;
1480 1.1.2.6 yamt uint16_t meta_vpart_num, data_vpart_num, num_vpart;
1481 1.1.2.6 yamt int err;
1482 1.1.2.6 yamt
1483 1.1.2.6 yamt unit = ump->metadata_alloc_unit_size;
1484 1.1.2.6 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
1485 1.1.2.6 yamt lvid = ump->logvol_integrity;
1486 1.1.2.6 yamt
1487 1.1.2.6 yamt /* lookup vpart for metadata partition */
1488 1.1.2.6 yamt meta_vpart_num = ump->node_part;
1489 1.1.2.6 yamt KASSERT(ump->vtop_alloc[meta_vpart_num] == UDF_ALLOC_METABITMAP);
1490 1.1.2.6 yamt
1491 1.1.2.6 yamt /* lookup vpart for data partition */
1492 1.1.2.6 yamt data_vpart_num = ump->data_part;
1493 1.1.2.6 yamt KASSERT(ump->vtop_alloc[data_vpart_num] == UDF_ALLOC_SPACEMAP);
1494 1.1.2.6 yamt
1495 1.1.2.6 yamt udf_calc_vpart_freespace(ump, data_vpart_num, &data_free_lbs);
1496 1.1.2.6 yamt udf_calc_vpart_freespace(ump, meta_vpart_num, &meta_free_lbs);
1497 1.1.2.6 yamt
1498 1.1.2.6 yamt DPRINTF(RESERVE, ("\tfree space on data partition %"PRIu64" blks\n", data_free_lbs));
1499 1.1.2.6 yamt DPRINTF(RESERVE, ("\tfree space on metadata partition %"PRIu64" blks\n", meta_free_lbs));
1500 1.1.2.6 yamt
1501 1.1.2.6 yamt /* give away some of the free meta space, in unit block sizes */
1502 1.1.2.6 yamt to_trunc = meta_free_lbs/4; /* give out a quarter */
1503 1.1.2.6 yamt to_trunc = MAX(to_trunc, num_lb);
1504 1.1.2.6 yamt to_trunc = unit * ((to_trunc + unit-1) / unit); /* round up */
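	/*
	 * Example with illustrative numbers: for meta_free_lbs = 1000,
	 * num_lb = 10 and an allocation unit of 32 blocks, to_trunc becomes
	 * 1000/4 = 250, MAX(250, 10) = 250, rounded up to 8*32 = 256 blocks.
	 */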
1505 1.1.2.6 yamt
1506 1.1.2.6 yamt /* scale down if needed and bail out when out of space */
1507 1.1.2.6 yamt if (to_trunc >= meta_free_lbs)
1508 1.1.2.6 yamt return num_lb;
1509 1.1.2.6 yamt
1510 1.1.2.6 yamt /* check extent of bits marked free at the end of the map */
1511 1.1.2.6 yamt bitmap = &ump->metadata_unalloc_bits;
1512 1.1.2.6 yamt to_trunc = udf_bitmap_check_trunc_free(bitmap, to_trunc);
1513 1.1.2.6 yamt to_trunc = unit * (to_trunc / unit); /* round down again */
1514 1.1.2.6 yamt if (to_trunc == 0)
1515 1.1.2.6 yamt return num_lb;
1516 1.1.2.6 yamt
1517 1.1.2.6 yamt DPRINTF(RESERVE, ("\ttruncating %d lbs from the metadata bitmap\n",
1518 1.1.2.6 yamt to_trunc));
1519 1.1.2.6 yamt
1520 1.1.2.6 yamt /* get length of the metadata bitmap node file */
1521 1.1.2.6 yamt bitmap_node = ump->metadatabitmap_node;
1522 1.1.2.6 yamt if (bitmap_node->fe) {
1523 1.1.2.6 yamt inf_len = udf_rw64(bitmap_node->fe->inf_len);
1524 1.1.2.6 yamt } else {
1525 1.1.2.6 yamt KASSERT(bitmap_node->efe);
1526 1.1.2.6 yamt inf_len = udf_rw64(bitmap_node->efe->inf_len);
1527 1.1.2.6 yamt }
1528 1.1.2.6 yamt inf_len -= to_trunc/8;
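	/*
	 * The space bitmap holds one bit per logical block, so dropping
	 * to_trunc blocks shrinks the bitmap file by to_trunc/8 bytes;
	 * e.g. truncating 256 blocks removes 32 bytes.
	 */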
1529 1.1.2.6 yamt
1530 1.1.2.6 yamt /* as per [UDF 2.60/2.2.13.6] : */
1531 1.1.2.6 yamt /* 1) update the SBD in the metadata bitmap file */
1532 1.1.2.6 yamt sbd = (struct space_bitmap_desc *) bitmap->blob;
1533 1.1.2.6 yamt sbd->num_bits = udf_rw32(sbd->num_bits) - to_trunc;
1534 1.1.2.6 yamt sbd->num_bytes = udf_rw32(sbd->num_bytes) - to_trunc/8;
1535 1.1.2.6 yamt bitmap->max_offset = udf_rw32(sbd->num_bits);
1536 1.1.2.6 yamt
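	/*
	 * The logical volume integrity descriptor carries its free-space
	 * table followed by its size table, each with one 32-bit entry per
	 * partition; hence the indexing with vpart_num and
	 * num_vpart + vpart_num below.
	 */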
1537 1.1.2.6 yamt num_vpart = udf_rw32(lvid->num_part);
1538 1.1.2.6 yamt freepos = &lvid->tables[0] + meta_vpart_num;
1539 1.1.2.6 yamt sizepos = &lvid->tables[0] + num_vpart + meta_vpart_num;
1540 1.1.2.6 yamt *freepos = udf_rw32(*freepos) - to_trunc;
1541 1.1.2.6 yamt *sizepos = udf_rw32(*sizepos) - to_trunc;
1542 1.1.2.6 yamt
1543 1.1.2.6 yamt /* realloc bitmap for better memory usage */
1544 1.1.2.6 yamt new_sbd = realloc(sbd, inf_len, M_UDFVOLD,
1545 1.1.2.6 yamt M_CANFAIL | M_WAITOK);
1546 1.1.2.6 yamt if (new_sbd) {
1547 1.1.2.6 yamt /* update pointers */
1548 1.1.2.6 yamt ump->metadata_unalloc_dscr = new_sbd;
1549 1.1.2.6 yamt bitmap->blob = (uint8_t *) new_sbd;
1550 1.1.2.6 yamt }
1551 1.1.2.6 yamt ump->lvclose |= UDF_WRITE_PART_BITMAPS;
1552 1.1.2.6 yamt
1553 1.1.2.6 yamt /*
1554 1.1.2.6 yamt * The truncated space is secured now and can't be allocated anymore. Release
1555 1.1.2.6 yamt * the allocate mutex so we can shrink the nodes the normal way.
1556 1.1.2.6 yamt */
1557 1.1.2.6 yamt mutex_exit(&ump->allocate_mutex);
1558 1.1.2.6 yamt
1559 1.1.2.6 yamt /* 2) trunc the metadata bitmap information file, freeing blocks */
1560 1.1.2.6 yamt err = udf_shrink_node(bitmap_node, inf_len);
1561 1.1.2.6 yamt KASSERT(err == 0);
1562 1.1.2.6 yamt
1563 1.1.2.6 yamt /* 3) trunc the metadata file and mirror file, freeing blocks */
1564 1.1.2.6 yamt inf_len = udf_rw32(sbd->num_bits) * lb_size; /* [4/14.12.4] */
1565 1.1.2.6 yamt err = udf_shrink_node(ump->metadata_node, inf_len);
1566 1.1.2.6 yamt KASSERT(err == 0);
1567 1.1.2.6 yamt if (ump->metadatamirror_node && (ump->metadata_flags & METADATA_DUPLICATED)) {
1568 1.1.2.6 yamt err = udf_shrink_node(ump->metadatamirror_node, inf_len);
1569 1.1.2.6 yamt KASSERT(err == 0);
1570 1.1.2.6 yamt }
1571 1.1.2.6 yamt ump->lvclose |= UDF_WRITE_METAPART_NODES;
1572 1.1.2.6 yamt
1573 1.1.2.6 yamt /* relock before exit */
1574 1.1.2.6 yamt mutex_enter(&ump->allocate_mutex);
1575 1.1.2.6 yamt
1576 1.1.2.6 yamt if (to_trunc > num_lb)
1577 1.1.2.6 yamt return 0;
1578 1.1.2.6 yamt return num_lb - to_trunc;
1579 1.1.2.6 yamt }
1580 1.1.2.6 yamt
1581 1.1.2.6 yamt
1582 1.1.2.6 yamt static void
1583 1.1.2.6 yamt udf_sparsify_metadatapart(struct udf_mount *ump, uint32_t num_lb)
1584 1.1.2.6 yamt {
1585 1.1.2.6 yamt /* NOT IMPLEMENTED, fail */
1586 1.1.2.6 yamt }
1587 1.1.2.6 yamt
1588 1.1.2.6 yamt
1589 1.1.2.6 yamt static void
1590 1.1.2.6 yamt udf_collect_free_space_for_vpart(struct udf_mount *ump,
1591 1.1.2.6 yamt uint16_t vpart_num, uint32_t num_lb)
1592 1.1.2.6 yamt {
1593 1.1.2.6 yamt /* allocate mutex is held */
1594 1.1.2.6 yamt
1595 1.1.2.6 yamt /* only defined for metadata partitions */
1596 1.1.2.6 yamt if (ump->vtop_tp[ump->node_part] != UDF_VTOP_TYPE_META) {
1597 1.1.2.6 yamt DPRINTF(RESERVE, ("\tcan't grow/shrink; no metadata partitioning\n"));
1598 1.1.2.6 yamt return;
1599 1.1.2.6 yamt }
1600 1.1.2.6 yamt
1601 1.1.2.6 yamt /* UDF 2.60 BD-R+POW? */
1602 1.1.2.6 yamt if (ump->vtop_alloc[ump->node_part] == UDF_ALLOC_METASEQUENTIAL) {
1603 1.1.2.6 yamt DPRINTF(RESERVE, ("\tUDF 2.60 BD-R+POW track grow not implemented yet\n"));
1604 1.1.2.6 yamt return;
1605 1.1.2.6 yamt }
1606 1.1.2.6 yamt
1607 1.1.2.6 yamt if (ump->vtop_tp[vpart_num] == UDF_VTOP_TYPE_META) {
1608 1.1.2.6 yamt /* try to grow the meta partition */
1609 1.1.2.6 yamt DPRINTF(RESERVE, ("\ttrying to grow the meta partition\n"));
1610 1.1.2.6 yamt /* as per [UDF 2.60/2.2.13.5] : extend bitmap and metadata file(s) */
1611 1.1.2.6 yamt } else {
1612 1.1.2.6 yamt /* try to shrink the metadata partition */
1613 1.1.2.6 yamt DPRINTF(RESERVE, ("\ttrying to shrink the meta partition\n"));
1614 1.1.2.6 yamt /* as per [UDF 2.60/2.2.13.6] : either trunc or make sparse */
1615 1.1.2.6 yamt num_lb = udf_trunc_metadatapart(ump, num_lb);
1616 1.1.2.6 yamt if (num_lb)
1617 1.1.2.6 yamt udf_sparsify_metadatapart(ump, num_lb);
1618 1.1.2.6 yamt }
1619 1.1.2.6 yamt
1620 1.1.2.6 yamt /* allocate mutex should still be held */
1621 1.1.2.6 yamt }
1622 1.1.2.6 yamt
1623 1.1.2.6 yamt /* --------------------------------------------------------------------- */
1624 1.1.2.6 yamt
1625 1.1.2.6 yamt /*
1626 1.1.2.2 yamt * Allocate a buf on disc for direct write out. The space doesn't have to be
1627 1.1.2.2 yamt * contiguous as the caller takes care of this.
1628 1.1.2.2 yamt */
1629 1.1.2.2 yamt
1630 1.1.2.2 yamt void
1631 1.1.2.2 yamt udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
1632 1.1.2.3 yamt uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
1633 1.1.2.2 yamt {
1634 1.1.2.2 yamt struct udf_node *udf_node = VTOI(buf->b_vp);
1635 1.1.2.2 yamt int lb_size, blks, udf_c_type;
1636 1.1.2.3 yamt int vpart_num, num_lb;
1637 1.1.2.2 yamt int error, s;
1638 1.1.2.2 yamt
1639 1.1.2.2 yamt /*
1640 1.1.2.2 yamt * for each sector in the buf, allocate a sector on disc and record
1641 1.1.2.2 yamt * its position in the provided mapping array.
1642 1.1.2.2 yamt *
1643 1.1.2.2 yamt * If it's userdata or FIDs, record its location in its node.
1644 1.1.2.2 yamt */
1645 1.1.2.2 yamt
1646 1.1.2.2 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
1647 1.1.2.2 yamt num_lb = (buf->b_bcount + lb_size -1) / lb_size;
1648 1.1.2.2 yamt blks = lb_size / DEV_BSIZE;
1649 1.1.2.2 yamt udf_c_type = buf->b_udf_c_type;
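	/*
	 * num_lb above is the buffer size rounded up to whole logical blocks;
	 * e.g. a 9000 byte buffer on 2048 byte blocks needs
	 * (9000 + 2047) / 2048 = 5 blocks.
	 */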
1650 1.1.2.2 yamt
1651 1.1.2.2 yamt KASSERT(lb_size == ump->discinfo.sector_size);
1652 1.1.2.2 yamt
1653 1.1.2.3 yamt /* select partition to record the buffer on */
1654 1.1.2.5 yamt vpart_num = *vpart_nump = udf_get_record_vpart(ump, udf_c_type);
1655 1.1.2.2 yamt
1656 1.1.2.2 yamt if (udf_c_type == UDF_C_NODE) {
1657 1.1.2.2 yamt /* if not VAT, it's already allocated */
1658 1.1.2.3 yamt if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
1659 1.1.2.2 yamt return;
1660 1.1.2.2 yamt
1661 1.1.2.3 yamt /* allocate on its backing sequential partition */
1662 1.1.2.3 yamt vpart_num = ump->data_part;
1663 1.1.2.2 yamt }
1664 1.1.2.2 yamt
1665 1.1.2.5 yamt /* XXX can this still happen? */
1666 1.1.2.3 yamt /* do allocation on the selected partition */
1667 1.1.2.5 yamt error = udf_allocate_space(ump, udf_node, udf_c_type,
1668 1.1.2.3 yamt vpart_num, num_lb, lmapping);
1669 1.1.2.2 yamt if (error) {
1670 1.1.2.5 yamt /*
1671 1.1.2.5 yamt * ARGH! we haven't done our accounting right! it should
1672 1.1.2.5 yamt * always succeed.
1673 1.1.2.5 yamt */
1674 1.1.2.2 yamt panic("UDF disc allocation accounting gone wrong");
1675 1.1.2.2 yamt }
1676 1.1.2.2 yamt
1677 1.1.2.2 yamt /* If it's userdata or FIDs, record its allocation in its node. */
1678 1.1.2.3 yamt if ((udf_c_type == UDF_C_USERDATA) ||
1679 1.1.2.3 yamt (udf_c_type == UDF_C_FIDS) ||
1680 1.1.2.3 yamt (udf_c_type == UDF_C_METADATA_SBM))
1681 1.1.2.3 yamt {
1682 1.1.2.2 yamt udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
1683 1.1.2.2 yamt node_ad_cpy);
1684 1.1.2.2 yamt /* decrement our outstanding bufs counter */
1685 1.1.2.2 yamt s = splbio();
1686 1.1.2.2 yamt udf_node->outstanding_bufs--;
1687 1.1.2.2 yamt splx(s);
1688 1.1.2.2 yamt }
1689 1.1.2.2 yamt }
1690 1.1.2.2 yamt
1691 1.1.2.2 yamt /* --------------------------------------------------------------------- */
1692 1.1.2.2 yamt
1693 1.1.2.2 yamt /*
1694 1.1.2.2 yamt * Try to merge a1 with the new piece a2. udf_ads_merge returns non-zero when
1695 1.1.2.2 yamt * merging is not (or no longer) possible; the remaining piece is returned in a2.
1696 1.1.2.2 yamt */
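/*
 * A sketch of the merge with illustrative numbers and lb_size = 2048:
 * a1 = {part 0, lb 100, len 4096, allocated} ends exactly where
 * a2 = {part 0, lb 102, len 2048, allocated} starts, so 2048 bytes are moved
 * from a2 into a1, giving a1 len 6144 and a2 len 0 (fully merged, return 0).
 * Merging stops early when a1 would exceed the maximum extent length.
 */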
1697 1.1.2.2 yamt
1698 1.1.2.2 yamt static int
1699 1.1.2.2 yamt udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
1700 1.1.2.2 yamt {
1701 1.1.2.2 yamt uint32_t max_len, merge_len;
1702 1.1.2.2 yamt uint32_t a1_len, a2_len;
1703 1.1.2.2 yamt uint32_t a1_flags, a2_flags;
1704 1.1.2.2 yamt uint32_t a1_lbnum, a2_lbnum;
1705 1.1.2.2 yamt uint16_t a1_part, a2_part;
1706 1.1.2.2 yamt
1707 1.1.2.2 yamt max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
1708 1.1.2.2 yamt
1709 1.1.2.2 yamt a1_flags = UDF_EXT_FLAGS(udf_rw32(a1->len));
1710 1.1.2.2 yamt a1_len = UDF_EXT_LEN(udf_rw32(a1->len));
1711 1.1.2.2 yamt a1_lbnum = udf_rw32(a1->loc.lb_num);
1712 1.1.2.2 yamt a1_part = udf_rw16(a1->loc.part_num);
1713 1.1.2.2 yamt
1714 1.1.2.2 yamt a2_flags = UDF_EXT_FLAGS(udf_rw32(a2->len));
1715 1.1.2.2 yamt a2_len = UDF_EXT_LEN(udf_rw32(a2->len));
1716 1.1.2.2 yamt a2_lbnum = udf_rw32(a2->loc.lb_num);
1717 1.1.2.2 yamt a2_part = udf_rw16(a2->loc.part_num);
1718 1.1.2.2 yamt
1719 1.1.2.2 yamt /* extents must be of the same type to be mergeable */
1720 1.1.2.2 yamt if (a1_flags != a2_flags)
1721 1.1.2.2 yamt return 1;
1722 1.1.2.2 yamt
1723 1.1.2.2 yamt if (a1_flags != UDF_EXT_FREE) {
1724 1.1.2.2 yamt /* the same partition */
1725 1.1.2.2 yamt if (a1_part != a2_part)
1726 1.1.2.2 yamt return 1;
1727 1.1.2.2 yamt
1728 1.1.2.2 yamt /* a2 is successor of a1 */
1729 1.1.2.2 yamt if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
1730 1.1.2.2 yamt return 1;
1731 1.1.2.2 yamt }
1732 1.1.2.3 yamt
1733 1.1.2.2 yamt /* merge as much from a2 as possible */
1734 1.1.2.2 yamt merge_len = MIN(a2_len, max_len - a1_len);
1735 1.1.2.2 yamt a1_len += merge_len;
1736 1.1.2.2 yamt a2_len -= merge_len;
1737 1.1.2.2 yamt a2_lbnum += merge_len/lb_size;
1738 1.1.2.2 yamt
1739 1.1.2.2 yamt a1->len = udf_rw32(a1_len | a1_flags);
1740 1.1.2.2 yamt a2->len = udf_rw32(a2_len | a2_flags);
1741 1.1.2.2 yamt a2->loc.lb_num = udf_rw32(a2_lbnum);
1742 1.1.2.2 yamt
1743 1.1.2.2 yamt if (a2_len > 0)
1744 1.1.2.2 yamt return 1;
1745 1.1.2.2 yamt
1746 1.1.2.2 yamt /* a2 was merged completely */
1747 1.1.2.2 yamt return 0;
1748 1.1.2.2 yamt }
1749 1.1.2.2 yamt
1750 1.1.2.2 yamt /* --------------------------------------------------------------------- */
1751 1.1.2.2 yamt
1752 1.1.2.2 yamt static void
1753 1.1.2.2 yamt udf_wipe_adslots(struct udf_node *udf_node)
1754 1.1.2.2 yamt {
1755 1.1.2.2 yamt struct file_entry *fe;
1756 1.1.2.2 yamt struct extfile_entry *efe;
1757 1.1.2.2 yamt struct alloc_ext_entry *ext;
1758 1.1.2.2 yamt uint64_t inflen, objsize;
1759 1.1.2.2 yamt uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
1760 1.1.2.2 yamt uint8_t *data_pos;
1761 1.1.2.2 yamt int extnr;
1762 1.1.2.2 yamt
1763 1.1.2.2 yamt lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1764 1.1.2.2 yamt
1765 1.1.2.2 yamt fe = udf_node->fe;
1766 1.1.2.2 yamt efe = udf_node->efe;
1767 1.1.2.2 yamt if (fe) {
1768 1.1.2.2 yamt inflen = udf_rw64(fe->inf_len);
1769 1.1.2.2 yamt objsize = inflen;
1770 1.1.2.2 yamt dscr_size = sizeof(struct file_entry) -1;
1771 1.1.2.2 yamt l_ea = udf_rw32(fe->l_ea);
1772 1.1.2.2 yamt l_ad = udf_rw32(fe->l_ad);
1773 1.1.2.2 yamt data_pos = (uint8_t *) fe + dscr_size + l_ea;
1774 1.1.2.2 yamt } else {
1775 1.1.2.2 yamt inflen = udf_rw64(efe->inf_len);
1776 1.1.2.2 yamt objsize = udf_rw64(efe->obj_size);
1777 1.1.2.2 yamt dscr_size = sizeof(struct extfile_entry) -1;
1778 1.1.2.2 yamt l_ea = udf_rw32(efe->l_ea);
1779 1.1.2.2 yamt l_ad = udf_rw32(efe->l_ad);
1780 1.1.2.2 yamt data_pos = (uint8_t *) efe + dscr_size + l_ea;
1781 1.1.2.2 yamt }
1782 1.1.2.2 yamt max_l_ad = lb_size - dscr_size - l_ea;
1783 1.1.2.2 yamt
1784 1.1.2.2 yamt /* wipe fe/efe */
1785 1.1.2.2 yamt memset(data_pos, 0, max_l_ad);
1786 1.1.2.2 yamt crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
1787 1.1.2.2 yamt if (fe) {
1788 1.1.2.2 yamt fe->l_ad = udf_rw32(0);
1789 1.1.2.2 yamt fe->logblks_rec = udf_rw64(0);
1790 1.1.2.4 yamt fe->tag.desc_crc_len = udf_rw16(crclen);
1791 1.1.2.2 yamt } else {
1792 1.1.2.2 yamt efe->l_ad = udf_rw32(0);
1793 1.1.2.2 yamt efe->logblks_rec = udf_rw64(0);
1794 1.1.2.4 yamt efe->tag.desc_crc_len = udf_rw16(crclen);
1795 1.1.2.2 yamt }
1796 1.1.2.2 yamt
1797 1.1.2.2 yamt /* wipe all allocation extent entries */
1798 1.1.2.2 yamt for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
1799 1.1.2.2 yamt ext = udf_node->ext[extnr];
1800 1.1.2.2 yamt dscr_size = sizeof(struct alloc_ext_entry) -1;
1801 1.1.2.3 yamt data_pos = (uint8_t *) ext->data;
1802 1.1.2.2 yamt max_l_ad = lb_size - dscr_size;
1803 1.1.2.2 yamt memset(data_pos, 0, max_l_ad);
1804 1.1.2.2 yamt ext->l_ad = udf_rw32(0);
1805 1.1.2.2 yamt
1806 1.1.2.2 yamt crclen = dscr_size - UDF_DESC_TAG_LENGTH;
1807 1.1.2.4 yamt ext->tag.desc_crc_len = udf_rw16(crclen);
1808 1.1.2.2 yamt }
1809 1.1.2.3 yamt udf_node->i_flags |= IN_NODE_REBUILD;
1810 1.1.2.2 yamt }
1811 1.1.2.2 yamt
1812 1.1.2.2 yamt /* --------------------------------------------------------------------- */
1813 1.1.2.2 yamt
1814 1.1.2.2 yamt void
1815 1.1.2.2 yamt udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
1816 1.1.2.2 yamt int *eof) {
1817 1.1.2.2 yamt struct file_entry *fe;
1818 1.1.2.2 yamt struct extfile_entry *efe;
1819 1.1.2.2 yamt struct alloc_ext_entry *ext;
1820 1.1.2.2 yamt struct icb_tag *icbtag;
1821 1.1.2.2 yamt struct short_ad *short_ad;
1822 1.1.2.3 yamt struct long_ad *long_ad, l_icb;
1823 1.1.2.2 yamt uint32_t offset;
1824 1.1.2.3 yamt uint32_t lb_size, dscr_size, l_ea, l_ad, flags;
1825 1.1.2.2 yamt uint8_t *data_pos;
1826 1.1.2.2 yamt int icbflags, addr_type, adlen, extnr;
1827 1.1.2.2 yamt
1828 1.1.2.2 yamt /* determine what descriptor we are in */
1829 1.1.2.2 yamt lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
1830 1.1.2.2 yamt
1831 1.1.2.2 yamt fe = udf_node->fe;
1832 1.1.2.2 yamt efe = udf_node->efe;
1833 1.1.2.2 yamt if (fe) {
1834 1.1.2.2 yamt icbtag = &fe->icbtag;
1835 1.1.2.2 yamt dscr_size = sizeof(struct file_entry) -1;
1836 1.1.2.2 yamt l_ea = udf_rw32(fe->l_ea);
1837 1.1.2.2 yamt l_ad = udf_rw32(fe->l_ad);
1838 1.1.2.2 yamt data_pos = (uint8_t *) fe + dscr_size + l_ea;
1839 1.1.2.2 yamt } else {
1840 1.1.2.2 yamt icbtag = &efe->icbtag;
1841 1.1.2.2 yamt dscr_size = sizeof(struct extfile_entry) -1;
1842 1.1.2.2 yamt l_ea = udf_rw32(efe->l_ea);
1843 1.1.2.2 yamt l_ad = udf_rw32(efe->l_ad);
1844 1.1.2.2 yamt data_pos = (uint8_t *) efe + dscr_size + l_ea;
1845 1.1.2.2 yamt }
1846 1.1.2.2 yamt
1847 1.1.2.2 yamt icbflags = udf_rw16(icbtag->flags);
1848 1.1.2.2 yamt addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1849 1.1.2.2 yamt
1850 1.1.2.2 yamt /* just in case we're called on an intern, it's EOF */
1851 1.1.2.2 yamt if (addr_type == UDF_ICB_INTERN_ALLOC) {
1852 1.1.2.2 yamt memset(icb, 0, sizeof(struct long_ad));
1853 1.1.2.2 yamt *eof = 1;
1854 1.1.2.2 yamt return;
1855 1.1.2.2 yamt }
1856 1.1.2.2 yamt
1857 1.1.2.2 yamt adlen = 0;
1858 1.1.2.2 yamt if (addr_type == UDF_ICB_SHORT_ALLOC) {
1859 1.1.2.2 yamt adlen = sizeof(struct short_ad);
1860 1.1.2.2 yamt } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1861 1.1.2.2 yamt adlen = sizeof(struct long_ad);
1862 1.1.2.2 yamt }
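	/*
	 * Slot indices map linearly onto the descriptor area: a short_ad is
	 * 8 bytes and a long_ad 16 bytes, so e.g. slot 5 with short_ads lives
	 * at byte offset 40. Once the offset runs past l_ad the walk below
	 * follows redirect extents into the allocation extent descriptors.
	 */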
1863 1.1.2.2 yamt
1864 1.1.2.2 yamt /* if offset too big, we go to the allocation extensions */
1865 1.1.2.2 yamt offset = slot * adlen;
1866 1.1.2.3 yamt extnr = -1;
1867 1.1.2.3 yamt while (offset >= l_ad) {
1868 1.1.2.3 yamt /* check if our last entry is a redirect */
1869 1.1.2.3 yamt if (addr_type == UDF_ICB_SHORT_ALLOC) {
1870 1.1.2.3 yamt short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
1871 1.1.2.3 yamt l_icb.len = short_ad->len;
1872 1.1.2.3 yamt l_icb.loc.part_num = udf_node->loc.loc.part_num;
1873 1.1.2.3 yamt l_icb.loc.lb_num = short_ad->lb_num;
1874 1.1.2.3 yamt } else {
1875 1.1.2.3 yamt KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
1876 1.1.2.3 yamt long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
1877 1.1.2.3 yamt l_icb = *long_ad;
1878 1.1.2.3 yamt }
1879 1.1.2.3 yamt flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
1880 1.1.2.3 yamt if (flags != UDF_EXT_REDIRECT) {
1881 1.1.2.3 yamt l_ad = 0; /* force EOF */
1882 1.1.2.3 yamt break;
1883 1.1.2.3 yamt }
1884 1.1.2.3 yamt
1885 1.1.2.3 yamt /* advance to next extent */
1886 1.1.2.2 yamt extnr++;
1887 1.1.2.3 yamt if (extnr >= udf_node->num_extensions) {
1888 1.1.2.2 yamt l_ad = 0; /* force EOF */
1889 1.1.2.2 yamt break;
1890 1.1.2.2 yamt }
1891 1.1.2.3 yamt offset = offset - l_ad;
1892 1.1.2.3 yamt ext = udf_node->ext[extnr];
1893 1.1.2.3 yamt dscr_size = sizeof(struct alloc_ext_entry) -1;
1894 1.1.2.3 yamt l_ad = udf_rw32(ext->l_ad);
1895 1.1.2.3 yamt data_pos = (uint8_t *) ext + dscr_size;
1896 1.1.2.2 yamt }
1897 1.1.2.2 yamt
1898 1.1.2.3 yamt /* XXX l_ad == 0 should be enough to check */
1899 1.1.2.2 yamt *eof = (offset >= l_ad) || (l_ad == 0);
1900 1.1.2.2 yamt if (*eof) {
1901 1.1.2.3 yamt DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
1902 1.1.2.3 yamt "l_ad %d\n", extnr, offset, l_ad));
1903 1.1.2.2 yamt memset(icb, 0, sizeof(struct long_ad));
1904 1.1.2.2 yamt return;
1905 1.1.2.2 yamt }
1906 1.1.2.2 yamt
1907 1.1.2.2 yamt /* get the element */
1908 1.1.2.2 yamt if (addr_type == UDF_ICB_SHORT_ALLOC) {
1909 1.1.2.2 yamt short_ad = (struct short_ad *) (data_pos + offset);
1910 1.1.2.2 yamt icb->len = short_ad->len;
1911 1.1.2.3 yamt icb->loc.part_num = udf_node->loc.loc.part_num;
1912 1.1.2.2 yamt icb->loc.lb_num = short_ad->lb_num;
1913 1.1.2.2 yamt } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1914 1.1.2.2 yamt long_ad = (struct long_ad *) (data_pos + offset);
1915 1.1.2.2 yamt *icb = *long_ad;
1916 1.1.2.2 yamt }
1917 1.1.2.3 yamt DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
1918 1.1.2.3 yamt "flags %d\n", icb->loc.part_num, icb->loc.lb_num,
1919 1.1.2.3 yamt UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
1920 1.1.2.2 yamt }
1921 1.1.2.2 yamt
1922 1.1.2.2 yamt /* --------------------------------------------------------------------- */
1923 1.1.2.2 yamt
1924 1.1.2.2 yamt int
1925 1.1.2.3 yamt udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
1926 1.1.2.3 yamt struct udf_mount *ump = udf_node->ump;
1927 1.1.2.3 yamt union dscrptr *dscr, *extdscr;
1928 1.1.2.2 yamt struct file_entry *fe;
1929 1.1.2.2 yamt struct extfile_entry *efe;
1930 1.1.2.2 yamt struct alloc_ext_entry *ext;
1931 1.1.2.2 yamt struct icb_tag *icbtag;
1932 1.1.2.2 yamt struct short_ad *short_ad;
1933 1.1.2.3 yamt struct long_ad *long_ad, o_icb, l_icb;
1934 1.1.2.2 yamt uint64_t logblks_rec, *logblks_rec_p;
1935 1.1.2.3 yamt uint64_t lmapping;
1936 1.1.2.3 yamt uint32_t offset, rest, len, lb_num;
1937 1.1.2.2 yamt uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
1938 1.1.2.3 yamt uint32_t flags;
1939 1.1.2.3 yamt uint16_t vpart_num;
1940 1.1.2.2 yamt uint8_t *data_pos;
1941 1.1.2.2 yamt int icbflags, addr_type, adlen, extnr;
1942 1.1.2.3 yamt int error;
1943 1.1.2.2 yamt
1944 1.1.2.3 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
1945 1.1.2.3 yamt vpart_num = udf_rw16(udf_node->loc.loc.part_num);
1946 1.1.2.2 yamt
1947 1.1.2.3 yamt /* determine what descriptor we are in */
1948 1.1.2.2 yamt fe = udf_node->fe;
1949 1.1.2.2 yamt efe = udf_node->efe;
1950 1.1.2.2 yamt if (fe) {
1951 1.1.2.2 yamt icbtag = &fe->icbtag;
1952 1.1.2.2 yamt dscr = (union dscrptr *) fe;
1953 1.1.2.2 yamt dscr_size = sizeof(struct file_entry) -1;
1954 1.1.2.2 yamt
1955 1.1.2.2 yamt l_ea = udf_rw32(fe->l_ea);
1956 1.1.2.2 yamt l_ad_p = &fe->l_ad;
1957 1.1.2.2 yamt logblks_rec_p = &fe->logblks_rec;
1958 1.1.2.2 yamt } else {
1959 1.1.2.2 yamt icbtag = &efe->icbtag;
1960 1.1.2.2 yamt dscr = (union dscrptr *) efe;
1961 1.1.2.2 yamt dscr_size = sizeof(struct extfile_entry) -1;
1962 1.1.2.2 yamt
1963 1.1.2.2 yamt l_ea = udf_rw32(efe->l_ea);
1964 1.1.2.2 yamt l_ad_p = &efe->l_ad;
1965 1.1.2.2 yamt logblks_rec_p = &efe->logblks_rec;
1966 1.1.2.2 yamt }
1967 1.1.2.2 yamt data_pos = (uint8_t *) dscr + dscr_size + l_ea;
1968 1.1.2.2 yamt max_l_ad = lb_size - dscr_size - l_ea;
1969 1.1.2.2 yamt
1970 1.1.2.2 yamt icbflags = udf_rw16(icbtag->flags);
1971 1.1.2.2 yamt addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
1972 1.1.2.2 yamt
1973 1.1.2.2 yamt /* just in case we're called on an intern, it's EOF */
1974 1.1.2.2 yamt if (addr_type == UDF_ICB_INTERN_ALLOC) {
1975 1.1.2.2 yamt panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
1976 1.1.2.2 yamt }
1977 1.1.2.2 yamt
1978 1.1.2.2 yamt adlen = 0;
1979 1.1.2.2 yamt if (addr_type == UDF_ICB_SHORT_ALLOC) {
1980 1.1.2.2 yamt adlen = sizeof(struct short_ad);
1981 1.1.2.2 yamt } else if (addr_type == UDF_ICB_LONG_ALLOC) {
1982 1.1.2.2 yamt adlen = sizeof(struct long_ad);
1983 1.1.2.2 yamt }
1984 1.1.2.2 yamt
1985 1.1.2.3 yamt /* clean up given long_ad since it can be a synthesized one */
1986 1.1.2.3 yamt flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
1987 1.1.2.3 yamt if (flags == UDF_EXT_FREE) {
1988 1.1.2.3 yamt icb->loc.part_num = udf_rw16(0);
1989 1.1.2.3 yamt icb->loc.lb_num = udf_rw32(0);
1990 1.1.2.3 yamt }
1991 1.1.2.3 yamt
1992 1.1.2.2 yamt /* if offset too big, we go to the allocation extensions */
1993 1.1.2.3 yamt l_ad = udf_rw32(*l_ad_p);
1994 1.1.2.3 yamt offset = (*slot) * adlen;
1995 1.1.2.3 yamt extnr = -1;
1996 1.1.2.3 yamt while (offset >= l_ad) {
1997 1.1.2.3 yamt /* check if our last entry is a redirect */
1998 1.1.2.3 yamt if (addr_type == UDF_ICB_SHORT_ALLOC) {
1999 1.1.2.3 yamt short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
2000 1.1.2.3 yamt l_icb.len = short_ad->len;
2001 1.1.2.3 yamt l_icb.loc.part_num = udf_node->loc.loc.part_num;
2002 1.1.2.3 yamt l_icb.loc.lb_num = short_ad->lb_num;
2003 1.1.2.3 yamt } else {
2004 1.1.2.3 yamt KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
2005 1.1.2.3 yamt long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
2006 1.1.2.3 yamt l_icb = *long_ad;
2007 1.1.2.3 yamt }
2008 1.1.2.3 yamt flags = UDF_EXT_FLAGS(udf_rw32(l_icb.len));
2009 1.1.2.3 yamt if (flags != UDF_EXT_REDIRECT) {
2010 1.1.2.3 yamt /* only the slot one past the last one is addressable */
2011 1.1.2.3 yamt break;
2012 1.1.2.3 yamt }
2013 1.1.2.3 yamt
2014 1.1.2.3 yamt /* advance to next extent */
2015 1.1.2.3 yamt extnr++;
2016 1.1.2.3 yamt KASSERT(extnr < udf_node->num_extensions);
2017 1.1.2.3 yamt offset = offset - l_ad;
2018 1.1.2.3 yamt
2019 1.1.2.2 yamt ext = udf_node->ext[extnr];
2020 1.1.2.2 yamt dscr = (union dscrptr *) ext;
2021 1.1.2.2 yamt dscr_size = sizeof(struct alloc_ext_entry) -1;
2022 1.1.2.2 yamt max_l_ad = lb_size - dscr_size;
2023 1.1.2.3 yamt l_ad_p = &ext->l_ad;
2024 1.1.2.3 yamt l_ad = udf_rw32(*l_ad_p);
2025 1.1.2.3 yamt data_pos = (uint8_t *) ext + dscr_size;
2026 1.1.2.2 yamt }
2027 1.1.2.3 yamt DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
2028 1.1.2.3 yamt extnr, offset, udf_rw32(*l_ad_p)));
2029 1.1.2.3 yamt KASSERT(l_ad == udf_rw32(*l_ad_p));
2030 1.1.2.3 yamt
2031 1.1.2.2 yamt /* offset is offset within the current (E)FE/AED */
2032 1.1.2.2 yamt l_ad = udf_rw32(*l_ad_p);
2033 1.1.2.4 yamt crclen = udf_rw16(dscr->tag.desc_crc_len);
2034 1.1.2.2 yamt logblks_rec = udf_rw64(*logblks_rec_p);
2035 1.1.2.2 yamt
2036 1.1.2.2 yamt /* overwriting old piece? */
2037 1.1.2.2 yamt if (offset < l_ad) {
2038 1.1.2.2 yamt /* overwrite entry; compensate for the old element */
2039 1.1.2.2 yamt if (addr_type == UDF_ICB_SHORT_ALLOC) {
2040 1.1.2.2 yamt short_ad = (struct short_ad *) (data_pos + offset);
2041 1.1.2.2 yamt o_icb.len = short_ad->len;
2042 1.1.2.2 yamt o_icb.loc.part_num = udf_rw16(0); /* ignore */
2043 1.1.2.2 yamt o_icb.loc.lb_num = short_ad->lb_num;
2044 1.1.2.2 yamt } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2045 1.1.2.2 yamt long_ad = (struct long_ad *) (data_pos + offset);
2046 1.1.2.2 yamt o_icb = *long_ad;
2047 1.1.2.2 yamt } else {
2048 1.1.2.2 yamt panic("Invalid address type in udf_append_adslot\n");
2049 1.1.2.2 yamt }
2050 1.1.2.2 yamt
2051 1.1.2.2 yamt len = udf_rw32(o_icb.len);
2052 1.1.2.2 yamt if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
2053 1.1.2.2 yamt /* adjust counts */
2054 1.1.2.2 yamt len = UDF_EXT_LEN(len);
2055 1.1.2.2 yamt logblks_rec -= (len + lb_size -1) / lb_size;
2056 1.1.2.2 yamt }
2057 1.1.2.2 yamt }
2058 1.1.2.2 yamt
2059 1.1.2.3 yamt /* check if we're not appending a redirection */
2060 1.1.2.3 yamt flags = UDF_EXT_FLAGS(udf_rw32(icb->len));
2061 1.1.2.3 yamt KASSERT(flags != UDF_EXT_REDIRECT);
2062 1.1.2.3 yamt
2063 1.1.2.3 yamt /* round down available space */
2064 1.1.2.3 yamt rest = adlen * ((max_l_ad - offset) / adlen);
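	/*
	 * The last remaining slot in a descriptor is needed for the redirect
	 * to a new AED; e.g. with long_ads (16 bytes) and 24 bytes left,
	 * rest = 16 <= adlen, so a fresh allocation extent is appended below.
	 */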
2065 1.1.2.2 yamt if (rest <= adlen) {
2066 1.1.2.3 yamt /* have to append an AED; see if we already have a spare one */
2067 1.1.2.3 yamt extnr++;
2068 1.1.2.3 yamt ext = udf_node->ext[extnr];
2069 1.1.2.3 yamt l_icb = udf_node->ext_loc[extnr];
2070 1.1.2.3 yamt if (ext == NULL) {
2071 1.1.2.3 yamt DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));
2072 1.1.2.3 yamt
2073 1.1.2.5 yamt error = udf_reserve_space(ump, NULL, UDF_C_NODE,
2074 1.1.2.5 yamt vpart_num, 1, /* can fail */ false);
2075 1.1.2.5 yamt if (error) {
2076 1.1.2.5 yamt printf("UDF: couldn't reserve space for AED!\n");
2077 1.1.2.5 yamt return error;
2078 1.1.2.5 yamt }
2079 1.1.2.5 yamt error = udf_allocate_space(ump, NULL, UDF_C_NODE,
2080 1.1.2.5 yamt vpart_num, 1, &lmapping);
2081 1.1.2.3 yamt lb_num = lmapping;
2082 1.1.2.3 yamt if (error)
2083 1.1.2.5 yamt panic("UDF: couldn't allocate AED!\n");
2084 1.1.2.3 yamt
2085 1.1.2.3 yamt /* initialise pointer to location */
2086 1.1.2.3 yamt memset(&l_icb, 0, sizeof(struct long_ad));
2087 1.1.2.3 yamt l_icb.len = udf_rw32(lb_size | UDF_EXT_REDIRECT);
2088 1.1.2.3 yamt l_icb.loc.lb_num = udf_rw32(lb_num);
2089 1.1.2.3 yamt l_icb.loc.part_num = udf_rw16(vpart_num);
2090 1.1.2.3 yamt
2091 1.1.2.3 yamt /* create new aed descriptor */
2092 1.1.2.3 yamt udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
2093 1.1.2.3 yamt ext = &extdscr->aee;
2094 1.1.2.3 yamt
2095 1.1.2.3 yamt udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
2096 1.1.2.3 yamt dscr_size = sizeof(struct alloc_ext_entry) -1;
2097 1.1.2.3 yamt max_l_ad = lb_size - dscr_size;
2098 1.1.2.3 yamt memset(ext->data, 0, max_l_ad);
2099 1.1.2.3 yamt ext->l_ad = udf_rw32(0);
2100 1.1.2.3 yamt ext->tag.desc_crc_len =
2101 1.1.2.4 yamt udf_rw16(dscr_size - UDF_DESC_TAG_LENGTH);
2102 1.1.2.3 yamt
2103 1.1.2.3 yamt /* declare aed */
2104 1.1.2.3 yamt udf_node->num_extensions++;
2105 1.1.2.3 yamt udf_node->ext_loc[extnr] = l_icb;
2106 1.1.2.3 yamt udf_node->ext[extnr] = ext;
2107 1.1.2.3 yamt }
2108 1.1.2.3 yamt /* add redirect and adjust l_ad and crclen for old descr */
2109 1.1.2.3 yamt if (addr_type == UDF_ICB_SHORT_ALLOC) {
2110 1.1.2.3 yamt short_ad = (struct short_ad *) (data_pos + offset);
2111 1.1.2.3 yamt short_ad->len = l_icb.len;
2112 1.1.2.3 yamt short_ad->lb_num = l_icb.loc.lb_num;
2113 1.1.2.3 yamt } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2114 1.1.2.3 yamt long_ad = (struct long_ad *) (data_pos + offset);
2115 1.1.2.3 yamt *long_ad = l_icb;
2116 1.1.2.3 yamt }
2117 1.1.2.3 yamt l_ad += adlen;
2118 1.1.2.3 yamt crclen += adlen;
2119 1.1.2.4 yamt dscr->tag.desc_crc_len = udf_rw16(crclen);
2120 1.1.2.3 yamt *l_ad_p = udf_rw32(l_ad);
2121 1.1.2.3 yamt
2122 1.1.2.3 yamt /* advance to the new extension */
2123 1.1.2.3 yamt KASSERT(ext != NULL);
2124 1.1.2.3 yamt dscr = (union dscrptr *) ext;
2125 1.1.2.3 yamt dscr_size = sizeof(struct alloc_ext_entry) -1;
2126 1.1.2.3 yamt max_l_ad = lb_size - dscr_size;
2127 1.1.2.3 yamt data_pos = (uint8_t *) dscr + dscr_size;
2128 1.1.2.3 yamt
2129 1.1.2.3 yamt l_ad_p = &ext->l_ad;
2130 1.1.2.3 yamt l_ad = udf_rw32(*l_ad_p);
2131 1.1.2.4 yamt crclen = udf_rw16(dscr->tag.desc_crc_len);
2132 1.1.2.3 yamt offset = 0;
2133 1.1.2.3 yamt
2134 1.1.2.3 yamt /* adjust callee's slot count for link insert */
2135 1.1.2.3 yamt *slot += 1;
2136 1.1.2.2 yamt }
2137 1.1.2.2 yamt
2138 1.1.2.2 yamt /* write out the element */
2139 1.1.2.3 yamt DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
2140 1.1.2.3 yamt "len %d, flags %d\n", data_pos + offset,
2141 1.1.2.3 yamt icb->loc.part_num, icb->loc.lb_num,
2142 1.1.2.3 yamt UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
2143 1.1.2.2 yamt if (addr_type == UDF_ICB_SHORT_ALLOC) {
2144 1.1.2.2 yamt short_ad = (struct short_ad *) (data_pos + offset);
2145 1.1.2.2 yamt short_ad->len = icb->len;
2146 1.1.2.2 yamt short_ad->lb_num = icb->loc.lb_num;
2147 1.1.2.2 yamt } else if (addr_type == UDF_ICB_LONG_ALLOC) {
2148 1.1.2.2 yamt long_ad = (struct long_ad *) (data_pos + offset);
2149 1.1.2.2 yamt *long_ad = *icb;
2150 1.1.2.2 yamt }
2151 1.1.2.2 yamt
2152 1.1.2.2 yamt /* adjust logblks recorded count */
2153 1.1.2.4 yamt len = udf_rw32(icb->len);
2154 1.1.2.4 yamt flags = UDF_EXT_FLAGS(len);
2155 1.1.2.3 yamt if (flags == UDF_EXT_ALLOCATED)
2156 1.1.2.4 yamt logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
2157 1.1.2.2 yamt *logblks_rec_p = udf_rw64(logblks_rec);
2158 1.1.2.2 yamt
2159 1.1.2.2 yamt /* adjust l_ad and crclen when needed */
2160 1.1.2.2 yamt if (offset >= l_ad) {
2161 1.1.2.2 yamt l_ad += adlen;
2162 1.1.2.2 yamt crclen += adlen;
2163 1.1.2.4 yamt dscr->tag.desc_crc_len = udf_rw16(crclen);
2164 1.1.2.2 yamt *l_ad_p = udf_rw32(l_ad);
2165 1.1.2.2 yamt }
2166 1.1.2.2 yamt
2167 1.1.2.2 yamt return 0;
2168 1.1.2.2 yamt }
2169 1.1.2.2 yamt
2170 1.1.2.2 yamt /* --------------------------------------------------------------------- */
2171 1.1.2.2 yamt
2172 1.1.2.3 yamt static void
2173 1.1.2.3 yamt udf_count_alloc_exts(struct udf_node *udf_node)
2174 1.1.2.3 yamt {
2175 1.1.2.3 yamt struct long_ad s_ad;
2176 1.1.2.3 yamt uint32_t lb_num, len, flags;
2177 1.1.2.3 yamt uint16_t vpart_num;
2178 1.1.2.3 yamt int slot, eof;
2179 1.1.2.3 yamt int num_extents, extnr;
2180 1.1.2.3 yamt int lb_size;
2181 1.1.2.3 yamt
2182 1.1.2.3 yamt if (udf_node->num_extensions == 0)
2183 1.1.2.3 yamt return;
2184 1.1.2.3 yamt
2185 1.1.2.3 yamt lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2186 1.1.2.3 yamt /* count number of allocation extents in use */
2187 1.1.2.3 yamt num_extents = 0;
2188 1.1.2.3 yamt slot = 0;
2189 1.1.2.3 yamt for (;;) {
2190 1.1.2.3 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
2191 1.1.2.3 yamt if (eof)
2192 1.1.2.3 yamt break;
2193 1.1.2.3 yamt len = udf_rw32(s_ad.len);
2194 1.1.2.3 yamt flags = UDF_EXT_FLAGS(len);
2195 1.1.2.3 yamt
2196 1.1.2.3 yamt if (flags == UDF_EXT_REDIRECT)
2197 1.1.2.3 yamt num_extents++;
2198 1.1.2.3 yamt
2199 1.1.2.3 yamt slot++;
2200 1.1.2.3 yamt }
2201 1.1.2.3 yamt
2202 1.1.2.3 yamt DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
2203 1.1.2.3 yamt num_extents));
2204 1.1.2.3 yamt
2205 1.1.2.3 yamt /* XXX choice: we could delay freeing them on node writeout */
2206 1.1.2.3 yamt /* free excess entries */
2207 1.1.2.3 yamt extnr = num_extents;
2208 1.1.2.3 yamt for (;extnr < udf_node->num_extensions; extnr++) {
2209 1.1.2.3 yamt DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
2210 1.1.2.3 yamt /* free descriptor */
2211 1.1.2.3 yamt s_ad = udf_node->ext_loc[extnr];
2212 1.1.2.3 yamt udf_free_logvol_dscr(udf_node->ump, &s_ad,
2213 1.1.2.3 yamt udf_node->ext[extnr]);
2214 1.1.2.3 yamt udf_node->ext[extnr] = NULL;
2215 1.1.2.3 yamt
2216 1.1.2.3 yamt /* free disc space */
2217 1.1.2.3 yamt lb_num = udf_rw32(s_ad.loc.lb_num);
2218 1.1.2.3 yamt vpart_num = udf_rw16(s_ad.loc.part_num);
2219 1.1.2.3 yamt udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
2220 1.1.2.3 yamt
2221 1.1.2.3 yamt memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
2222 1.1.2.3 yamt }
2223 1.1.2.3 yamt
2224 1.1.2.3 yamt /* set our new number of allocation extents */
2225 1.1.2.3 yamt udf_node->num_extensions = num_extents;
2226 1.1.2.3 yamt }
2227 1.1.2.3 yamt
2228 1.1.2.3 yamt
2229 1.1.2.3 yamt /* --------------------------------------------------------------------- */
2230 1.1.2.3 yamt
2231 1.1.2.2 yamt /*
2232 1.1.2.2 yamt * Adjust the node's allocation descriptors to reflect the new mapping; note
2233 1.1.2.2 yamt * that we might glue onto existing allocation descriptors.
2234 1.1.2.2 yamt *
2235 1.1.2.2 yamt * XXX Note there can only be one allocation being recorded per mount; maybe
2236 1.1.2.2 yamt * do explicit allocation in the schedule thread?
2237 1.1.2.2 yamt */
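/*
 * The rewrite below stages the node's new extent list in node_ad_cpy:
 * the untouched head extents, the truncated head of the overlapped extent,
 * the freshly allocated mappings, the surviving tail of the overlap and the
 * untouched remainder are pushed in order, after which the old descriptors
 * are wiped and the stack is written back, merging extents where possible.
 */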
2238 1.1.2.2 yamt
2239 1.1.2.2 yamt static void
2240 1.1.2.2 yamt udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
2241 1.1.2.2 yamt uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
2242 1.1.2.2 yamt {
2243 1.1.2.2 yamt struct vnode *vp = buf->b_vp;
2244 1.1.2.2 yamt struct udf_node *udf_node = VTOI(vp);
2245 1.1.2.2 yamt struct file_entry *fe;
2246 1.1.2.2 yamt struct extfile_entry *efe;
2247 1.1.2.2 yamt struct icb_tag *icbtag;
2248 1.1.2.2 yamt struct long_ad s_ad, c_ad;
2249 1.1.2.2 yamt uint64_t inflen, from, till;
2250 1.1.2.2 yamt uint64_t foffset, end_foffset, restart_foffset;
2251 1.1.2.2 yamt uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2252 1.1.2.2 yamt uint32_t num_lb, len, flags, lb_num;
2253 1.1.2.2 yamt uint32_t run_start;
2254 1.1.2.3 yamt uint32_t slot_offset, replace_len, replace;
2255 1.1.2.2 yamt int addr_type, icbflags;
2256 1.1.2.3 yamt // int udf_c_type = buf->b_udf_c_type;
2257 1.1.2.2 yamt int lb_size, run_length, eof;
2258 1.1.2.2 yamt int slot, cpy_slot, cpy_slots, restart_slot;
2259 1.1.2.2 yamt int error;
2260 1.1.2.2 yamt
2261 1.1.2.2 yamt DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
2262 1.1.2.2 yamt
2263 1.1.2.3 yamt #if 0
2264 1.1.2.3 yamt /* XXX disable sanity check for now */
2265 1.1.2.2 yamt /* sanity check ... should be panic ? */
2266 1.1.2.2 yamt if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
2267 1.1.2.2 yamt return;
2268 1.1.2.3 yamt #endif
2269 1.1.2.2 yamt
2270 1.1.2.2 yamt lb_size = udf_rw32(udf_node->ump->logical_vol->lb_size);
2271 1.1.2.2 yamt
2272 1.1.2.2 yamt /* do the job */
2273 1.1.2.2 yamt UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
2274 1.1.2.3 yamt udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2275 1.1.2.2 yamt
2276 1.1.2.2 yamt fe = udf_node->fe;
2277 1.1.2.2 yamt efe = udf_node->efe;
2278 1.1.2.2 yamt if (fe) {
2279 1.1.2.2 yamt icbtag = &fe->icbtag;
2280 1.1.2.2 yamt inflen = udf_rw64(fe->inf_len);
2281 1.1.2.2 yamt } else {
2282 1.1.2.2 yamt icbtag = &efe->icbtag;
2283 1.1.2.2 yamt inflen = udf_rw64(efe->inf_len);
2284 1.1.2.2 yamt }
2285 1.1.2.2 yamt
2286 1.1.2.2 yamt /* make sure `till' is not past the file's information length */
2287 1.1.2.2 yamt from = buf->b_lblkno * lb_size;
2288 1.1.2.2 yamt till = MIN(inflen, from + buf->b_resid);
2289 1.1.2.2 yamt
2290 1.1.2.2 yamt num_lb = (till - from + lb_size -1) / lb_size;
2291 1.1.2.2 yamt
2292 1.1.2.3 yamt DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
2293 1.1.2.2 yamt
2294 1.1.2.2 yamt icbflags = udf_rw16(icbtag->flags);
2295 1.1.2.2 yamt addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2296 1.1.2.2 yamt
2297 1.1.2.2 yamt if (addr_type == UDF_ICB_INTERN_ALLOC) {
2298 1.1.2.2 yamt /* nothing to do */
2299 1.1.2.2 yamt /* XXX clean up rest of node? just in case? */
2300 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
2301 1.1.2.2 yamt return;
2302 1.1.2.2 yamt }
2303 1.1.2.2 yamt
2304 1.1.2.2 yamt slot = 0;
2305 1.1.2.2 yamt cpy_slot = 0;
2306 1.1.2.2 yamt foffset = 0;
2307 1.1.2.2 yamt
2308 1.1.2.2 yamt /* 1) copy till first overlap piece to the rewrite buffer */
2309 1.1.2.2 yamt for (;;) {
2310 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
2311 1.1.2.2 yamt if (eof) {
2312 1.1.2.2 yamt DPRINTF(WRITE,
2313 1.1.2.2 yamt ("Record allocation in node "
2314 1.1.2.2 yamt "failed: encountered EOF\n"));
2315 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
2316 1.1.2.2 yamt buf->b_error = EINVAL;
2317 1.1.2.2 yamt return;
2318 1.1.2.2 yamt }
2319 1.1.2.2 yamt len = udf_rw32(s_ad.len);
2320 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
2321 1.1.2.2 yamt len = UDF_EXT_LEN(len);
2322 1.1.2.2 yamt
2323 1.1.2.2 yamt if (flags == UDF_EXT_REDIRECT) {
2324 1.1.2.2 yamt slot++;
2325 1.1.2.2 yamt continue;
2326 1.1.2.2 yamt }
2327 1.1.2.2 yamt
2328 1.1.2.2 yamt end_foffset = foffset + len;
2329 1.1.2.2 yamt if (end_foffset > from)
2330 1.1.2.2 yamt break; /* found */
2331 1.1.2.2 yamt
2332 1.1.2.2 yamt node_ad_cpy[cpy_slot++] = s_ad;
2333 1.1.2.2 yamt
2334 1.1.2.2 yamt DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
2335 1.1.2.2 yamt "-> stack\n",
2336 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
2337 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
2338 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
2339 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2340 1.1.2.2 yamt
2341 1.1.2.2 yamt foffset = end_foffset;
2342 1.1.2.2 yamt slot++;
2343 1.1.2.2 yamt }
2344 1.1.2.2 yamt restart_slot = slot;
2345 1.1.2.2 yamt restart_foffset = foffset;
2346 1.1.2.2 yamt
2347 1.1.2.2 yamt /* 2) trunc overlapping slot at overlap and copy it */
2348 1.1.2.2 yamt slot_offset = from - foffset;
2349 1.1.2.2 yamt if (slot_offset > 0) {
2350 1.1.2.2 yamt DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
2351 1.1.2.2 yamt slot_offset, flags >> 30, flags));
2352 1.1.2.2 yamt
2353 1.1.2.2 yamt s_ad.len = udf_rw32(slot_offset | flags);
2354 1.1.2.2 yamt node_ad_cpy[cpy_slot++] = s_ad;
2355 1.1.2.2 yamt
2356 1.1.2.2 yamt DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
2357 1.1.2.2 yamt "-> stack\n",
2358 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
2359 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
2360 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
2361 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2362 1.1.2.2 yamt }
2363 1.1.2.2 yamt foffset += slot_offset;
2364 1.1.2.2 yamt
2365 1.1.2.2 yamt /* 3) insert new mappings */
2366 1.1.2.2 yamt memset(&s_ad, 0, sizeof(struct long_ad));
2367 1.1.2.2 yamt lb_num = 0;
2368 1.1.2.2 yamt for (lb_num = 0; lb_num < num_lb; lb_num++) {
2369 1.1.2.2 yamt run_start = mapping[lb_num];
2370 1.1.2.2 yamt run_length = 1;
2371 1.1.2.2 yamt while (lb_num < num_lb-1) {
2372 1.1.2.2 yamt if (mapping[lb_num+1] != mapping[lb_num]+1)
2373 1.1.2.2 yamt if (mapping[lb_num+1] != mapping[lb_num])
2374 1.1.2.2 yamt break;
2375 1.1.2.2 yamt run_length++;
2376 1.1.2.2 yamt lb_num++;
2377 1.1.2.2 yamt }
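		/*
		 * The loop above coalesces consecutive (or repeated) entries
		 * of mapping[] into one run; e.g. mapping = {200, 201, 202,
		 * 500} yields a run of 3 blocks at lb 200 followed by a run
		 * of 1 block at lb 500.
		 */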
2378 1.1.2.2 yamt /* insert slot for this mapping */
2379 1.1.2.2 yamt len = run_length * lb_size;
2380 1.1.2.2 yamt
2381 1.1.2.2 yamt /* bounds checking */
2382 1.1.2.2 yamt if (foffset + len > till)
2383 1.1.2.2 yamt len = till - foffset;
2384 1.1.2.2 yamt KASSERT(foffset + len <= inflen);
2385 1.1.2.2 yamt
2386 1.1.2.2 yamt s_ad.len = udf_rw32(len | UDF_EXT_ALLOCATED);
2387 1.1.2.2 yamt s_ad.loc.part_num = udf_rw16(vpart_num);
2388 1.1.2.2 yamt s_ad.loc.lb_num = udf_rw32(run_start);
2389 1.1.2.2 yamt
2390 1.1.2.2 yamt foffset += len;
2391 1.1.2.2 yamt
2392 1.1.2.2 yamt /* paranoia */
2393 1.1.2.2 yamt if (len == 0) {
2394 1.1.2.2 yamt DPRINTF(WRITE,
2395 1.1.2.2 yamt ("Record allocation in node "
2396 1.1.2.2 yamt "failed: insert failed\n"));
2397 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
2398 1.1.2.2 yamt buf->b_error = EINVAL;
2399 1.1.2.2 yamt return;
2400 1.1.2.2 yamt }
2401 1.1.2.2 yamt node_ad_cpy[cpy_slot++] = s_ad;
2402 1.1.2.2 yamt
2403 1.1.2.2 yamt DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
2404 1.1.2.2 yamt "flags %d -> stack\n",
2405 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num), udf_rw32(s_ad.loc.lb_num),
2406 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
2407 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2408 1.1.2.2 yamt }
2409 1.1.2.2 yamt
2410 1.1.2.2 yamt /* 4) pop replaced length */
2411 1.1.2.3 yamt slot = restart_slot;
2412 1.1.2.2 yamt foffset = restart_foffset;
2413 1.1.2.2 yamt
2414 1.1.2.3 yamt replace_len = till - foffset; /* total amount of bytes to pop */
2415 1.1.2.3 yamt slot_offset = from - foffset; /* offset in first encountered slot */
2416 1.1.2.3 yamt KASSERT((slot_offset % lb_size) == 0);
2417 1.1.2.3 yamt
2418 1.1.2.2 yamt for (;;) {
2419 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
2420 1.1.2.2 yamt if (eof)
2421 1.1.2.2 yamt break;
2422 1.1.2.2 yamt
2423 1.1.2.2 yamt len = udf_rw32(s_ad.len);
2424 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
2425 1.1.2.2 yamt len = UDF_EXT_LEN(len);
2426 1.1.2.2 yamt lb_num = udf_rw32(s_ad.loc.lb_num);
2427 1.1.2.2 yamt
2428 1.1.2.2 yamt if (flags == UDF_EXT_REDIRECT) {
2429 1.1.2.2 yamt slot++;
2430 1.1.2.2 yamt continue;
2431 1.1.2.2 yamt }
2432 1.1.2.2 yamt
2433 1.1.2.3 yamt DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
2434 1.1.2.3 yamt "replace_len %d, "
2435 1.1.2.3 yamt "vp %d, lb %d, len %d, flags %d\n",
2436 1.1.2.3 yamt slot, slot_offset, replace_len,
2437 1.1.2.3 yamt udf_rw16(s_ad.loc.part_num),
2438 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
2439 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
2440 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2441 1.1.2.2 yamt
2442 1.1.2.3 yamt /* adjust for slot offset */
2443 1.1.2.3 yamt if (slot_offset) {
2444 1.1.2.3 yamt DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
2445 1.1.2.3 yamt lb_num += slot_offset / lb_size;
2446 1.1.2.3 yamt len -= slot_offset;
2447 1.1.2.3 yamt foffset += slot_offset;
2448 1.1.2.3 yamt replace_len -= slot_offset;
2449 1.1.2.3 yamt
2450 1.1.2.3 yamt /* mark adjusted */
2451 1.1.2.3 yamt slot_offset = 0;
2452 1.1.2.3 yamt }
2453 1.1.2.3 yamt
2454 1.1.2.3 yamt /* advance for (the rest of) this slot */
2455 1.1.2.3 yamt replace = MIN(len, replace_len);
2456 1.1.2.3 yamt DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
2457 1.1.2.3 yamt
2458 1.1.2.3 yamt /* advance for this slot */
2459 1.1.2.3 yamt if (replace) {
2460 1.1.2.3 yamt /* note: don't round DOWN on num_lb since we then
2461 1.1.2.3 yamt * forget the last partial one */
2462 1.1.2.3 yamt num_lb = (replace + lb_size - 1) / lb_size;
2463 1.1.2.3 yamt if (flags != UDF_EXT_FREE) {
2464 1.1.2.3 yamt udf_free_allocated_space(ump, lb_num,
2465 1.1.2.3 yamt udf_rw16(s_ad.loc.part_num), num_lb);
2466 1.1.2.2 yamt }
2467 1.1.2.3 yamt lb_num += num_lb;
2468 1.1.2.3 yamt len -= replace;
2469 1.1.2.3 yamt foffset += replace;
2470 1.1.2.3 yamt replace_len -= replace;
2471 1.1.2.3 yamt }
2472 1.1.2.2 yamt
2473 1.1.2.3 yamt /* do we have a slot tail ? */
2474 1.1.2.2 yamt if (len) {
2475 1.1.2.3 yamt KASSERT(foffset % lb_size == 0);
2476 1.1.2.2 yamt
2477 1.1.2.2 yamt /* we arrived at our point, push remainder */
2478 1.1.2.2 yamt s_ad.len = udf_rw32(len | flags);
2479 1.1.2.2 yamt s_ad.loc.lb_num = udf_rw32(lb_num);
2480 1.1.2.3 yamt if (flags == UDF_EXT_FREE)
2481 1.1.2.3 yamt s_ad.loc.lb_num = udf_rw32(0);
2482 1.1.2.2 yamt node_ad_cpy[cpy_slot++] = s_ad;
2483 1.1.2.2 yamt foffset += len;
2484 1.1.2.2 yamt slot++;
2485 1.1.2.2 yamt
2486 1.1.2.2 yamt DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
2487 1.1.2.2 yamt "-> stack\n",
2488 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
2489 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
2490 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
2491 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2492 1.1.2.2 yamt break;
2493 1.1.2.2 yamt }
2494 1.1.2.3 yamt
2495 1.1.2.2 yamt slot++;
2496 1.1.2.2 yamt }
2497 1.1.2.2 yamt
2498 1.1.2.2 yamt /* 5) copy remainder */
2499 1.1.2.2 yamt for (;;) {
2500 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
2501 1.1.2.2 yamt if (eof)
2502 1.1.2.2 yamt break;
2503 1.1.2.2 yamt
2504 1.1.2.2 yamt len = udf_rw32(s_ad.len);
2505 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
2506 1.1.2.2 yamt len = UDF_EXT_LEN(len);
2507 1.1.2.2 yamt
2508 1.1.2.2 yamt if (flags == UDF_EXT_REDIRECT) {
2509 1.1.2.2 yamt slot++;
2510 1.1.2.2 yamt continue;
2511 1.1.2.2 yamt }
2512 1.1.2.2 yamt
2513 1.1.2.2 yamt node_ad_cpy[cpy_slot++] = s_ad;
2514 1.1.2.2 yamt
2515 1.1.2.2 yamt DPRINTF(ALLOC, ("\t5: insert new mapping "
2516 1.1.2.2 yamt "vp %d lb %d, len %d, flags %d "
2517 1.1.2.2 yamt "-> stack\n",
2518 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
2519 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
2520 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
2521 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2522 1.1.2.2 yamt
2523 1.1.2.2 yamt slot++;
2524 1.1.2.2 yamt }
2525 1.1.2.2 yamt
2526 1.1.2.2 yamt /* 6) reset node descriptors */
2527 1.1.2.2 yamt udf_wipe_adslots(udf_node);
2528 1.1.2.2 yamt
2529 1.1.2.2 yamt /* 7) copy back extents; merge when possible. Recounting on the fly */
2530 1.1.2.2 yamt cpy_slots = cpy_slot;
2531 1.1.2.2 yamt
2532 1.1.2.2 yamt c_ad = node_ad_cpy[0];
2533 1.1.2.2 yamt slot = 0;
2534 1.1.2.2 yamt DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
2535 1.1.2.2 yamt "lb %d, len %d, flags %d\n",
2536 1.1.2.2 yamt udf_rw16(c_ad.loc.part_num),
2537 1.1.2.2 yamt udf_rw32(c_ad.loc.lb_num),
2538 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(c_ad.len)),
2539 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2540 1.1.2.2 yamt
2541 1.1.2.2 yamt for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
2542 1.1.2.2 yamt s_ad = node_ad_cpy[cpy_slot];
2543 1.1.2.2 yamt
2544 1.1.2.2 yamt DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
2545 1.1.2.2 yamt "lb %d, len %d, flags %d\n",
2546 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
2547 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
2548 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
2549 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
2550 1.1.2.2 yamt
2551 1.1.2.2 yamt /* see if we can merge */
2552 1.1.2.2 yamt if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2553 1.1.2.2 yamt /* not mergeable (anymore) */
2554 1.1.2.2 yamt DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
2555 1.1.2.2 yamt "len %d, flags %d\n",
2556 1.1.2.2 yamt udf_rw16(c_ad.loc.part_num),
2557 1.1.2.2 yamt udf_rw32(c_ad.loc.lb_num),
2558 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(c_ad.len)),
2559 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2560 1.1.2.2 yamt
2561 1.1.2.3 yamt error = udf_append_adslot(udf_node, &slot, &c_ad);
2562 1.1.2.2 yamt if (error) {
2563 1.1.2.2 yamt buf->b_error = error;
2564 1.1.2.2 yamt goto out;
2565 1.1.2.2 yamt }
2566 1.1.2.2 yamt c_ad = s_ad;
2567 1.1.2.2 yamt slot++;
2568 1.1.2.2 yamt }
2569 1.1.2.2 yamt }
2570 1.1.2.2 yamt
2571 1.1.2.2 yamt /* 8) push rest slot (if any) */
2572 1.1.2.2 yamt if (UDF_EXT_LEN(c_ad.len) > 0) {
2573 1.1.2.2 yamt DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
2574 1.1.2.2 yamt "len %d, flags %d\n",
2575 1.1.2.2 yamt udf_rw16(c_ad.loc.part_num),
2576 1.1.2.2 yamt udf_rw32(c_ad.loc.lb_num),
2577 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(c_ad.len)),
2578 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
2579 1.1.2.2 yamt
2580 1.1.2.3 yamt error = udf_append_adslot(udf_node, &slot, &c_ad);
2581 1.1.2.2 yamt if (error) {
2582 1.1.2.2 yamt buf->b_error = error;
2583 1.1.2.2 yamt goto out;
2584 1.1.2.2 yamt }
2585 1.1.2.2 yamt }
2586 1.1.2.2 yamt
2587 1.1.2.2 yamt out:
2588 1.1.2.3 yamt udf_count_alloc_exts(udf_node);
2589 1.1.2.2 yamt
2590 1.1.2.3 yamt /* the node's descriptors should now be sane */
2591 1.1.2.2 yamt udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2592 1.1.2.3 yamt UDF_UNLOCK_NODE(udf_node, 0);
2593 1.1.2.2 yamt
2594 1.1.2.2 yamt KASSERT(orig_inflen == new_inflen);
2595 1.1.2.2 yamt KASSERT(new_lbrec >= orig_lbrec);
2596 1.1.2.2 yamt
2597 1.1.2.2 yamt return;
2598 1.1.2.2 yamt }
2599 1.1.2.2 yamt
2600 1.1.2.2 yamt /* --------------------------------------------------------------------- */
2601 1.1.2.2 yamt
2602 1.1.2.2 yamt int
2603 1.1.2.2 yamt udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
2604 1.1.2.2 yamt {
2605 1.1.2.2 yamt union dscrptr *dscr;
2606 1.1.2.2 yamt struct vnode *vp = udf_node->vnode;
2607 1.1.2.2 yamt struct udf_mount *ump = udf_node->ump;
2608 1.1.2.2 yamt struct file_entry *fe;
2609 1.1.2.2 yamt struct extfile_entry *efe;
2610 1.1.2.2 yamt struct icb_tag *icbtag;
2611 1.1.2.2 yamt struct long_ad c_ad, s_ad;
2612 1.1.2.2 yamt uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
2613 1.1.2.2 yamt uint64_t foffset, end_foffset;
2614 1.1.2.2 yamt uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2615 1.1.2.2 yamt uint32_t lb_size, dscr_size, crclen, lastblock_grow;
2616 1.1.2.3 yamt uint32_t icbflags, len, flags, max_len;
2617 1.1.2.2 yamt uint32_t max_l_ad, l_ad, l_ea;
2618 1.1.2.3 yamt uint16_t my_part, dst_part;
2619 1.1.2.2 yamt uint8_t *data_pos, *evacuated_data;
2620 1.1.2.3 yamt int addr_type;
2621 1.1.2.2 yamt int slot, cpy_slot;
2622 1.1.2.5 yamt int eof, error;
2623 1.1.2.2 yamt
2624 1.1.2.2 yamt DPRINTF(ALLOC, ("udf_grow_node\n"));
2625 1.1.2.2 yamt
2626 1.1.2.2 yamt UDF_LOCK_NODE(udf_node, 0);
2627 1.1.2.3 yamt udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2628 1.1.2.3 yamt
2629 1.1.2.2 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
2630 1.1.2.2 yamt max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2631 1.1.2.2 yamt
2632 1.1.2.2 yamt fe = udf_node->fe;
2633 1.1.2.2 yamt efe = udf_node->efe;
2634 1.1.2.2 yamt if (fe) {
2635 1.1.2.2 yamt dscr = (union dscrptr *) fe;
2636 1.1.2.2 yamt icbtag = &fe->icbtag;
2637 1.1.2.2 yamt inflen = udf_rw64(fe->inf_len);
2638 1.1.2.2 yamt objsize = inflen;
2639 1.1.2.2 yamt dscr_size = sizeof(struct file_entry) -1;
2640 1.1.2.2 yamt l_ea = udf_rw32(fe->l_ea);
2641 1.1.2.2 yamt l_ad = udf_rw32(fe->l_ad);
2642 1.1.2.2 yamt } else {
2643 1.1.2.2 yamt dscr = (union dscrptr *) efe;
2644 1.1.2.2 yamt icbtag = &efe->icbtag;
2645 1.1.2.2 yamt inflen = udf_rw64(efe->inf_len);
2646 1.1.2.2 yamt objsize = udf_rw64(efe->obj_size);
2647 1.1.2.2 yamt dscr_size = sizeof(struct extfile_entry) -1;
2648 1.1.2.2 yamt l_ea = udf_rw32(efe->l_ea);
2649 1.1.2.2 yamt l_ad = udf_rw32(efe->l_ad);
2650 1.1.2.2 yamt }
2651 1.1.2.2 yamt data_pos = (uint8_t *) dscr + dscr_size + l_ea;
2652 1.1.2.2 yamt max_l_ad = lb_size - dscr_size - l_ea;
2653 1.1.2.2 yamt
2654 1.1.2.2 yamt icbflags = udf_rw16(icbtag->flags);
2655 1.1.2.2 yamt addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2656 1.1.2.2 yamt
2657 1.1.2.2 yamt old_size = inflen;
2658 1.1.2.2 yamt size_diff = new_size - old_size;
2659 1.1.2.2 yamt
2660 1.1.2.2 yamt DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2661 1.1.2.2 yamt
2662 1.1.2.2 yamt evacuated_data = NULL;
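	/*
	 * Two cases follow. For internally allocated nodes the file data is
	 * embedded in the descriptor itself: if the grown data still fits
	 * (l_ad + size_diff <= max_l_ad) only the length fields need updating;
	 * if not, the embedded data is read out and the node is converted to
	 * extent based allocation below. Nodes that already use extents fall
	 * through to the chunked growth further down.
	 */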
2663 1.1.2.2 yamt if (addr_type == UDF_ICB_INTERN_ALLOC) {
2664 1.1.2.2 yamt if (l_ad + size_diff <= max_l_ad) {
2665 1.1.2.2 yamt /* only reflect size change directly in the node */
2666 1.1.2.2 yamt inflen += size_diff;
2667 1.1.2.2 yamt objsize += size_diff;
2668 1.1.2.2 yamt l_ad += size_diff;
2669 1.1.2.2 yamt crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2670 1.1.2.2 yamt if (fe) {
2671 1.1.2.2 yamt fe->inf_len = udf_rw64(inflen);
2672 1.1.2.2 yamt fe->l_ad = udf_rw32(l_ad);
2673 1.1.2.4 yamt fe->tag.desc_crc_len = udf_rw16(crclen);
2674 1.1.2.2 yamt } else {
2675 1.1.2.2 yamt efe->inf_len = udf_rw64(inflen);
2676 1.1.2.2 yamt efe->obj_size = udf_rw64(objsize);
2677 1.1.2.2 yamt efe->l_ad = udf_rw32(l_ad);
2678 1.1.2.4 yamt efe->tag.desc_crc_len = udf_rw16(crclen);
2679 1.1.2.2 yamt }
2680 1.1.2.2 yamt error = 0;
2681 1.1.2.2 yamt
2682 1.1.2.2 yamt /* set new size for uvm */
2683 1.1.2.2 yamt uvm_vnp_setsize(vp, old_size);
2684 1.1.2.2 yamt uvm_vnp_setwritesize(vp, new_size);
2685 1.1.2.2 yamt
2686 1.1.2.2 yamt #if 0
2687 1.1.2.2 yamt /* zero append space in buffer */
2688 1.1.2.2 yamt uvm_vnp_zerorange(vp, old_size, new_size - old_size);
2689 1.1.2.2 yamt #endif
2690 1.1.2.2 yamt
2691 1.1.2.3 yamt udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2692 1.1.2.3 yamt
2693 1.1.2.2 yamt /* unlock */
2694 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
2695 1.1.2.2 yamt
2696 1.1.2.2 yamt KASSERT(new_inflen == orig_inflen + size_diff);
2697 1.1.2.2 yamt KASSERT(new_lbrec == orig_lbrec);
2698 1.1.2.2 yamt KASSERT(new_lbrec == 0);
2699 1.1.2.2 yamt return 0;
2700 1.1.2.2 yamt }
2701 1.1.2.2 yamt
2702 1.1.2.2 yamt DPRINTF(ALLOC, ("\tCONVERT from internal\n"));
2703 1.1.2.2 yamt
2704 1.1.2.2 yamt if (old_size > 0) {
2705 1.1.2.2 yamt /* allocate some space and copy in the stuff to keep */
2706 1.1.2.2 yamt evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
2707 1.1.2.2 yamt memset(evacuated_data, 0, lb_size);
2708 1.1.2.2 yamt
2709 1.1.2.2 yamt /* node is locked, so safe to exit mutex */
2710 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
2711 1.1.2.2 yamt
2712 1.1.2.2 yamt /* read in using the `normal' vn_rdwr() */
2713 1.1.2.2 yamt error = vn_rdwr(UIO_READ, udf_node->vnode,
2714 1.1.2.2 yamt evacuated_data, old_size, 0,
2715 1.1.2.2 yamt UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2716 1.1.2.2 yamt FSCRED, NULL, NULL);
2717 1.1.2.2 yamt
2718 1.1.2.2 yamt /* enter again */
2719 1.1.2.2 yamt UDF_LOCK_NODE(udf_node, 0);
2720 1.1.2.2 yamt }
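		/*
		 * The data read into evacuated_data is written back with
		 * vn_rdwr(UIO_WRITE) near the end of this function, once the
		 * new allocation extents are in place.
		 */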
2721 1.1.2.2 yamt
2722 1.1.2.3 yamt /* convert to a normal alloc and select type */
2723 1.1.2.3 yamt my_part = udf_rw16(udf_node->loc.loc.part_num);
2724 1.1.2.5 yamt dst_part = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
2725 1.1.2.3 yamt addr_type = UDF_ICB_SHORT_ALLOC;
2726 1.1.2.3 yamt if (dst_part != my_part)
2727 1.1.2.3 yamt addr_type = UDF_ICB_LONG_ALLOC;
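		/*
		 * Short allocation descriptors carry no partition number and
		 * can only address the partition the node resides on, hence
		 * the switch to long descriptors when recording goes to a
		 * different virtual partition.
		 */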
2728 1.1.2.3 yamt
2729 1.1.2.2 yamt icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2730 1.1.2.3 yamt icbflags |= addr_type;
2731 1.1.2.2 yamt icbtag->flags = udf_rw16(icbflags);
2732 1.1.2.2 yamt
2733 1.1.2.2 yamt /* wipe old descriptor space */
2734 1.1.2.2 yamt udf_wipe_adslots(udf_node);
2735 1.1.2.2 yamt
2736 1.1.2.2 yamt memset(&c_ad, 0, sizeof(struct long_ad));
2737 1.1.2.2 yamt c_ad.len = udf_rw32(old_size | UDF_EXT_FREE);
2738 1.1.2.2 yamt c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2739 1.1.2.2 yamt c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2740 1.1.2.2 yamt
2741 1.1.2.2 yamt slot = 0;
2742 1.1.2.2 yamt } else {
2743 1.1.2.2 yamt 		/* go to the last entry (if any) */
2744 1.1.2.2 yamt slot = 0;
2745 1.1.2.2 yamt cpy_slot = 0;
2746 1.1.2.2 yamt foffset = 0;
2747 1.1.2.2 yamt memset(&c_ad, 0, sizeof(struct long_ad));
2748 1.1.2.2 yamt for (;;) {
2749 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &c_ad, &eof);
2750 1.1.2.2 yamt if (eof)
2751 1.1.2.2 yamt break;
2752 1.1.2.2 yamt
2753 1.1.2.2 yamt len = udf_rw32(c_ad.len);
2754 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
2755 1.1.2.2 yamt len = UDF_EXT_LEN(len);
2756 1.1.2.2 yamt
2757 1.1.2.2 yamt end_foffset = foffset + len;
2758 1.1.2.2 yamt if (flags != UDF_EXT_REDIRECT)
2759 1.1.2.2 yamt foffset = end_foffset;
2760 1.1.2.2 yamt
2761 1.1.2.2 yamt slot++;
2762 1.1.2.2 yamt }
2763 1.1.2.2 yamt /* at end of adslots */
2764 1.1.2.2 yamt
2765 1.1.2.2 yamt 		/* special case: if the old size was zero there is no last slot */
2766 1.1.2.2 yamt if (old_size == 0) {
2767 1.1.2.2 yamt c_ad.len = udf_rw32(0 | UDF_EXT_FREE);
2768 1.1.2.2 yamt c_ad.loc.part_num = udf_rw16(0); /* not relevant */
2769 1.1.2.2 yamt c_ad.loc.lb_num = udf_rw32(0); /* not relevant */
2770 1.1.2.2 yamt } else {
2771 1.1.2.2 yamt /* refetch last slot */
2772 1.1.2.2 yamt slot--;
2773 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &c_ad, &eof);
2774 1.1.2.2 yamt }
2775 1.1.2.2 yamt }
2776 1.1.2.2 yamt
2777 1.1.2.2 yamt /*
2778 1.1.2.2 yamt * If the length of the last slot is not a multiple of lb_size, adjust
2779 1.1.2.2 yamt 	 * the length so that it is, and subtract the amount grown from
2780 1.1.2.2 yamt 	 * `append_len'. This is relevant when extending existing files.
2781 1.1.2.2 yamt */
2782 1.1.2.2 yamt len = udf_rw32(c_ad.len);
2783 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
2784 1.1.2.2 yamt len = UDF_EXT_LEN(len);
2785 1.1.2.2 yamt
2786 1.1.2.2 yamt lastblock_grow = 0;
2787 1.1.2.2 yamt if (len % lb_size > 0) {
2788 1.1.2.2 yamt lastblock_grow = lb_size - (len % lb_size);
2789 1.1.2.2 yamt lastblock_grow = MIN(size_diff, lastblock_grow);
2790 1.1.2.2 yamt len += lastblock_grow;
2791 1.1.2.2 yamt c_ad.len = udf_rw32(len | flags);
2792 1.1.2.2 yamt
2793 1.1.2.2 yamt 		/* TODO zero appended space in buffer! */
2794 1.1.2.2 yamt /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
2795 1.1.2.2 yamt }
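	/*
	 * Example: with lb_size 2048 and a last extent of 5000 bytes, the
	 * extent is first grown by min(size_diff, 1144) bytes so that it ends
	 * on a block boundary before whole-block extents are appended.
	 */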
2796 1.1.2.2 yamt memset(&s_ad, 0, sizeof(struct long_ad));
2797 1.1.2.2 yamt
2798 1.1.2.2 yamt /* size_diff can be bigger than allowed, so grow in chunks */
2799 1.1.2.2 yamt append_len = size_diff - lastblock_grow;
2800 1.1.2.2 yamt while (append_len > 0) {
2801 1.1.2.2 yamt chunk = MIN(append_len, max_len);
2802 1.1.2.2 yamt s_ad.len = udf_rw32(chunk | UDF_EXT_FREE);
2803 1.1.2.2 yamt s_ad.loc.part_num = udf_rw16(0);
2804 1.1.2.2 yamt s_ad.loc.lb_num = udf_rw32(0);
2805 1.1.2.2 yamt
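		/*
		 * udf_ads_merge() tries to fold s_ad into c_ad; a non-zero
		 * return means the two extents could not be merged, in which
		 * case the accumulated c_ad is flushed to an adslot and s_ad
		 * becomes the new accumulator.
		 */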
2806 1.1.2.2 yamt if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
2807 1.1.2.2 yamt 			/* not mergeable (anymore) */
2808 1.1.2.3 yamt error = udf_append_adslot(udf_node, &slot, &c_ad);
2809 1.1.2.2 yamt if (error)
2810 1.1.2.2 yamt goto errorout;
2811 1.1.2.2 yamt slot++;
2812 1.1.2.2 yamt c_ad = s_ad;
2813 1.1.2.2 yamt memset(&s_ad, 0, sizeof(struct long_ad));
2814 1.1.2.2 yamt }
2815 1.1.2.2 yamt append_len -= chunk;
2816 1.1.2.2 yamt }
2817 1.1.2.2 yamt
2818 1.1.2.2 yamt /* if there is a rest piece in the accumulator, append it */
2819 1.1.2.3 yamt if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
2820 1.1.2.3 yamt error = udf_append_adslot(udf_node, &slot, &c_ad);
2821 1.1.2.2 yamt if (error)
2822 1.1.2.2 yamt goto errorout;
2823 1.1.2.2 yamt slot++;
2824 1.1.2.2 yamt }
2825 1.1.2.2 yamt
2826 1.1.2.2 yamt /* if there is a rest piece that didn't fit, append it */
2827 1.1.2.3 yamt if (UDF_EXT_LEN(udf_rw32(s_ad.len)) > 0) {
2828 1.1.2.3 yamt error = udf_append_adslot(udf_node, &slot, &s_ad);
2829 1.1.2.2 yamt if (error)
2830 1.1.2.2 yamt goto errorout;
2831 1.1.2.2 yamt slot++;
2832 1.1.2.2 yamt }
2833 1.1.2.2 yamt
2834 1.1.2.2 yamt inflen += size_diff;
2835 1.1.2.2 yamt objsize += size_diff;
2836 1.1.2.2 yamt if (fe) {
2837 1.1.2.2 yamt fe->inf_len = udf_rw64(inflen);
2838 1.1.2.2 yamt } else {
2839 1.1.2.2 yamt efe->inf_len = udf_rw64(inflen);
2840 1.1.2.2 yamt efe->obj_size = udf_rw64(objsize);
2841 1.1.2.2 yamt }
2842 1.1.2.2 yamt error = 0;
2843 1.1.2.2 yamt
2844 1.1.2.2 yamt if (evacuated_data) {
2845 1.1.2.2 yamt /* set new write size for uvm */
2846 1.1.2.2 yamt uvm_vnp_setwritesize(vp, old_size);
2847 1.1.2.2 yamt
2848 1.1.2.2 yamt /* write out evacuated data */
2849 1.1.2.2 yamt error = vn_rdwr(UIO_WRITE, udf_node->vnode,
2850 1.1.2.2 yamt evacuated_data, old_size, 0,
2851 1.1.2.2 yamt UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
2852 1.1.2.2 yamt FSCRED, NULL, NULL);
2853 1.1.2.2 yamt uvm_vnp_setsize(vp, old_size);
2854 1.1.2.2 yamt }
2855 1.1.2.2 yamt
2856 1.1.2.2 yamt errorout:
2857 1.1.2.2 yamt if (evacuated_data)
2858 1.1.2.2 yamt free(evacuated_data, M_UDFTEMP);
2859 1.1.2.3 yamt
2860 1.1.2.3 yamt udf_count_alloc_exts(udf_node);
2861 1.1.2.2 yamt
2862 1.1.2.2 yamt udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2863 1.1.2.3 yamt UDF_UNLOCK_NODE(udf_node, 0);
2864 1.1.2.3 yamt
2865 1.1.2.2 yamt KASSERT(new_inflen == orig_inflen + size_diff);
2866 1.1.2.2 yamt KASSERT(new_lbrec == orig_lbrec);
2867 1.1.2.2 yamt
2868 1.1.2.2 yamt return error;
2869 1.1.2.2 yamt }
2870 1.1.2.2 yamt
2871 1.1.2.2 yamt /* --------------------------------------------------------------------- */
2872 1.1.2.2 yamt
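/*
 * udf_shrink_node() truncates the file to new_size. Internally allocated
 * nodes only need their length fields adjusted and the freed descriptor
 * space zeroed. For extent based nodes the allocation slots are walked:
 * extents entirely below new_size are kept, the extent straddling new_size
 * is shortened (whole blocks past the cut are freed) and all later extents
 * are released. The surviving extents are then merged where possible and
 * written back; shrinking to zero converts the node back to internal
 * allocation. Returns 0 on success or an errno.
 */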
2873 1.1.2.2 yamt int
2874 1.1.2.2 yamt udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
2875 1.1.2.2 yamt {
2876 1.1.2.2 yamt struct vnode *vp = udf_node->vnode;
2877 1.1.2.2 yamt struct udf_mount *ump = udf_node->ump;
2878 1.1.2.2 yamt struct file_entry *fe;
2879 1.1.2.2 yamt struct extfile_entry *efe;
2880 1.1.2.2 yamt struct icb_tag *icbtag;
2881 1.1.2.2 yamt struct long_ad c_ad, s_ad, *node_ad_cpy;
2882 1.1.2.2 yamt uint64_t size_diff, old_size, inflen, objsize;
2883 1.1.2.2 yamt uint64_t foffset, end_foffset;
2884 1.1.2.2 yamt uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
2885 1.1.2.2 yamt uint32_t lb_size, dscr_size, crclen;
2886 1.1.2.5 yamt uint32_t slot_offset, slot_offset_lb;
2887 1.1.2.2 yamt uint32_t len, flags, max_len;
2888 1.1.2.2 yamt uint32_t num_lb, lb_num;
2889 1.1.2.2 yamt uint32_t max_l_ad, l_ad, l_ea;
2890 1.1.2.2 yamt uint16_t vpart_num;
2891 1.1.2.2 yamt uint8_t *data_pos;
2892 1.1.2.2 yamt int icbflags, addr_type;
2893 1.1.2.2 yamt int slot, cpy_slot, cpy_slots;
2894 1.1.2.2 yamt int eof, error;
2895 1.1.2.2 yamt
2896 1.1.2.2 yamt DPRINTF(ALLOC, ("udf_shrink_node\n"));
2897 1.1.2.2 yamt
2898 1.1.2.2 yamt UDF_LOCK_NODE(udf_node, 0);
2899 1.1.2.3 yamt udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
2900 1.1.2.3 yamt
2901 1.1.2.2 yamt lb_size = udf_rw32(ump->logical_vol->lb_size);
2902 1.1.2.2 yamt max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
2903 1.1.2.2 yamt
2904 1.1.2.2 yamt /* do the work */
2905 1.1.2.2 yamt fe = udf_node->fe;
2906 1.1.2.2 yamt efe = udf_node->efe;
2907 1.1.2.2 yamt if (fe) {
2908 1.1.2.2 yamt icbtag = &fe->icbtag;
2909 1.1.2.2 yamt inflen = udf_rw64(fe->inf_len);
2910 1.1.2.2 yamt objsize = inflen;
2911 1.1.2.2 yamt dscr_size = sizeof(struct file_entry) -1;
2912 1.1.2.2 yamt l_ea = udf_rw32(fe->l_ea);
2913 1.1.2.2 yamt l_ad = udf_rw32(fe->l_ad);
2914 1.1.2.2 yamt data_pos = (uint8_t *) fe + dscr_size + l_ea;
2915 1.1.2.2 yamt } else {
2916 1.1.2.2 yamt icbtag = &efe->icbtag;
2917 1.1.2.2 yamt inflen = udf_rw64(efe->inf_len);
2918 1.1.2.2 yamt objsize = udf_rw64(efe->obj_size);
2919 1.1.2.2 yamt dscr_size = sizeof(struct extfile_entry) -1;
2920 1.1.2.2 yamt l_ea = udf_rw32(efe->l_ea);
2921 1.1.2.2 yamt l_ad = udf_rw32(efe->l_ad);
2922 1.1.2.2 yamt data_pos = (uint8_t *) efe + dscr_size + l_ea;
2923 1.1.2.2 yamt }
2924 1.1.2.2 yamt max_l_ad = lb_size - dscr_size - l_ea;
2925 1.1.2.2 yamt
2926 1.1.2.2 yamt icbflags = udf_rw16(icbtag->flags);
2927 1.1.2.2 yamt addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
2928 1.1.2.2 yamt
2929 1.1.2.2 yamt old_size = inflen;
2930 1.1.2.2 yamt size_diff = old_size - new_size;
2931 1.1.2.2 yamt
2932 1.1.2.2 yamt DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
2933 1.1.2.2 yamt
2934 1.1.2.2 yamt /* shrink the node to its new size */
2935 1.1.2.2 yamt if (addr_type == UDF_ICB_INTERN_ALLOC) {
2936 1.1.2.2 yamt /* only reflect size change directly in the node */
2937 1.1.2.2 yamt KASSERT(new_size <= max_l_ad);
2938 1.1.2.2 yamt inflen -= size_diff;
2939 1.1.2.2 yamt objsize -= size_diff;
2940 1.1.2.2 yamt l_ad -= size_diff;
2941 1.1.2.2 yamt crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
2942 1.1.2.2 yamt if (fe) {
2943 1.1.2.2 yamt fe->inf_len = udf_rw64(inflen);
2944 1.1.2.2 yamt fe->l_ad = udf_rw32(l_ad);
2945 1.1.2.4 yamt fe->tag.desc_crc_len = udf_rw16(crclen);
2946 1.1.2.2 yamt } else {
2947 1.1.2.2 yamt efe->inf_len = udf_rw64(inflen);
2948 1.1.2.2 yamt efe->obj_size = udf_rw64(objsize);
2949 1.1.2.2 yamt efe->l_ad = udf_rw32(l_ad);
2950 1.1.2.4 yamt efe->tag.desc_crc_len = udf_rw16(crclen);
2951 1.1.2.2 yamt }
2952 1.1.2.2 yamt error = 0;
2953 1.1.2.3 yamt
2954 1.1.2.3 yamt /* clear the space in the descriptor */
2955 1.1.2.3 yamt KASSERT(old_size > new_size);
2956 1.1.2.3 yamt memset(data_pos + new_size, 0, old_size - new_size);
2957 1.1.2.3 yamt
2958 1.1.2.2 yamt 		/* TODO zero appended space in buffer! */
2959 1.1.2.2 yamt /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2960 1.1.2.2 yamt
2961 1.1.2.2 yamt /* set new size for uvm */
2962 1.1.2.2 yamt uvm_vnp_setsize(vp, new_size);
2963 1.1.2.2 yamt
2964 1.1.2.2 yamt udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
2965 1.1.2.3 yamt UDF_UNLOCK_NODE(udf_node, 0);
2966 1.1.2.3 yamt
2967 1.1.2.2 yamt KASSERT(new_inflen == orig_inflen - size_diff);
2968 1.1.2.2 yamt KASSERT(new_lbrec == orig_lbrec);
2969 1.1.2.2 yamt KASSERT(new_lbrec == 0);
2970 1.1.2.2 yamt
2971 1.1.2.2 yamt return 0;
2972 1.1.2.2 yamt }
2973 1.1.2.2 yamt
2974 1.1.2.2 yamt 	/* set up the node cleanup extents copy space */
2975 1.1.2.2 yamt node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
2976 1.1.2.2 yamt M_UDFMNT, M_WAITOK);
2977 1.1.2.2 yamt memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
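	/*
	 * node_ad_cpy provides staging room for UDF_MAX_ALLOC_EXTENTS logical
	 * blocks worth of long_ad entries; every extent that survives the
	 * truncation is pushed here before the descriptor space is wiped and
	 * rewritten in steps 5 and 6.
	 */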
2978 1.1.2.2 yamt
2979 1.1.2.2 yamt /*
2980 1.1.2.2 yamt * Shrink the node by releasing the allocations and truncate the last
2981 1.1.2.2 yamt * allocation to the new size. If the new size fits into the
2982 1.1.2.2 yamt * allocation descriptor itself, transform it into an
2983 1.1.2.2 yamt * UDF_ICB_INTERN_ALLOC.
2984 1.1.2.2 yamt */
2985 1.1.2.2 yamt slot = 0;
2986 1.1.2.2 yamt cpy_slot = 0;
2987 1.1.2.2 yamt foffset = 0;
2988 1.1.2.2 yamt
2989 1.1.2.2 yamt 	/* 1) copy extents up to the first overlapping piece into the rewrite buffer */
2990 1.1.2.2 yamt for (;;) {
2991 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
2992 1.1.2.2 yamt if (eof) {
2993 1.1.2.2 yamt DPRINTF(WRITE,
2994 1.1.2.2 yamt ("Shrink node failed: "
2995 1.1.2.2 yamt "encountered EOF\n"));
2996 1.1.2.2 yamt error = EINVAL;
2997 1.1.2.2 yamt goto errorout; /* panic? */
2998 1.1.2.2 yamt }
2999 1.1.2.2 yamt len = udf_rw32(s_ad.len);
3000 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
3001 1.1.2.2 yamt len = UDF_EXT_LEN(len);
3002 1.1.2.2 yamt
3003 1.1.2.2 yamt if (flags == UDF_EXT_REDIRECT) {
3004 1.1.2.2 yamt slot++;
3005 1.1.2.2 yamt continue;
3006 1.1.2.2 yamt }
3007 1.1.2.2 yamt
3008 1.1.2.2 yamt end_foffset = foffset + len;
3009 1.1.2.2 yamt if (end_foffset > new_size)
3010 1.1.2.2 yamt break; /* found */
3011 1.1.2.2 yamt
3012 1.1.2.2 yamt node_ad_cpy[cpy_slot++] = s_ad;
3013 1.1.2.2 yamt
3014 1.1.2.2 yamt DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
3015 1.1.2.2 yamt "-> stack\n",
3016 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
3017 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
3018 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
3019 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3020 1.1.2.2 yamt
3021 1.1.2.2 yamt foffset = end_foffset;
3022 1.1.2.2 yamt slot++;
3023 1.1.2.2 yamt }
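	/*
	 * slot_offset is the number of bytes of the overlapping extent that
	 * remain in use. Example: if new_size is 10000 and the extents kept
	 * in step 1 cover 8192 bytes, the straddling extent is cut after
	 * slot_offset = 1808 bytes.
	 */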
3024 1.1.2.2 yamt slot_offset = new_size - foffset;
3025 1.1.2.2 yamt
3026 1.1.2.2 yamt 	/* 2) truncate the overlapping slot at the cut point and copy it */
3027 1.1.2.2 yamt if (slot_offset > 0) {
3028 1.1.2.2 yamt lb_num = udf_rw32(s_ad.loc.lb_num);
3029 1.1.2.2 yamt vpart_num = udf_rw16(s_ad.loc.part_num);
3030 1.1.2.2 yamt
3031 1.1.2.2 yamt if (flags == UDF_EXT_ALLOCATED) {
3032 1.1.2.5 yamt /* calculate extent in lb, and offset in lb */
3033 1.1.2.5 yamt num_lb = (len + lb_size -1) / lb_size;
3034 1.1.2.5 yamt slot_offset_lb = (slot_offset + lb_size -1) / lb_size;
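			/*
			 * Rounding slot_offset up to whole blocks keeps the
			 * partially used last block allocated; only the blocks
			 * entirely past the cut are freed below.
			 */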
3035 1.1.2.5 yamt
3036 1.1.2.5 yamt /* adjust our slot */
3037 1.1.2.5 yamt lb_num += slot_offset_lb;
3038 1.1.2.5 yamt num_lb -= slot_offset_lb;
3039 1.1.2.2 yamt
3040 1.1.2.2 yamt udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
3041 1.1.2.2 yamt }
3042 1.1.2.2 yamt
3043 1.1.2.2 yamt s_ad.len = udf_rw32(slot_offset | flags);
3044 1.1.2.2 yamt node_ad_cpy[cpy_slot++] = s_ad;
3045 1.1.2.2 yamt slot++;
3046 1.1.2.2 yamt
3047 1.1.2.2 yamt DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
3048 1.1.2.2 yamt "-> stack\n",
3049 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
3050 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
3051 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
3052 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3053 1.1.2.2 yamt }
3054 1.1.2.2 yamt
3055 1.1.2.2 yamt /* 3) delete remainder */
3056 1.1.2.2 yamt for (;;) {
3057 1.1.2.2 yamt udf_get_adslot(udf_node, slot, &s_ad, &eof);
3058 1.1.2.2 yamt if (eof)
3059 1.1.2.2 yamt break;
3060 1.1.2.2 yamt
3061 1.1.2.2 yamt len = udf_rw32(s_ad.len);
3062 1.1.2.2 yamt flags = UDF_EXT_FLAGS(len);
3063 1.1.2.2 yamt len = UDF_EXT_LEN(len);
3064 1.1.2.2 yamt
3065 1.1.2.2 yamt if (flags == UDF_EXT_REDIRECT) {
3066 1.1.2.2 yamt slot++;
3067 1.1.2.2 yamt continue;
3068 1.1.2.2 yamt }
3069 1.1.2.2 yamt
3070 1.1.2.2 yamt DPRINTF(ALLOC, ("\t3: delete remainder "
3071 1.1.2.2 yamt "vp %d lb %d, len %d, flags %d\n",
3072 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
3073 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
3074 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
3075 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3076 1.1.2.2 yamt
3077 1.1.2.2 yamt if (flags == UDF_EXT_ALLOCATED) {
3078 1.1.2.2 yamt lb_num = udf_rw32(s_ad.loc.lb_num);
3079 1.1.2.2 yamt vpart_num = udf_rw16(s_ad.loc.part_num);
3080 1.1.2.2 yamt num_lb = (len + lb_size - 1) / lb_size;
3081 1.1.2.2 yamt
3082 1.1.2.2 yamt udf_free_allocated_space(ump, lb_num, vpart_num,
3083 1.1.2.2 yamt num_lb);
3084 1.1.2.2 yamt }
3085 1.1.2.2 yamt
3086 1.1.2.2 yamt slot++;
3087 1.1.2.2 yamt }
3088 1.1.2.2 yamt
3089 1.1.2.2 yamt /* 4) if it will fit into the descriptor then convert */
3090 1.1.2.2 yamt if (new_size < max_l_ad) {
3091 1.1.2.2 yamt /*
3092 1.1.2.2 yamt 		 * rescue/evacuate old piece by reading it in, and convert it
3093 1.1.2.2 yamt * to internal alloc.
3094 1.1.2.2 yamt */
3095 1.1.2.2 yamt if (new_size == 0) {
3096 1.1.2.2 yamt /* XXX/TODO only for zero sizing now */
3097 1.1.2.2 yamt udf_wipe_adslots(udf_node);
3098 1.1.2.2 yamt
3099 1.1.2.2 yamt icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
3100 1.1.2.2 yamt icbflags |= UDF_ICB_INTERN_ALLOC;
3101 1.1.2.2 yamt icbtag->flags = udf_rw16(icbflags);
3102 1.1.2.2 yamt
3103 1.1.2.2 yamt inflen -= size_diff; KASSERT(inflen == 0);
3104 1.1.2.2 yamt objsize -= size_diff;
3105 1.1.2.2 yamt l_ad = new_size;
3106 1.1.2.2 yamt crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
3107 1.1.2.2 yamt if (fe) {
3108 1.1.2.2 yamt fe->inf_len = udf_rw64(inflen);
3109 1.1.2.2 yamt fe->l_ad = udf_rw32(l_ad);
3110 1.1.2.4 yamt fe->tag.desc_crc_len = udf_rw16(crclen);
3111 1.1.2.2 yamt } else {
3112 1.1.2.2 yamt efe->inf_len = udf_rw64(inflen);
3113 1.1.2.2 yamt efe->obj_size = udf_rw64(objsize);
3114 1.1.2.2 yamt efe->l_ad = udf_rw32(l_ad);
3115 1.1.2.4 yamt efe->tag.desc_crc_len = udf_rw16(crclen);
3116 1.1.2.2 yamt }
3117 1.1.2.2 yamt 			/* an evacuated piece would be copied back in here; nothing to do for size zero */
3118 1.1.2.2 yamt /* set new size for uvm */
3119 1.1.2.2 yamt uvm_vnp_setsize(vp, new_size);
3120 1.1.2.2 yamt
3121 1.1.2.2 yamt free(node_ad_cpy, M_UDFMNT);
3122 1.1.2.3 yamt udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
3123 1.1.2.3 yamt
3124 1.1.2.2 yamt UDF_UNLOCK_NODE(udf_node, 0);
3125 1.1.2.2 yamt
3126 1.1.2.2 yamt KASSERT(new_inflen == orig_inflen - size_diff);
3127 1.1.2.2 yamt KASSERT(new_inflen == 0);
3128 1.1.2.2 yamt KASSERT(new_lbrec == 0);
3129 1.1.2.2 yamt
3130 1.1.2.2 yamt return 0;
3131 1.1.2.2 yamt }
3132 1.1.2.2 yamt
3133 1.1.2.2 yamt printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
3134 1.1.2.2 yamt }
3135 1.1.2.2 yamt
3136 1.1.2.2 yamt /* 5) reset node descriptors */
3137 1.1.2.2 yamt udf_wipe_adslots(udf_node);
3138 1.1.2.2 yamt
3139 1.1.2.2 yamt /* 6) copy back extents; merge when possible. Recounting on the fly */
3140 1.1.2.2 yamt cpy_slots = cpy_slot;
3141 1.1.2.2 yamt
3142 1.1.2.2 yamt c_ad = node_ad_cpy[0];
3143 1.1.2.2 yamt slot = 0;
3144 1.1.2.2 yamt for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
3145 1.1.2.2 yamt s_ad = node_ad_cpy[cpy_slot];
3146 1.1.2.2 yamt
3147 1.1.2.2 yamt DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
3148 1.1.2.2 yamt "lb %d, len %d, flags %d\n",
3149 1.1.2.2 yamt udf_rw16(s_ad.loc.part_num),
3150 1.1.2.2 yamt udf_rw32(s_ad.loc.lb_num),
3151 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(s_ad.len)),
3152 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(s_ad.len)) >> 30));
3153 1.1.2.2 yamt
3154 1.1.2.2 yamt /* see if we can merge */
3155 1.1.2.2 yamt if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
3156 1.1.2.2 yamt 			/* not mergeable (anymore) */
3157 1.1.2.2 yamt DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
3158 1.1.2.2 yamt "len %d, flags %d\n",
3159 1.1.2.2 yamt udf_rw16(c_ad.loc.part_num),
3160 1.1.2.2 yamt udf_rw32(c_ad.loc.lb_num),
3161 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(c_ad.len)),
3162 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
3163 1.1.2.2 yamt
3164 1.1.2.3 yamt error = udf_append_adslot(udf_node, &slot, &c_ad);
3165 1.1.2.2 yamt if (error)
3166 1.1.2.2 yamt goto errorout; /* panic? */
3167 1.1.2.2 yamt c_ad = s_ad;
3168 1.1.2.2 yamt slot++;
3169 1.1.2.2 yamt }
3170 1.1.2.2 yamt }
3171 1.1.2.2 yamt
3172 1.1.2.2 yamt /* 7) push rest slot (if any) */
3173 1.1.2.2 yamt 	if (UDF_EXT_LEN(udf_rw32(c_ad.len)) > 0) {
3174 1.1.2.2 yamt DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
3175 1.1.2.2 yamt "len %d, flags %d\n",
3176 1.1.2.2 yamt udf_rw16(c_ad.loc.part_num),
3177 1.1.2.2 yamt udf_rw32(c_ad.loc.lb_num),
3178 1.1.2.2 yamt UDF_EXT_LEN(udf_rw32(c_ad.len)),
3179 1.1.2.2 yamt UDF_EXT_FLAGS(udf_rw32(c_ad.len)) >> 30));
3180 1.1.2.2 yamt
3181 1.1.2.3 yamt error = udf_append_adslot(udf_node, &slot, &c_ad);
3182 1.1.2.2 yamt if (error)
3183 1.1.2.2 yamt goto errorout; /* panic? */
3185 1.1.2.2 yamt }
3186 1.1.2.2 yamt
3187 1.1.2.2 yamt inflen -= size_diff;
3188 1.1.2.2 yamt objsize -= size_diff;
3189 1.1.2.2 yamt if (fe) {
3190 1.1.2.2 yamt fe->inf_len = udf_rw64(inflen);
3191 1.1.2.2 yamt } else {
3192 1.1.2.2 yamt efe->inf_len = udf_rw64(inflen);
3193 1.1.2.2 yamt efe->obj_size = udf_rw64(objsize);
3194 1.1.2.2 yamt }
3195 1.1.2.2 yamt error = 0;
3196 1.1.2.2 yamt
3197 1.1.2.2 yamt /* set new size for uvm */
3198 1.1.2.2 yamt uvm_vnp_setsize(vp, new_size);
3199 1.1.2.2 yamt
3200 1.1.2.2 yamt errorout:
3201 1.1.2.2 yamt free(node_ad_cpy, M_UDFMNT);
3202 1.1.2.3 yamt
3203 1.1.2.3 yamt udf_count_alloc_exts(udf_node);
3204 1.1.2.2 yamt
3205 1.1.2.2 yamt udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
3206 1.1.2.3 yamt UDF_UNLOCK_NODE(udf_node, 0);
3207 1.1.2.3 yamt
3208 1.1.2.2 yamt KASSERT(new_inflen == orig_inflen - size_diff);
3209 1.1.2.2 yamt
3210 1.1.2.2 yamt return error;
3211 1.1.2.2 yamt }
3212 1.1.2.2 yamt
3213