/* $NetBSD: udf_readwrite.c,v 1.1 2008/05/14 16:49:48 reinoud Exp $ */

/*
 * Copyright (c) 2007, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_readwrite.c,v 1.1 2008/05/14 16:49:48 reinoud Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#if defined(_KERNEL_OPT)
#include "opt_udf.h"
#endif

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)

/* --------------------------------------------------------------------- */

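/*
 * Resynchronise with the FID stream inside `blob' and stamp every File
 * Identifier Descriptor found with the logical block number `lb_num',
 * recalculating the tag checksum of each FID. The stream is scanned in
 * 4-byte steps starting at `rfix_pos' and never beyond `max_rfix_pos'.
 */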
void
udf_fixup_fid_block(uint8_t *blob, int lb_size,
	int rfix_pos, int max_rfix_pos, uint32_t lb_num)
{
	struct fileid_desc *fid;
	uint8_t *fid_pos;
	int fid_len, found;

	/* needs to be word aligned */
	KASSERT(rfix_pos % 4 == 0);

	/* first resync with the FID stream! */
	found = 0;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid_pos = blob + rfix_pos;
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) == TAGID_FID) {
			if (udf_check_tag((union dscrptr *) fid) == 0)
				found = 1;
		}
		if (found)
			break;
		/* try next location; can only be 4-byte aligned */
		rfix_pos += 4;
	}

	/* walk over the fids */
	fid_pos = blob + rfix_pos;
	while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
		fid = (struct fileid_desc *) fid_pos;
		if (udf_rw16(fid->tag.id) != TAGID_FID) {
			/* end of FID stream; end of directory or corrupted */
			break;
		}

		/* update sector number and recalculate checksum */
		fid->tag.tag_loc = udf_rw32(lb_num);
		udf_validate_tag_sum((union dscrptr *) fid);

		/* if the FID crosses the end of the blob, we're done! */
		if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
			break;

		fid_len = udf_fidsize(fid);
		fid_pos += fid_len;
		rfix_pos += fid_len;
	}
}


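/*
 * If the (extended) file entry in `blob' records extended attributes
 * in-line, patch the embedded extended attribute header descriptor so that
 * its tag location matches `lb_num' and recalculate its checksum.
 * Descriptors without extended attributes, and descriptor types that cannot
 * carry them, are left untouched.
 */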
void
udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
{
	struct desc_tag *tag;
	struct file_entry *fe;
	struct extfile_entry *efe;
	struct extattrhdr_desc *eahdr;
	int l_ea, error;

	/* get information from fe/efe */
	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) blob;
		l_ea = udf_rw32(fe->l_ea);
		eahdr = (struct extattrhdr_desc *) fe->data;
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) blob;
		l_ea = udf_rw32(efe->l_ea);
		eahdr = (struct extattrhdr_desc *) efe->data;
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_ALLOCEXTENT :
	case TAGID_EXTATTR_HDR :
		return;
	default:
		panic("%s: passed bad tag\n", __FUNCTION__);
	}

	/* is anything recorded here? (why were we called otherwise?) */
	if (l_ea == 0)
		return;

	/* check extended attribute tag */
	/* TODO XXX what to do when we encounter an error here? */
	error = udf_check_tag(eahdr);
	if (error)
		return;		/* for now */
	if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
		return;		/* for now */
	error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
	if (error)
		return;		/* for now */

	DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
		l_ea));

	/* fixup eahdr tag */
	eahdr->tag.tag_loc = udf_rw32(lb_num);
	udf_validate_tag_sum((union dscrptr *) eahdr);
}


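/*
 * Fix up the self-referencing fields inside a node descriptor before it is
 * written out: the embedded extended attributes and, for internally
 * (embedded) allocated nodes, any FID stream found in the embedded data are
 * stamped with the block number recorded in the descriptor's own tag.
 * Finally the tag and CRC sums of the descriptor itself are recalculated.
 */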
void
udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
{
	struct desc_tag *tag;
	struct file_entry *fe;
	struct extfile_entry *efe;
	uint32_t lb_size, lb_num;
	uint32_t rfid_pos, max_rfid_pos;
	int icbflags, addr_type, has_fids, l_ea;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	/* if it's not a node we're done */
	if (udf_c_type != UDF_C_NODE)
		return;

	/* NOTE this could also be done in write_internal */
	/* start of a descriptor */
	has_fids = 0;
	max_rfid_pos = rfid_pos = lb_num = 0;	/* shut up gcc! */

	tag = (struct desc_tag *) blob;
	switch (udf_rw16(tag->id)) {
	case TAGID_FENTRY :
		fe = (struct file_entry *) tag;
		l_ea = udf_rw32(fe->l_ea);
		icbflags = udf_rw16(fe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		has_fids = (addr_type == UDF_ICB_INTERN_ALLOC);
		rfid_pos = UDF_FENTRY_SIZE + l_ea;
		max_rfid_pos = rfid_pos + udf_rw64(fe->inf_len);
		lb_num = udf_rw32(fe->tag.tag_loc);
		break;
	case TAGID_EXTFENTRY :
		efe = (struct extfile_entry *) tag;
		l_ea = udf_rw32(efe->l_ea);
		icbflags = udf_rw16(efe->icbtag.flags);
		addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
		has_fids = (addr_type == UDF_ICB_INTERN_ALLOC);
		rfid_pos = UDF_EXTFENTRY_SIZE + l_ea;
		max_rfid_pos = rfid_pos + udf_rw64(efe->inf_len);
		lb_num = udf_rw32(efe->tag.tag_loc);
		break;
	case TAGID_INDIRECTENTRY :
	case TAGID_ALLOCEXTENT :
	case TAGID_EXTATTR_HDR :
		l_ea = 0;
		has_fids = 0;
		break;
	default:
		panic("%s: passed bad tag\n", __FUNCTION__);
		break;
	}

	/* fixup internal extended attributes if present */
	if (l_ea)
		udf_fixup_internal_extattr(blob, lb_num);

	if (has_fids) {
		udf_fixup_fid_block(blob, lb_size, rfid_pos,
			max_rfid_pos, lb_num);
	}
	udf_validate_tag_and_crc_sums(blob);
}

/* --------------------------------------------------------------------- */

/*
 * Set of generic descriptor readers and writers and their helper functions.
 * Descriptors inside `logical space', i.e. inside logically mapped
 * partitions, can never be longer than one logical sector.
 *
 * NOTE that these functions *can* be used by the scheduler backends to read
 * node descriptors too.
 *
 * For reading, the size of the allocated piece is returned in multiples of
 * the sector size due to udf_calc_udf_malloc_size().
 */

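/*
 * Illustrative only: a minimal sketch of how a caller might pull a single
 * descriptor off the disc with the helpers below. The malloc type and the
 * error handling are assumptions made for the example, not taken from the
 * actual mount code:
 *
 *	union dscrptr *dscr;
 *	int error;
 *
 *	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &dscr);
 *	if (error == 0 && dscr == NULL)
 *		;	// blank sector: no error, but no descriptor either
 *	...
 *	free(dscr, M_UDFTEMP);
 */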

/* SYNC reading of n blocks from specified sector */
/* NOTE only used by udf_read_phys_dscr */
static int
udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
	uint32_t start, uint32_t sectors)
{
	struct buf *buf, *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int piece;
	int error;

	DPRINTF(READ, ("udf_read_phys_sectors() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));
	buf = getiobuf(ump->devvp, true);
	buf->b_flags = B_READ;
	buf->b_cflags = BC_BUSY;	/* needed? */
	buf->b_iodone = NULL;
	buf->b_data = blob;
	buf->b_bcount = sectors * sector_size;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = start * blks;
	buf->b_proc = NULL;

	error = 0;
	buf_offset = 0;
	rblkno = start;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno = rblkno * blks;
		nestbuf->b_proc = NULL;
		nestbuf->b_rawblkno = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno += piece;
		rblkno += piece;
		buf_offset += piece * sector_size;
		sectors -= piece;
	}
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


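/*
 * Note on the return contract of udf_read_phys_dscr() below: on success the
 * descriptor is returned in *dstp in memory allocated from `mtype', sized in
 * whole sectors; an all-zero (blank) sector yields error 0 with *dstp set to
 * NULL; on any error nothing is left allocated.
 */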
/* synchronous generic descriptor read */
int
udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
	struct malloc_type *mtype, union dscrptr **dstp)
{
	union dscrptr *dst, *new_dst;
	uint8_t *pos;
	int sectors, dscrlen;
	int i, error, sector_size;

	sector_size = ump->discinfo.sector_size;

	*dstp = dst = NULL;
	dscrlen = sector_size;

	/* read initial piece */
	dst = malloc(sector_size, mtype, M_WAITOK);
	error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
	DPRINTFIF(DESCRIPTOR, error, ("read error (%d)\n", error));

	if (!error) {
		/* check if it is a valid tag */
		error = udf_check_tag(dst);
		if (error) {
			/* check if it is an empty block */
			pos = (uint8_t *) dst;
			for (i = 0; i < sector_size; i++, pos++) {
				if (*pos) break;
			}
			if (i == sector_size) {
				/* return no error but with no dscrptr */
				/* dispose first block */
				free(dst, mtype);
				return 0;
			}
		}
		/* calculate descriptor size */
		dscrlen = udf_tagsize(dst, sector_size);
	}
	DPRINTFIF(DESCRIPTOR, error, ("bad tag checksum\n"));

	if (!error && (dscrlen > sector_size)) {
		DPRINTF(DESCRIPTOR, ("multi block descriptor read\n"));
		/*
		 * Read the rest of the descriptor. Since this is only used
		 * at mount time it would be overkill to define and use a
		 * specific multi-sector read routine for this alone.
		 */

		new_dst = realloc(dst, dscrlen, mtype, M_WAITOK);
		if (new_dst == NULL) {
			free(dst, mtype);
			return ENOMEM;
		}
		dst = new_dst;

		sectors = (dscrlen + sector_size - 1) / sector_size;
		DPRINTF(DESCRIPTOR, ("dscrlen = %d (%d blk)\n", dscrlen, sectors));

		pos = (uint8_t *) dst + sector_size;
		error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
			sector + 1, sectors - 1);

		DPRINTFIF(DESCRIPTOR, error, ("read error on multi (%d)\n",
			error));
	}
	if (!error) {
		error = udf_check_tag_payload(dst, dscrlen);
		DPRINTFIF(DESCRIPTOR, error, ("bad payload checksum\n"));
	}
	if (error && dst) {
		free(dst, mtype);
		dst = NULL;
	}
	*dstp = dst;

	return error;
}

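/*
 * Split `buf' into MAXPHYS-sized nested buffers and queue them on the disc
 * strategy. The write itself is asynchronous; it is up to the caller to
 * biowait() on the master buffer (see the synchronous variant below) or to
 * rely on its b_iodone callback (see the asynchronous variant).
 */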

static void
udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf)
{
	struct buf *nestbuf;
	uint32_t buf_offset;
	off_t lblkno, rblkno;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	uint32_t sectors;
	int piece;
	int error;

	sectors = buf->b_bcount / sector_size;
	DPRINTF(WRITE, ("udf_write_phys_buf() : sectors = %d, sector_size = %d\n",
		sectors, sector_size));

	/* don't forget to increase pending count for the bwrite itself */
	/* panic("NO WRITING\n"); */
	if (buf->b_vp) {
		mutex_enter(&buf->b_vp->v_interlock);
		buf->b_vp->v_numoutput++;
		mutex_exit(&buf->b_vp->v_interlock);
	}

	error = 0;
	buf_offset = 0;
	rblkno = buf->b_blkno / blks;
	lblkno = 0;
	while ((sectors > 0) && (error == 0)) {
		piece = MIN(MAXPHYS/sector_size, sectors);
		DPRINTF(WRITE, ("write out %d + %d\n",
			(uint32_t) rblkno, piece));

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno = lblkno;

		/* CD schedules on raw blkno */
		nestbuf->b_blkno = rblkno * blks;
		nestbuf->b_proc = NULL;
		nestbuf->b_rawblkno = rblkno * blks;
		nestbuf->b_udf_c_type = what;

		udf_discstrat_queuebuf(ump, nestbuf);

		lblkno += piece;
		rblkno += piece;
		buf_offset += piece * sector_size;
		sectors -= piece;
	}
}


/* synchronous generic descriptor write */
int
udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what,
	union dscrptr *dscr, uint32_t sector, uint32_t logsector)
{
	struct vnode *vp;
	struct buf *buf;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int dscrlen;
	int error;

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags = B_WRITE;
	buf->b_cflags = BC_BUSY;	/* needed? */
	buf->b_iodone = NULL;
	buf->b_data = (void *) dscr;
	buf->b_bcount = dscrlen;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc = NULL;

	/* do the write, wait and return error */
	udf_write_phys_buf(ump, what, buf);
	error = biowait(buf);
	putiobuf(buf);

	return error;
}


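/*
 * The asynchronous variant below fills in the same kind of transfer buffer
 * as the synchronous one, but instead of biowait()ing it hands completion
 * off to the caller-supplied dscrwr_callback, which runs as the buffer's
 * b_iodone once the nested writes have finished.
 */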
/* asynchronous generic descriptor write */
int
udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
		      int what, union dscrptr *dscr,
		      uint32_t sector, uint32_t logsector,
		      void (*dscrwr_callback)(struct buf *))
{
	struct vnode *vp;
	struct buf *buf;
	int dscrlen;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;

	KASSERT(dscrwr_callback);
	DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));

	/* set sector number in the descriptor and validate */
	dscr->tag.tag_loc = udf_rw32(logsector);
	udf_validate_tag_and_crc_sums(dscr);

	/* calculate descriptor size */
	dscrlen = udf_tagsize(dscr, sector_size);

	/* get transfer buffer */
	vp = udf_node ? udf_node->vnode : ump->devvp;
	buf = getiobuf(vp, true);
	buf->b_flags = B_WRITE;		// | B_ASYNC;
	buf->b_cflags = BC_BUSY;
	buf->b_iodone = dscrwr_callback;
	buf->b_data = dscr;
	buf->b_bcount = dscrlen;
	buf->b_resid = buf->b_bcount;
	buf->b_bufsize = buf->b_bcount;
	buf->b_private = NULL;	/* not needed yet */
	BIO_SETPRIO(buf, BPRIO_DEFAULT);
	buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
	buf->b_proc = NULL;

	/* do the write and return no error */
	udf_write_phys_buf(ump, what, buf);
	return 0;
}

/* --------------------------------------------------------------------- */

/* disc strategy dispatchers */

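/*
 * The dispatchers below bundle their arguments into a struct udf_strat_args
 * and forward the call through the function pointers of the strategy
 * selected in ump->strategy. A strategy backend is, loosely, a struct of
 * operations along these lines (an illustrative sketch inferred from the
 * calls below, not the definitive definition in the UDF headers):
 *
 *	struct udf_strategy {
 *		int  (*create_logvol_dscr)(struct udf_strat_args *);
 *		void (*free_logvol_dscr)  (struct udf_strat_args *);
 *		int  (*read_logvol_dscr)  (struct udf_strat_args *);
 *		int  (*write_logvol_dscr) (struct udf_strat_args *);
 *		void (*queuebuf)          (struct udf_strat_args *);
 *		void (*discstrat_init)    (struct udf_strat_args *);
 *		void (*discstrat_finish)  (struct udf_strat_args *);
 *	};
 */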
int
udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	args.ump = ump;
	args.udf_node = udf_node;
	args.icb = icb;
	args.dscr = NULL;

	error = (strategy->create_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


void
udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	void *dscr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	args.ump = ump;
	args.icb = icb;
	args.dscr = dscr;

	(strategy->free_logvol_dscr)(&args);
}


int
udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
	union dscrptr **dscrptr)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;
	int error;

	args.ump = ump;
	args.icb = icb;
	args.dscr = NULL;

	error = (strategy->read_logvol_dscr)(&args);
	*dscrptr = args.dscr;

	return error;
}


int
udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
	struct long_ad *icb, int waitfor)
{
	struct udf_strategy *strategy = udf_node->ump->strategy;
	struct udf_strat_args args;
	int error;

	args.ump = udf_node->ump;
	args.udf_node = udf_node;
	args.icb = icb;
	args.dscr = dscr;
	args.waitfor = waitfor;

	error = (strategy->write_logvol_dscr)(&args);
	return error;
}


void
udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	args.ump = ump;
	args.nestbuf = nestbuf;

	(strategy->queuebuf)(&args);
}


void
udf_discstrat_init(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	args.ump = ump;
	(strategy->discstrat_init)(&args);
}


void
udf_discstrat_finish(struct udf_mount *ump)
{
	struct udf_strategy *strategy = ump->strategy;
	struct udf_strat_args args;

	args.ump = ump;
	(strategy->discstrat_finish)(&args);
}

/* --------------------------------------------------------------------- */
