/* $NetBSD: udf_strat_sequential.c,v 1.5.4.1 2009/02/18 00:51:27 snj Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_strat_sequential.c,v 1.5.4.1 2009/02/18 00:51:27 snj Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) vnode->v_data)
#define PRIV(ump) ((struct strat_private *) ump->strategy_private)

/* --------------------------------------------------------------------- */

/* BUFQ's */
#define UDF_SHED_MAX		3

#define UDF_SHED_READING	0
#define UDF_SHED_WRITING	1
#define UDF_SHED_SEQWRITING	2
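
/*
 * Incoming requests are sorted into three queues: READING takes all
 * reads, WRITING takes writes to already assigned (absolute) disc
 * addresses and SEQWRITING takes writes that still need a disc address
 * allocated at issue time.
 */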

struct strat_private {
	struct pool	 desc_pool;		/* node descriptors */

	lwp_t		*queue_lwp;
	kcondvar_t	 discstrat_cv;		/* to wait on	 */
	kmutex_t	 discstrat_mutex;	/* disc strategy */

	int		 run_thread;		/* thread control */
	int		 cur_queue;

	struct disk_strategy old_strategy_setting;
	struct bufq_state *queues[UDF_SHED_MAX];
	struct timespec	 last_queued[UDF_SHED_MAX];
};


/* --------------------------------------------------------------------- */

static void
udf_wr_nodedscr_callback(struct buf *buf)
{
	struct udf_node *udf_node;

	KASSERT(buf);
	KASSERT(buf->b_data);

	/* called when write action is done */
	DPRINTF(WRITE, ("udf_wr_nodedscr_callback(): node written out\n"));

	udf_node = VTOI(buf->b_vp);
	if (udf_node == NULL) {
		putiobuf(buf);
		printf("udf_wr_nodedscr_callback: NULL node?\n");
		return;
	}

	/* XXX right flags to mark dirty again on error? */
	if (buf->b_error) {
		udf_node->i_flags |= IN_MODIFIED | IN_ACCESSED;
		/* XXX TODO reschedule on error */
	}

	/* decrement outstanding_nodedscr */
	KASSERT(udf_node->outstanding_nodedscr >= 1);
	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		/* first unlock the node */
		KASSERT(udf_node->i_flags & IN_CALLBACK_ULK);
		UDF_UNLOCK_NODE(udf_node, IN_CALLBACK_ULK);

		wakeup(&udf_node->outstanding_nodedscr);
	}

	/* unreference the vnode so it can be recycled */
	holdrele(udf_node->vnode);

	putiobuf(buf);
}

/* --------------------------------------------------------------------- */

static int
udf_create_logvol_dscr_seq(struct udf_strat_args *args)
{
	union dscrptr	**dscrptr = &args->dscr;
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	*dscrptr = pool_get(&priv->desc_pool, PR_WAITOK);
	memset(*dscrptr, 0, lb_size);

	return 0;
}


static void
udf_free_logvol_dscr_seq(struct udf_strat_args *args)
{
	union dscrptr	*dscr = args->dscr;
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);

	pool_put(&priv->desc_pool, dscr);
}


static int
udf_read_logvol_dscr_seq(struct udf_strat_args *args)
{
	union dscrptr	**dscrptr = &args->dscr;
	union dscrptr	*tmpdscr;
	struct udf_mount *ump = args->ump;
	struct long_ad	*icb = args->icb;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;
	uint32_t sector, dummy;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);

	error = udf_translate_vtop(ump, icb, &sector, &dummy);
	if (error)
		return error;

	/* try to read in fe/efe */
	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &tmpdscr);
	if (error)
		return error;

	*dscrptr = pool_get(&priv->desc_pool, PR_WAITOK);
	memcpy(*dscrptr, tmpdscr, lb_size);
	free(tmpdscr, M_UDFTEMP);

	return 0;
}


static int
udf_write_logvol_dscr_seq(struct udf_strat_args *args)
{
	union dscrptr	*dscr = args->dscr;
	struct udf_mount *ump = args->ump;
	struct udf_node	*udf_node = args->udf_node;
	struct long_ad	*icb = args->icb;
	int		 waitfor = args->waitfor;
	uint32_t	 logsectornr, sectornr, dummy;
	int		 error, vpart;

	/*
	 * We have to decide whether to write it out sequentially or at its
	 * fixed position, by examining the partition it is (to be) written
	 * on.
	 */
	vpart = udf_rw16(udf_node->loc.loc.part_num);
	logsectornr = udf_rw32(icb->loc.lb_num);
	sectornr = 0;
	if (ump->vtop_tp[vpart] != UDF_VTOP_TYPE_VIRT) {
		error = udf_translate_vtop(ump, icb, &sectornr, &dummy);
		if (error)
			goto out;
	}

	/* add reference to the vnode to prevent recycling */
	vhold(udf_node->vnode);

	if (waitfor) {
		DPRINTF(WRITE, ("udf_write_logvol_dscr: sync write\n"));

		error = udf_write_phys_dscr_sync(ump, udf_node, UDF_C_NODE,
			dscr, sectornr, logsectornr);
	} else {
		DPRINTF(WRITE, ("udf_write_logvol_dscr: no wait, async write\n"));

		error = udf_write_phys_dscr_async(ump, udf_node, UDF_C_NODE,
			dscr, sectornr, logsectornr, udf_wr_nodedscr_callback);
		/* will be UNLOCKED in the callback */
		return error;
	}

	holdrele(udf_node->vnode);
out:
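	/*
	 * The sync and error paths mirror the async callback: drop our
	 * outstanding count, unlock the node and wake up any waiters.
	 */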
	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		UDF_UNLOCK_NODE(udf_node, 0);
		wakeup(&udf_node->outstanding_nodedscr);
	}

	return error;
}

/* --------------------------------------------------------------------- */

/*
 * Main file-system specific scheduler. Due to the nature of optical media
 * scheduling can't be performed in the traditional way. Most OS
 * implementations I've seen therefore read or write a file atomically,
 * which gives all kinds of side effects.
 *
 * This implementation uses a kernel thread to schedule the queued requests
 * in a way that is semi-optimal for optical media; this means approximately
 * (R*|(Wr*|Ws*))* since switching between reading and writing is expensive in
 * time.
 */

static void
udf_queuebuf_seq(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct buf *nestbuf = args->nestbuf;
	struct strat_private *priv = PRIV(ump);
	int queue;
	int what;

	KASSERT(ump);
	KASSERT(nestbuf);
	KASSERT(nestbuf->b_iodone == nestiobuf_iodone);

	what = nestbuf->b_udf_c_type;
	queue = UDF_SHED_READING;
	if ((nestbuf->b_flags & B_READ) == 0) {
		/* writing */
		queue = UDF_SHED_SEQWRITING;
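		/*
		 * Absolute writes already carry their final disc address;
		 * they bypass the sequential (late) allocator.
		 */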
		if (what == UDF_C_ABSOLUTE)
			queue = UDF_SHED_WRITING;
	}

	/* use our own scheduler lists for more complex scheduling */
	mutex_enter(&priv->discstrat_mutex);
	BUFQ_PUT(priv->queues[queue], nestbuf);
	vfs_timestamp(&priv->last_queued[queue]);
	mutex_exit(&priv->discstrat_mutex);

	/* signal our thread that there might be something to do */
	cv_signal(&priv->discstrat_cv);
}

/* --------------------------------------------------------------------- */

/* TODO convert to lb_size */
static void
udf_VAT_mapping_update(struct udf_mount *ump, struct buf *buf, uint32_t lb_map)
{
	union dscrptr	*fdscr = (union dscrptr *) buf->b_data;
	struct vnode	*vp = buf->b_vp;
	struct udf_node	*udf_node = VTOI(vp);
	uint32_t	 lb_size, blks;
	uint32_t	 lb_num;
	uint32_t	 udf_rw32_lbmap;
	int c_type = buf->b_udf_c_type;
	int error;

	/* only interested when we're using a VAT */
	KASSERT(ump->vat_node);
	KASSERT(ump->vtop_alloc[ump->node_part] == UDF_ALLOC_VAT);

	/* only nodes are recorded in the VAT */
	/* NOTE: and the fileset descriptor (FIXME ?) */
	if (c_type != UDF_C_NODE)
		return;

	/* we now have a UDF FE/EFE node on media with VAT (or the VAT itself) */
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blks = lb_size / DEV_BSIZE;
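	/* note: lb_size and blks are as yet unused; see the TODO above */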

	udf_rw32_lbmap = udf_rw32(lb_map);

	/* if we're the VAT itself, only update our assigned sector number */
	if (udf_node == ump->vat_node) {
		fdscr->tag.tag_loc = udf_rw32_lbmap;
		udf_validate_tag_sum(fdscr);
		DPRINTF(TRANSLATE, ("VAT assigned to sector %u\n",
			udf_rw32(udf_rw32_lbmap)));
		/* no use mapping the VAT node in the VAT */
		return;
	}

	/* record new position in VAT file */
	lb_num = udf_rw32(fdscr->tag.tag_loc);

	/* lb_num = udf_rw32(udf_node->write_loc.loc.lb_num); */

	DPRINTF(TRANSLATE, ("VAT entry change (log %u -> phys %u)\n",
		lb_num, lb_map));

	/* VAT should be longer than this write; can't go wrong */
	KASSERT(lb_num <= ump->vat_entries);

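	/*
	 * Each VAT entry is one 32-bit disc address, so entry lb_num
	 * lives at byte offset lb_num * 4 from vat_offset.
	 */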
	mutex_enter(&ump->allocate_mutex);
	error = udf_vat_write(ump->vat_node,
		(uint8_t *) &udf_rw32_lbmap, 4,
		ump->vat_offset + lb_num * 4);
	mutex_exit(&ump->allocate_mutex);

	if (error)
		panic("udf_VAT_mapping_update: HELP! I couldn't "
			"write in the VAT file?\n");
}


static void
udf_issue_buf(struct udf_mount *ump, int queue, struct buf *buf)
{
	union dscrptr *dscr;
	struct long_ad *node_ad_cpy;
	struct part_desc *pdesc;
	uint64_t *lmapping, *lmappos, blknr;
	uint32_t our_sectornr, sectornr, bpos;
	uint32_t ptov;
	uint16_t vpart_num;
	uint8_t *fidblk;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int len, buf_len;

	/* if reading, just pass to the device's STRATEGY */
	if (queue == UDF_SHED_READING) {
		DPRINTF(SHEDULE, ("\nudf_issue_buf READ %p : sector %d type %d,"
			"b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));
		VOP_STRATEGY(ump->devvp, buf);
		return;
	}

	blknr = buf->b_blkno;
	our_sectornr = blknr / blks;

	if (queue == UDF_SHED_WRITING) {
		DPRINTF(SHEDULE, ("\nudf_issue_buf WRITE %p : sector %d "
			"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));
		KASSERT(buf->b_udf_c_type == UDF_C_ABSOLUTE);

		// udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);
		VOP_STRATEGY(ump->devvp, buf);
		return;
	}

	KASSERT(queue == UDF_SHED_SEQWRITING);
	DPRINTF(SHEDULE, ("\nudf_issue_buf SEQWRITE %p : sector XXXX "
		"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
		buf, buf->b_udf_c_type, buf->b_resid, buf->b_bcount,
		buf->b_bufsize));

	/*
	 * Buffers should not have been allocated to disc addresses yet on
	 * this queue. Note that a buffer can get multiple extents allocated.
	 *
	 * lmapping contains lb_num relative to base partition.
	 */
	lmapping = ump->la_lmapping;
	node_ad_cpy = ump->la_node_ad_cpy;

	/* logically allocate buf and map it in the file */
	udf_late_allocate_buf(ump, buf, lmapping, node_ad_cpy, &vpart_num);

	/*
	 * NOTE: We are using the knowledge here that sequential media will
	 * always be mapped linearly. Thus there is no need to explicitly
	 * translate the lmapping list.
	 */

	/* calculate offset from physical base partition */
	pdesc = ump->partitions[ump->vtop[vpart_num]];
	ptov  = udf_rw32(pdesc->start_loc);

	/* set the buffer's blkno to the physical block number */
	buf->b_blkno = (*lmapping + ptov) * blks;

	/* fixate floating descriptors */
	if (buf->b_udf_c_type == UDF_C_FLOAT_DSCR) {
		/* set our tag location to the absolute position */
		dscr = (union dscrptr *) buf->b_data;
		dscr->tag.tag_loc = udf_rw32(*lmapping + ptov);
		udf_validate_tag_and_crc_sums(dscr);
	}

	/* update mapping in the VAT */
	if (buf->b_udf_c_type == UDF_C_NODE) {
		udf_VAT_mapping_update(ump, buf, *lmapping);
		udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);
	}

	/* if we have FIDs, fixup using the new allocation table */
	if (buf->b_udf_c_type == UDF_C_FIDS) {
		buf_len = buf->b_bcount;
		bpos = 0;
		lmappos = lmapping;
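		/*
		 * FIDs record the address of the logical block they reside
		 * in, so patch each sector-sized chunk with its newly
		 * allocated sector number.
		 */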
		while (buf_len) {
			sectornr = *lmappos++;
			len = MIN(buf_len, sector_size);
			fidblk = (uint8_t *) buf->b_data + bpos;
			udf_fixup_fid_block(fidblk, sector_size,
				0, len, sectornr);
			bpos += len;
			buf_len -= len;
		}
	}

	VOP_STRATEGY(ump->devvp, buf);
}


static void
udf_doshedule(struct udf_mount *ump)
{
	struct buf *buf;
	struct timespec now, *last;
	struct strat_private *priv = PRIV(ump);
	void (*b_callback)(struct buf *);
	int new_queue;
	int error;

	buf = BUFQ_GET(priv->queues[priv->cur_queue]);
	if (buf) {
		/* transfer from the current queue to the device queue */
		mutex_exit(&priv->discstrat_mutex);

		/* transform buffer to synchronous; XXX needed? */
		b_callback = buf->b_iodone;
		buf->b_iodone = NULL;
		CLR(buf->b_flags, B_ASYNC);

		/* issue and wait on completion */
		udf_issue_buf(ump, priv->cur_queue, buf);
		biowait(buf);

		mutex_enter(&priv->discstrat_mutex);

		/* if there is an error, repair it; otherwise propagate */
		if (buf->b_error && ((buf->b_flags & B_READ) == 0)) {
			/* check what we need to do */
			panic("UDF write error, can't handle yet!\n");
		}

		/* propagate result to higher layers */
		if (b_callback) {
			buf->b_iodone = b_callback;
			(*buf->b_iodone)(buf);
		}

		return;
	}

	/* Check if we're idling in this state */
	vfs_timestamp(&now);
	last = &priv->last_queued[priv->cur_queue];
	if (ump->discinfo.mmc_class == MMC_CLASS_CD) {
		/* don't switch too fast for CD media; it's expensive in time */
		if (now.tv_sec - last->tv_sec < 3)
			return;
	}

	/* check if we can/should switch */
	new_queue = priv->cur_queue;

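	/*
	 * Note the order: the last non-empty queue wins, so WRITING (used
	 * only at unmount time) takes precedence over SEQWRITING, which in
	 * turn beats READING.
	 */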
	if (BUFQ_PEEK(priv->queues[UDF_SHED_READING]))
		new_queue = UDF_SHED_READING;
	if (BUFQ_PEEK(priv->queues[UDF_SHED_SEQWRITING]))
		new_queue = UDF_SHED_SEQWRITING;
	if (BUFQ_PEEK(priv->queues[UDF_SHED_WRITING]))	/* only for unmount */
		new_queue = UDF_SHED_WRITING;
	if (priv->cur_queue == UDF_SHED_READING) {
		if (new_queue == UDF_SHED_SEQWRITING) {
			/* TODO use flag to signal if this is needed */
			mutex_exit(&priv->discstrat_mutex);

			/* update trackinfo for data and metadata */
			error = udf_update_trackinfo(ump,
					&ump->data_track);
			assert(error == 0);
			error = udf_update_trackinfo(ump,
					&ump->metadata_track);
			assert(error == 0);
			mutex_enter(&priv->discstrat_mutex);
		}
	}

	if (new_queue != priv->cur_queue) {
		DPRINTF(SHEDULE, ("switching from %d to %d\n",
			priv->cur_queue, new_queue));
	}

	priv->cur_queue = new_queue;
}


static void
udf_discstrat_thread(void *arg)
{
	struct udf_mount *ump = (struct udf_mount *) arg;
	struct strat_private *priv = PRIV(ump);
	int empty;

	empty = 1;
	mutex_enter(&priv->discstrat_mutex);
	while (priv->run_thread || !empty) {
		/* process the current selected queue */
		udf_doshedule(ump);
		empty  = (BUFQ_PEEK(priv->queues[UDF_SHED_READING]) == NULL);
		empty &= (BUFQ_PEEK(priv->queues[UDF_SHED_WRITING]) == NULL);
		empty &= (BUFQ_PEEK(priv->queues[UDF_SHED_SEQWRITING]) == NULL);

		/* wait for more work; the hz/8 timeout keeps the idle
		 * queue-switch logic in udf_doshedule running */
		if (empty)
			cv_timedwait(&priv->discstrat_cv,
				&priv->discstrat_mutex, hz/8);
	}
	mutex_exit(&priv->discstrat_mutex);

	wakeup(&priv->run_thread);
	kthread_exit(0);
	/* not reached */
}

/* --------------------------------------------------------------------- */

static void
udf_discstrat_init_seq(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	struct disk_strategy dkstrat;
	uint32_t lb_size;

	KASSERT(ump);
	KASSERT(ump->logical_vol);
	KASSERT(priv == NULL);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size > 0);

	/* initialise our memory space */
	ump->strategy_private = malloc(sizeof(struct strat_private),
		M_UDFTEMP, M_WAITOK);
	priv = ump->strategy_private;
	memset(priv, 0, sizeof(struct strat_private));

	/* initialise locks */
	cv_init(&priv->discstrat_cv, "udfstrat");
	mutex_init(&priv->discstrat_mutex, MUTEX_DEFAULT, IPL_NONE);

	/*
	 * Initialise pool for descriptors associated with nodes. This is done
	 * in lb_size units though currently lb_size is dictated to be
	 * sector_size.
	 */
	pool_init(&priv->desc_pool, lb_size, 0, 0, 0, "udf_desc_pool", NULL,
		IPL_NONE);

	/*
	 * Remember the old device strategy method and explicitly set the
	 * method `discsort', since we have our own more complex strategy
	 * that is not implementable on the CD device, and other strategies
	 * would get in the way.
	 */
	memset(&priv->old_strategy_setting, 0,
		sizeof(struct disk_strategy));
	VOP_IOCTL(ump->devvp, DIOCGSTRATEGY, &priv->old_strategy_setting,
		FREAD | FKIOCTL, NOCRED);
	memset(&dkstrat, 0, sizeof(struct disk_strategy));
	strcpy(dkstrat.dks_name, "discsort");
	VOP_IOCTL(ump->devvp, DIOCSSTRATEGY, &dkstrat, FWRITE | FKIOCTL,
		NOCRED);

	/* initialise our internal scheduler */
	priv->cur_queue = UDF_SHED_READING;
	bufq_alloc(&priv->queues[UDF_SHED_READING], "disksort",
		BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&priv->queues[UDF_SHED_WRITING], "disksort",
		BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&priv->queues[UDF_SHED_SEQWRITING], "fcfs", 0);
	vfs_timestamp(&priv->last_queued[UDF_SHED_READING]);
	vfs_timestamp(&priv->last_queued[UDF_SHED_WRITING]);
	vfs_timestamp(&priv->last_queued[UDF_SHED_SEQWRITING]);

	/* create our disk strategy thread */
	priv->run_thread = 1;
	if (kthread_create(PRI_NONE, 0 /* KTHREAD_MPSAFE*/, NULL /* cpu_info*/,
		udf_discstrat_thread, ump, &priv->queue_lwp,
		"%s", "udf_rw")) {
		panic("fork udf_rw");
	}
}


static void
udf_discstrat_finish_seq(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	int error;

	if (ump == NULL)
		return;

	/* stop our scheduling thread */
	KASSERT(priv->run_thread == 1);
	priv->run_thread = 0;
	wakeup(priv->queue_lwp);
	do {
		error = tsleep(&priv->run_thread, PRIBIO+1,
			"udfshedfin", hz);
	} while (error);
	/* kthread should be finished now */

	/* restore the old device strategy method */
	VOP_IOCTL(ump->devvp, DIOCSSTRATEGY, &priv->old_strategy_setting,
		FWRITE, NOCRED);

	/* destroy our pool */
	pool_destroy(&priv->desc_pool);

	/* free our private space */
	free(ump->strategy_private, M_UDFTEMP);
	ump->strategy_private = NULL;
}

/* --------------------------------------------------------------------- */

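/*
 * Exported entry points for the sequential-recording strategy; the rest
 * of the UDF code calls these through this vector when this strategy is
 * in use for a mount.
 */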
struct udf_strategy udf_strat_sequential =
{
	udf_create_logvol_dscr_seq,
	udf_free_logvol_dscr_seq,
	udf_read_logvol_dscr_seq,
	udf_write_logvol_dscr_seq,
	udf_queuebuf_seq,
	udf_discstrat_init_seq,
	udf_discstrat_finish_seq
};