/* $NetBSD: udf_strat_direct.c,v 1.1.6.4 2009/01/17 13:29:17 mjf Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_strat_direct.c,v 1.1.6.4 2009/01/17 13:29:17 mjf Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode)	((struct udf_node *) vnode->v_data)
#define PRIV(ump)	((struct strat_private *) ump->strategy_private)

/* --------------------------------------------------------------------- */

/* BUFQ's */
#define UDF_SHED_MAX		3

#define UDF_SHED_READING	0
#define UDF_SHED_WRITING	1
#define UDF_SHED_SEQWRITING	2

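/*
 * In this `direct' strategy the UDF_SHED_* values above only classify
 * incoming buffers; nothing is actually queued and all I/O is issued
 * immediately via VOP_STRATEGY().  The private state therefore consists
 * of just a pool for node descriptors.
 */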

struct strat_private {
	struct pool	desc_pool;	/* node descriptors */
};

/* --------------------------------------------------------------------- */

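/*
 * Write-completion callback for asynchronously written node descriptors:
 * drop the node's outstanding_nodedscr count, unlock and wake up the node
 * when the last descriptor write finishes, release the vnode hold taken
 * before the write and free the nested iobuf.
 */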
static void
udf_wr_nodedscr_callback(struct buf *buf)
{
	struct udf_node *udf_node;

	KASSERT(buf);
	KASSERT(buf->b_data);

	/* called when write action is done */
	DPRINTF(WRITE, ("udf_wr_nodedscr_callback(): node written out\n"));

	udf_node = VTOI(buf->b_vp);
	if (udf_node == NULL) {
		putiobuf(buf);
		printf("udf_wr_node_callback: NULL node?\n");
		return;
	}

	/* XXX right flags to mark dirty again on error? */
	if (buf->b_error) {
		/* write error on `defect free' media??? how to solve? */
		/* XXX lookup UDF standard for unallocatable space */
		udf_node->i_flags |= IN_MODIFIED | IN_ACCESSED;
	}

	/* decrement outstanding_nodedscr */
	KASSERT(udf_node->outstanding_nodedscr >= 1);
	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		/* unlock the node */
		KASSERT(udf_node->i_flags & IN_CALLBACK_ULK);
		UDF_UNLOCK_NODE(udf_node, IN_CALLBACK_ULK);

		wakeup(&udf_node->outstanding_nodedscr);
	}
	/* unreference the vnode so it can be recycled */
	holdrele(udf_node->vnode);

	putiobuf(buf);
}

/* --------------------------------------------------------------------- */

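/*
 * Hand out a zeroed, logical-block sized node descriptor from the private
 * descriptor pool.
 */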
static int
udf_getblank_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr **dscrptr = &args->dscr;
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	*dscrptr = pool_get(&priv->desc_pool, PR_WAITOK);
	memset(*dscrptr, 0, lb_size);

	return 0;
}

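/* Return a node descriptor obtained from this strategy to its pool. */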
static void
udf_free_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr *dscr = args->dscr;
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);

	pool_put(&priv->desc_pool, dscr);
}

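/*
 * Read a node descriptor (fe/efe) directly from disc: translate the icb to
 * a physical sector, read it into a temporary buffer and copy the result
 * into a descriptor taken from the pool.
 */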
static int
udf_read_nodedscr_direct(struct udf_strat_args *args)
{
	union dscrptr **dscrptr = &args->dscr;
	union dscrptr *tmpdscr;
	struct udf_mount *ump = args->ump;
	struct long_ad *icb = args->icb;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;
	uint32_t sector, dummy;
	int error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);

	error = udf_translate_vtop(ump, icb, &sector, &dummy);
	if (error)
		return error;

	/* try to read in fe/efe */
	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &tmpdscr);
	if (error)
		return error;

	*dscrptr = pool_get(&priv->desc_pool, PR_WAITOK);
	memcpy(*dscrptr, tmpdscr, lb_size);
	free(tmpdscr, M_UDFTEMP);

	return 0;
}

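/*
 * Write a node descriptor to its (fixed) location.  For a synchronous
 * write the node bookkeeping (outstanding_nodedscr, node lock, vnode hold)
 * is finished here; for an asynchronous write it is deferred to
 * udf_wr_nodedscr_callback() once the I/O completes.
 */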
static int
udf_write_nodedscr_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct udf_node *udf_node = args->udf_node;
	union dscrptr *dscr = args->dscr;
	struct long_ad *icb = args->icb;
	int waitfor = args->waitfor;
	uint32_t logsector, sector, dummy;
	int error, vpart;

	/*
	 * We have to decide if we write it out sequentially or at its fixed
	 * position by examining the partition it is (to be) written on.
	 */
	vpart = udf_rw16(udf_node->loc.loc.part_num);
	logsector = udf_rw32(icb->loc.lb_num);
	KASSERT(ump->vtop_tp[vpart] != UDF_VTOP_TYPE_VIRT);

	sector = 0;
	error = udf_translate_vtop(ump, icb, &sector, &dummy);
	if (error)
		goto out;

	/* add reference to the vnode to prevent recycling */
	vhold(udf_node->vnode);

	if (waitfor) {
		DPRINTF(WRITE, ("udf_write_nodedscr: sync write\n"));

		error = udf_write_phys_dscr_sync(ump, udf_node, UDF_C_NODE,
			dscr, sector, logsector);
	} else {
		DPRINTF(WRITE, ("udf_write_nodedscr: no wait, async write\n"));

		error = udf_write_phys_dscr_async(ump, udf_node, UDF_C_NODE,
			dscr, sector, logsector, udf_wr_nodedscr_callback);
		/* will be UNLOCKED in call back */
		return error;
	}

	holdrele(udf_node->vnode);
out:
	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		UDF_UNLOCK_NODE(udf_node, 0);
		wakeup(&udf_node->outstanding_nodedscr);
	}

	return error;
}

/* --------------------------------------------------------------------- */

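/*
 * `Queue' a buffer: in the direct strategy nothing is actually queued.
 * Reads and rewrites are handed to the device right away; sequential
 * writes are late-allocated first and then issued as one nested iobuf per
 * contiguous run of physical sectors.
 */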
static void
udf_queue_buf_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct buf *buf = args->nestbuf;
	struct buf *nestbuf;
	struct desc_tag *tag;
	struct long_ad *node_ad_cpy;
	uint64_t *lmapping, *pmapping, *lmappos, blknr, run_start;
	uint32_t our_sectornr, sectornr;
	uint32_t lb_size, buf_offset, rbuflen, bpos;
	uint16_t vpart_num;
	uint8_t *fidblk;
	off_t rblk;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int len, buf_len, sector, sectors, run_length;
	int what, class, queue;

	KASSERT(ump);
	KASSERT(buf);
	KASSERT(buf->b_iodone == nestiobuf_iodone);

	what = buf->b_udf_c_type;
	queue = UDF_SHED_READING;
	if ((buf->b_flags & B_READ) == 0) {
		/* writing */
		queue = UDF_SHED_SEQWRITING;
		if (what == UDF_C_DSCR)
			queue = UDF_SHED_WRITING;
		if (what == UDF_C_NODE)
			queue = UDF_SHED_WRITING;
	}

	/* use disc scheduler */
	class = ump->discinfo.mmc_class;
	KASSERT((class == MMC_CLASS_UNKN) || (class == MMC_CLASS_DISC) ||
		(ump->discinfo.mmc_cur & MMC_CAP_HW_DEFECTFREE) ||
		(ump->vfs_mountp->mnt_flag & MNT_RDONLY));

	if (queue == UDF_SHED_READING) {
		DPRINTF(SHEDULE, ("\nudf_issue_buf READ %p : sector %d type %d,"
			"b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));
		VOP_STRATEGY(ump->devvp, buf);
		return;
	}

	/* (sectorsize == lb_size) for UDF */
	lb_size = udf_rw32(ump->logical_vol->lb_size);
	blknr = buf->b_blkno;
	our_sectornr = blknr / blks;

	if (queue == UDF_SHED_WRITING) {
		DPRINTF(SHEDULE, ("\nudf_issue_buf WRITE %p : sector %d "
			"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));
		/* if we have FIDs fixup using buffer's sector number(s) */
		if (buf->b_udf_c_type == UDF_C_FIDS) {
			panic("UDF_C_FIDS in SHED_WRITING!\n");
			buf_len = buf->b_bcount;
			sectornr = our_sectornr;
			bpos = 0;
			while (buf_len) {
				len = MIN(buf_len, sector_size);
				fidblk = (uint8_t *) buf->b_data + bpos;
				udf_fixup_fid_block(fidblk, sector_size,
					0, len, sectornr);
				sectornr++;
				bpos += len;
				buf_len -= len;
			}
		}
		udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);
		VOP_STRATEGY(ump->devvp, buf);
		return;
	}

	/* UDF_SHED_SEQWRITING */
	KASSERT(queue == UDF_SHED_SEQWRITING);
	DPRINTF(SHEDULE, ("\nudf_issue_buf SEQWRITE %p : sector XXXX "
		"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
		buf, buf->b_udf_c_type, buf->b_resid, buf->b_bcount,
		buf->b_bufsize));

	/*
	 * Buffers should not have been allocated to disc addresses yet on
	 * this queue. Note that a buffer can get multiple extents allocated.
	 *
	 * lmapping contains lb_num relative to base partition.
	 */
	lmapping = ump->la_lmapping;
	node_ad_cpy = ump->la_node_ad_cpy;

	/* logically allocate buf and map it in the file */
	udf_late_allocate_buf(ump, buf, lmapping, node_ad_cpy, &vpart_num);

	/* if we have FIDs, fixup using the new allocation table */
	if (buf->b_udf_c_type == UDF_C_FIDS) {
		buf_len = buf->b_bcount;
		bpos = 0;
		lmappos = lmapping;
		while (buf_len) {
			sectornr = *lmappos++;
			len = MIN(buf_len, sector_size);
			fidblk = (uint8_t *) buf->b_data + bpos;
			udf_fixup_fid_block(fidblk, sector_size,
				0, len, sectornr);
			bpos += len;
			buf_len -= len;
		}
	}
	if (buf->b_udf_c_type == UDF_C_METADATA_SBM) {
		if (buf->b_lblkno == 0) {
			/* update the tag location inside */
			tag = (struct desc_tag *) buf->b_data;
			tag->tag_loc = udf_rw32(*lmapping);
			udf_validate_tag_and_crc_sums(buf->b_data);
		}
	}
	udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);

	/*
	 * Translate new mappings in lmapping to pmappings and try to
	 * conglomerate extents to reduce the number of writes.
	 *
	 * pmapping will contain the lb_nums as used for disc addressing.
	 */
	pmapping = ump->la_pmapping;
	sectors = (buf->b_bcount + sector_size - 1) / sector_size;
	udf_translate_vtop_list(ump, sectors, vpart_num, lmapping, pmapping);

	for (sector = 0; sector < sectors; sector++) {
		buf_offset = sector * sector_size;
		DPRINTF(WRITE, ("\tprocessing rel sector %d\n", sector));

		DPRINTF(WRITE, ("\tissue write sector %"PRIu64"\n",
			pmapping[sector]));

		run_start = pmapping[sector];
		run_length = 1;
		while (sector < sectors-1) {
			if (pmapping[sector+1] != pmapping[sector]+1)
				break;
			run_length++;
			sector++;
		}

		/* nest an iobuf for the extent */
		rbuflen = run_length * sector_size;
		rblk = run_start * (sector_size/DEV_BSIZE);

		nestbuf = getiobuf(NULL, true);
		nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
		/* nestbuf is B_ASYNC */

		/* identify this nestbuf */
		nestbuf->b_lblkno = sector;
		assert(nestbuf->b_vp == buf->b_vp);

		/* CD schedules on raw blkno */
		nestbuf->b_blkno = rblk;
		nestbuf->b_proc = NULL;
		nestbuf->b_rawblkno = rblk;
		nestbuf->b_udf_c_type = UDF_C_PROCESSED;

		VOP_STRATEGY(ump->devvp, nestbuf);
	}
}


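/*
 * Initialise the private strategy state: allocate the per-mount structure
 * and create the pool that node descriptors are drawn from, sized to one
 * logical block each.
 */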
static void
udf_discstrat_init_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	uint32_t lb_size;

	KASSERT(priv == NULL);
	ump->strategy_private = malloc(sizeof(struct strat_private),
		M_UDFTEMP, M_WAITOK);
	priv = ump->strategy_private;
	memset(priv, 0, sizeof(struct strat_private));

	/*
	 * Initialise pool for descriptors associated with nodes. This is done
	 * in lb_size units though currently lb_size is dictated to be
	 * sector_size.
	 */
	memset(&priv->desc_pool, 0, sizeof(struct pool));

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	pool_init(&priv->desc_pool, lb_size, 0, 0, 0, "udf_desc_pool", NULL,
		IPL_NONE);
}

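/* Tear down the private strategy state set up in the init routine above. */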
static void
udf_discstrat_finish_direct(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);

	/* destroy our pool */
	pool_destroy(&priv->desc_pool);

	/* free our private space */
	free(ump->strategy_private, M_UDFTEMP);
	ump->strategy_private = NULL;
}

/* --------------------------------------------------------------------- */

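/*
 * Operations vector for the `direct' strategy, listing the functions
 * defined above in the order they are declared.
 */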
struct udf_strategy udf_strat_direct =
{
	udf_getblank_nodedscr_direct,
	udf_free_nodedscr_direct,
	udf_read_nodedscr_direct,
	udf_write_nodedscr_direct,
	udf_queue_buf_direct,
	udf_discstrat_init_direct,
	udf_discstrat_finish_direct
};
