rf_netbsdkintf.c revision 1.46
1 1.46 oster /* $NetBSD: rf_netbsdkintf.c,v 1.46 2000/01/09 03:39:13 oster Exp $ */
2 1.1 oster /*-
3 1.1 oster * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 1.1 oster * All rights reserved.
5 1.1 oster *
6 1.1 oster * This code is derived from software contributed to The NetBSD Foundation
7 1.1 oster * by Greg Oster; Jason R. Thorpe.
8 1.1 oster *
9 1.1 oster * Redistribution and use in source and binary forms, with or without
10 1.1 oster * modification, are permitted provided that the following conditions
11 1.1 oster * are met:
12 1.1 oster * 1. Redistributions of source code must retain the above copyright
13 1.1 oster * notice, this list of conditions and the following disclaimer.
14 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 oster * notice, this list of conditions and the following disclaimer in the
16 1.1 oster * documentation and/or other materials provided with the distribution.
17 1.1 oster * 3. All advertising materials mentioning features or use of this software
18 1.1 oster * must display the following acknowledgement:
19 1.1 oster * This product includes software developed by the NetBSD
20 1.1 oster * Foundation, Inc. and its contributors.
21 1.1 oster * 4. Neither the name of The NetBSD Foundation nor the names of its
22 1.1 oster * contributors may be used to endorse or promote products derived
23 1.1 oster * from this software without specific prior written permission.
24 1.1 oster *
25 1.1 oster * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 1.1 oster * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.1 oster * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.1 oster * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 1.1 oster * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.1 oster * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.1 oster * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.1 oster * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.1 oster * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.1 oster * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.1 oster * POSSIBILITY OF SUCH DAMAGE.
36 1.1 oster */
37 1.1 oster
38 1.1 oster /*
39 1.1 oster * Copyright (c) 1988 University of Utah.
40 1.1 oster * Copyright (c) 1990, 1993
41 1.1 oster * The Regents of the University of California. All rights reserved.
42 1.1 oster *
43 1.1 oster * This code is derived from software contributed to Berkeley by
44 1.1 oster * the Systems Programming Group of the University of Utah Computer
45 1.1 oster * Science Department.
46 1.1 oster *
47 1.1 oster * Redistribution and use in source and binary forms, with or without
48 1.1 oster * modification, are permitted provided that the following conditions
49 1.1 oster * are met:
50 1.1 oster * 1. Redistributions of source code must retain the above copyright
51 1.1 oster * notice, this list of conditions and the following disclaimer.
52 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
53 1.1 oster * notice, this list of conditions and the following disclaimer in the
54 1.1 oster * documentation and/or other materials provided with the distribution.
55 1.1 oster * 3. All advertising materials mentioning features or use of this software
56 1.1 oster * must display the following acknowledgement:
57 1.1 oster * This product includes software developed by the University of
58 1.1 oster * California, Berkeley and its contributors.
59 1.1 oster * 4. Neither the name of the University nor the names of its contributors
60 1.1 oster * may be used to endorse or promote products derived from this software
61 1.1 oster * without specific prior written permission.
62 1.1 oster *
63 1.1 oster * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 1.1 oster * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 1.1 oster * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 1.1 oster * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 1.1 oster * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 1.1 oster * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 1.1 oster * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 1.1 oster * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 1.1 oster * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 1.1 oster * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 1.1 oster * SUCH DAMAGE.
74 1.1 oster *
75 1.1 oster * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 1.1 oster *
77 1.1 oster * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 1.1 oster */
79 1.1 oster
80 1.1 oster
81 1.1 oster
82 1.1 oster
83 1.1 oster /*
84 1.1 oster * Copyright (c) 1995 Carnegie-Mellon University.
85 1.1 oster * All rights reserved.
86 1.1 oster *
87 1.1 oster * Authors: Mark Holland, Jim Zelenka
88 1.1 oster *
89 1.1 oster * Permission to use, copy, modify and distribute this software and
90 1.1 oster * its documentation is hereby granted, provided that both the copyright
91 1.1 oster * notice and this permission notice appear in all copies of the
92 1.1 oster * software, derivative works or modified versions, and any portions
93 1.1 oster * thereof, and that both notices appear in supporting documentation.
94 1.1 oster *
95 1.1 oster * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
96 1.1 oster * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
97 1.1 oster * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
98 1.1 oster *
99 1.1 oster * Carnegie Mellon requests users of this software to return to
100 1.1 oster *
101 1.1 oster * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
102 1.1 oster * School of Computer Science
103 1.1 oster * Carnegie Mellon University
104 1.1 oster * Pittsburgh PA 15213-3890
105 1.1 oster *
106 1.1 oster * any improvements or extensions that they make and grant Carnegie the
107 1.1 oster * rights to redistribute these changes.
108 1.1 oster */
109 1.1 oster
110 1.1 oster /***********************************************************
111 1.1 oster *
112 1.1 oster * rf_kintf.c -- the kernel interface routines for RAIDframe
113 1.1 oster *
114 1.1 oster ***********************************************************/
115 1.1 oster
116 1.1 oster #include <sys/errno.h>
117 1.1 oster #include <sys/param.h>
118 1.1 oster #include <sys/pool.h>
119 1.1 oster #include <sys/queue.h>
120 1.1 oster #include <sys/disk.h>
121 1.1 oster #include <sys/device.h>
122 1.1 oster #include <sys/stat.h>
123 1.1 oster #include <sys/ioctl.h>
124 1.1 oster #include <sys/fcntl.h>
125 1.1 oster #include <sys/systm.h>
126 1.1 oster #include <sys/namei.h>
127 1.1 oster #include <sys/vnode.h>
129 1.1 oster #include <sys/types.h>
130 1.1 oster #include <machine/types.h>
131 1.1 oster #include <sys/disklabel.h>
132 1.1 oster #include <sys/conf.h>
133 1.1 oster #include <sys/lock.h>
134 1.1 oster #include <sys/buf.h>
135 1.1 oster #include <sys/user.h>
136 1.8 oster
137 1.8 oster #include "raid.h"
138 1.1 oster #include "rf_raid.h"
139 1.1 oster #include "rf_raidframe.h"
140 1.44 oster #include "rf_copyback.h"
141 1.1 oster #include "rf_dag.h"
142 1.1 oster #include "rf_dagflags.h"
143 1.1 oster #include "rf_diskqueue.h"
144 1.1 oster #include "rf_acctrace.h"
145 1.1 oster #include "rf_etimer.h"
146 1.1 oster #include "rf_general.h"
147 1.1 oster #include "rf_debugMem.h"
148 1.1 oster #include "rf_kintf.h"
149 1.1 oster #include "rf_options.h"
150 1.1 oster #include "rf_driver.h"
151 1.1 oster #include "rf_parityscan.h"
152 1.1 oster #include "rf_debugprint.h"
153 1.1 oster #include "rf_threadstuff.h"
154 1.1 oster
155 1.9 oster int rf_kdebug_level = 0;
156 1.1 oster
157 1.1 oster #ifdef DEBUG
158 1.1 oster #define db1_printf(a) if (rf_kdebug_level > 0) printf a
159 1.9 oster #else /* DEBUG */
160 1.1 oster #define db1_printf(a) { }
161 1.9 oster #endif /* DEBUG */
162 1.1 oster
163 1.9 oster static RF_Raid_t **raidPtrs; /* global raid device descriptors */
164 1.1 oster
165 1.11 oster RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
166 1.1 oster
167 1.10 oster static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
168 1.10 oster * spare table */
169 1.10 oster static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
170 1.10 oster * installation process */
171 1.10 oster
172 1.1 oster /* prototypes */
173 1.10 oster static void KernelWakeupFunc(struct buf * bp);
174 1.10 oster static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
175 1.10 oster dev_t dev, RF_SectorNum_t startSect,
176 1.10 oster RF_SectorCount_t numSect, caddr_t buf,
177 1.10 oster void (*cbFunc) (struct buf *), void *cbArg,
178 1.10 oster int logBytesPerSector, struct proc * b_proc);
179 1.46 oster static int raidinit __P((dev_t, RF_Raid_t *, int));
180 1.1 oster
181 1.10 oster void raidattach __P((int));
182 1.10 oster int raidsize __P((dev_t));
183 1.10 oster int raidopen __P((dev_t, int, int, struct proc *));
184 1.10 oster int raidclose __P((dev_t, int, int, struct proc *));
185 1.10 oster int raidioctl __P((dev_t, u_long, caddr_t, int, struct proc *));
186 1.10 oster int raidwrite __P((dev_t, struct uio *, int));
187 1.10 oster int raidread __P((dev_t, struct uio *, int));
188 1.10 oster void raidstrategy __P((struct buf *));
189 1.10 oster int raiddump __P((dev_t, daddr_t, caddr_t, size_t));
190 1.1 oster
191 1.1 oster /*
192 1.1 oster * Pilfered from ccd.c
193 1.1 oster */
194 1.1 oster
195 1.10 oster struct raidbuf {
196 1.10 oster struct buf rf_buf; /* new I/O buf. MUST BE FIRST!!! */
197 1.10 oster struct buf *rf_obp; /* ptr. to original I/O buf */
198 1.10 oster int rf_flags; /* misc. flags */
199 1.11 oster RF_DiskQueueData_t *req;/* the request that this was part of.. */
200 1.10 oster };
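/*
 * Because rf_buf is the first member, a struct buf handed back by the
 * lower layers can be converted back to its enclosing raidbuf with a
 * plain cast -- a sketch of the idiom (not a quote from this file):
 *
 *	struct raidbuf *raidbp = (struct raidbuf *) bp;
 *
 * which is why the "MUST BE FIRST" requirement above matters.
 */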
201 1.1 oster
202 1.1 oster
203 1.1 oster #define RAIDGETBUF(rs) pool_get(&(rs)->sc_cbufpool, PR_NOWAIT)
204 1.1 oster #define RAIDPUTBUF(rs, cbp) pool_put(&(rs)->sc_cbufpool, cbp)
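/*
 * Note that pool_get() with PR_NOWAIT returns NULL when the pool is
 * exhausted, so callers of RAIDGETBUF() must be prepared to handle a
 * NULL component buffer rather than assume the allocation succeeded.
 */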
205 1.1 oster
206 1.9 oster /* XXX Not sure if the following should be replacing the raidPtrs above,
207 1.10 oster or if it should be used in conjunction with that... */
208 1.1 oster
209 1.10 oster struct raid_softc {
210 1.10 oster int sc_flags; /* flags */
211 1.10 oster int sc_cflags; /* configuration flags */
212 1.11 oster size_t sc_size; /* size of the raid device */
213 1.11 oster dev_t sc_dev; /* our device.. */
214 1.10 oster char sc_xname[20]; /* XXX external name */
215 1.10 oster struct disk sc_dkdev; /* generic disk device info */
216 1.10 oster struct pool sc_cbufpool; /* component buffer pool */
217 1.34 oster struct buf buf_queue; /* used for the device queue */
218 1.10 oster };
219 1.1 oster /* sc_flags */
220 1.1 oster #define RAIDF_INITED 0x01 /* unit has been initialized */
221 1.1 oster #define RAIDF_WLABEL 0x02 /* label area is writable */
222 1.1 oster #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
223 1.1 oster #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
224 1.1 oster #define RAIDF_LOCKED 0x80 /* unit is locked */
225 1.1 oster
226 1.1 oster #define raidunit(x) DISKUNIT(x)
227 1.10 oster static int numraid = 0;
228 1.1 oster
229 1.20 oster /*
230 1.20 oster * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
231 1.20 oster * Be aware that large numbers can allow the driver to consume a lot of
232 1.28 oster * kernel memory, especially on writes, and in degraded mode reads.
233 1.28 oster *
234 1.28 oster * For example: with a stripe width of 64 blocks (32k) and 5 disks,
235 1.28 oster * a single 64K write will typically require 64K for the old data,
236 1.28 oster * 64K for the old parity, and 64K for the new parity, for a total
237 1.28 oster * of 192K (if the parity buffer is not re-used immediately).
238 1.28 oster * Even if it is used immediately, that's still 128K, which when multiplied
239 1.28 oster * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
240 1.28 oster *
241 1.28 oster * Now in degraded mode, for example, a 64K read on the above setup may
242 1.28 oster * require data reconstruction, which will require *all* of the 4 remaining
243 1.28 oster * disks to participate -- 4 * 32K/disk == 128K again.
244 1.20 oster */
245 1.20 oster
246 1.20 oster #ifndef RAIDOUTSTANDING
247 1.28 oster #define RAIDOUTSTANDING 6
248 1.20 oster #endif
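/*
 * Putting the figures above together (a rough estimate only): with
 * RAIDOUTSTANDING == 6 and the 5-disk, 32k stripe-unit example, six
 * simultaneous 64K writes could tie up on the order of 6 * 192K = 1152K
 * of buffer space, on top of 6 * 64K = 384K of incoming data.
 */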
249 1.20 oster
250 1.1 oster #define RAIDLABELDEV(dev) \
251 1.1 oster (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
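/*
 * RAIDLABELDEV(dev) remaps any raid partition device to the raw
 * partition of the same unit; it is used in the DIOCWDINFO case below
 * as the device on which the on-disk label is written.
 */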
252 1.1 oster
253 1.1 oster /* declared here, and made public, for the benefit of KVM stuff.. */
254 1.10 oster struct raid_softc *raid_softc;
255 1.9 oster
256 1.10 oster static void raidgetdefaultlabel __P((RF_Raid_t *, struct raid_softc *,
257 1.10 oster struct disklabel *));
258 1.10 oster static void raidgetdisklabel __P((dev_t));
259 1.10 oster static void raidmakedisklabel __P((struct raid_softc *));
260 1.1 oster
261 1.10 oster static int raidlock __P((struct raid_softc *));
262 1.10 oster static void raidunlock __P((struct raid_softc *));
263 1.1 oster
264 1.12 oster static void rf_markalldirty __P((RF_Raid_t *));
265 1.1 oster
266 1.37 oster void rf_ReconThread __P((struct rf_recon_req *));
267 1.37 oster /* XXX what I want is: */
268 1.37 oster /*void rf_ReconThread __P((RF_Raid_t *raidPtr)); */
269 1.37 oster void rf_RewriteParityThread __P((RF_Raid_t *raidPtr));
270 1.37 oster void rf_CopybackThread __P((RF_Raid_t *raidPtr));
271 1.37 oster void rf_ReconstructInPlaceThread __P((struct rf_recon_req *));
272 1.37 oster
273 1.10 oster void
274 1.10 oster raidattach(num)
275 1.9 oster int num;
276 1.1 oster {
277 1.14 oster int raidID;
278 1.14 oster int i, rc;
279 1.1 oster
280 1.1 oster #ifdef DEBUG
281 1.9 oster printf("raidattach: Asked for %d units\n", num);
282 1.1 oster #endif
283 1.1 oster
284 1.1 oster if (num <= 0) {
285 1.1 oster #ifdef DIAGNOSTIC
286 1.1 oster panic("raidattach: count <= 0");
287 1.1 oster #endif
288 1.1 oster return;
289 1.1 oster }
290 1.9 oster /* This is where all the initialization stuff gets done. */
291 1.1 oster
292 1.1 oster /* Make some space for requested number of units... */
293 1.1 oster
294 1.1 oster RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
295 1.1 oster if (raidPtrs == NULL) {
296 1.1 oster panic("raidPtrs is NULL!!\n");
297 1.1 oster }
298 1.14 oster
299 1.14 oster rc = rf_mutex_init(&rf_sparet_wait_mutex);
300 1.14 oster if (rc) {
301 1.14 oster RF_PANIC();
302 1.14 oster }
303 1.14 oster
304 1.14 oster rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
305 1.14 oster
306 1.14 oster for (i = 0; i < numraid; i++)
307 1.14 oster raidPtrs[i] = NULL;
308 1.14 oster rc = rf_BootRaidframe();
309 1.14 oster if (rc == 0)
310 1.14 oster printf("Kernelized RAIDframe activated\n");
311 1.14 oster else
312 1.1 oster panic("Serious error booting RAID!!\n");
313 1.14 oster
314 1.9 oster /* put together some data structures like the CCD device does.  This
315 1.9 oster * lets us lock the device and what-not when it gets opened. */
316 1.1 oster
317 1.1 oster raid_softc = (struct raid_softc *)
318 1.9 oster malloc(num * sizeof(struct raid_softc),
319 1.9 oster M_RAIDFRAME, M_NOWAIT);
320 1.1 oster if (raid_softc == NULL) {
321 1.1 oster printf("WARNING: no memory for RAIDframe driver\n");
322 1.1 oster return;
323 1.1 oster }
324 1.1 oster numraid = num;
325 1.1 oster bzero(raid_softc, num * sizeof(struct raid_softc));
326 1.34 oster
327 1.9 oster for (raidID = 0; raidID < num; raidID++) {
328 1.35 oster raid_softc[raidID].buf_queue.b_actf = NULL;
329 1.35 oster raid_softc[raidID].buf_queue.b_actb =
330 1.35 oster &raid_softc[raidID].buf_queue.b_actf;
331 1.9 oster RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
332 1.11 oster (RF_Raid_t *));
333 1.9 oster if (raidPtrs[raidID] == NULL) {
334 1.39 oster printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
335 1.39 oster numraid = raidID;
336 1.39 oster return;
337 1.1 oster }
338 1.1 oster }
339 1.1 oster }
340 1.1 oster
341 1.1 oster
342 1.1 oster int
343 1.1 oster raidsize(dev)
344 1.9 oster dev_t dev;
345 1.1 oster {
346 1.1 oster struct raid_softc *rs;
347 1.1 oster struct disklabel *lp;
348 1.9 oster int part, unit, omask, size;
349 1.1 oster
350 1.1 oster unit = raidunit(dev);
351 1.1 oster if (unit >= numraid)
352 1.1 oster return (-1);
353 1.1 oster rs = &raid_softc[unit];
354 1.1 oster
355 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
356 1.1 oster return (-1);
357 1.1 oster
358 1.1 oster part = DISKPART(dev);
359 1.1 oster omask = rs->sc_dkdev.dk_openmask & (1 << part);
360 1.1 oster lp = rs->sc_dkdev.dk_label;
361 1.1 oster
362 1.1 oster if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
363 1.1 oster return (-1);
364 1.1 oster
365 1.1 oster if (lp->d_partitions[part].p_fstype != FS_SWAP)
366 1.1 oster size = -1;
367 1.1 oster else
368 1.1 oster size = lp->d_partitions[part].p_size *
369 1.1 oster (lp->d_secsize / DEV_BSIZE);
370 1.1 oster
371 1.1 oster if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
372 1.1 oster return (-1);
373 1.1 oster
374 1.1 oster return (size);
375 1.1 oster
376 1.1 oster }
377 1.1 oster
378 1.1 oster int
379 1.1 oster raiddump(dev, blkno, va, size)
380 1.9 oster dev_t dev;
381 1.1 oster daddr_t blkno;
382 1.1 oster caddr_t va;
383 1.9 oster size_t size;
384 1.1 oster {
385 1.1 oster /* Not implemented. */
386 1.1 oster return ENXIO;
387 1.1 oster }
388 1.1 oster /* ARGSUSED */
389 1.1 oster int
390 1.1 oster raidopen(dev, flags, fmt, p)
391 1.9 oster dev_t dev;
392 1.9 oster int flags, fmt;
393 1.1 oster struct proc *p;
394 1.1 oster {
395 1.9 oster int unit = raidunit(dev);
396 1.1 oster struct raid_softc *rs;
397 1.1 oster struct disklabel *lp;
398 1.9 oster int part, pmask;
399 1.9 oster int error = 0;
400 1.9 oster
401 1.1 oster if (unit >= numraid)
402 1.1 oster return (ENXIO);
403 1.1 oster rs = &raid_softc[unit];
404 1.1 oster
405 1.1 oster if ((error = raidlock(rs)) != 0)
406 1.9 oster return (error);
407 1.1 oster lp = rs->sc_dkdev.dk_label;
408 1.1 oster
409 1.1 oster part = DISKPART(dev);
410 1.1 oster pmask = (1 << part);
411 1.1 oster
412 1.1 oster db1_printf(("Opening raid device number: %d partition: %d\n",
413 1.14 oster unit, part));
414 1.1 oster
415 1.1 oster
416 1.1 oster if ((rs->sc_flags & RAIDF_INITED) &&
417 1.1 oster (rs->sc_dkdev.dk_openmask == 0))
418 1.9 oster raidgetdisklabel(dev);
419 1.1 oster
420 1.1 oster /* make sure that this partition exists */
421 1.1 oster
422 1.1 oster if (part != RAW_PART) {
423 1.1 oster db1_printf(("Not a raw partition..\n"));
424 1.1 oster if (((rs->sc_flags & RAIDF_INITED) == 0) ||
425 1.1 oster ((part >= lp->d_npartitions) ||
426 1.9 oster (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
427 1.1 oster error = ENXIO;
428 1.1 oster raidunlock(rs);
429 1.1 oster db1_printf(("Bailing out...\n"));
430 1.9 oster return (error);
431 1.1 oster }
432 1.1 oster }
433 1.1 oster /* Prevent this unit from being unconfigured while open. */
434 1.1 oster switch (fmt) {
435 1.1 oster case S_IFCHR:
436 1.1 oster rs->sc_dkdev.dk_copenmask |= pmask;
437 1.1 oster break;
438 1.1 oster
439 1.1 oster case S_IFBLK:
440 1.1 oster rs->sc_dkdev.dk_bopenmask |= pmask;
441 1.1 oster break;
442 1.1 oster }
443 1.13 oster
444 1.13 oster if ((rs->sc_dkdev.dk_openmask == 0) &&
445 1.13 oster ((rs->sc_flags & RAIDF_INITED) != 0)) {
446 1.13 oster /* First one... mark things as dirty... Note that we *MUST*
447 1.13 oster have done a configure before this. I DO NOT WANT TO BE
448 1.13 oster SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
449 1.13 oster THAT THEY BELONG TOGETHER!!!!! */
450 1.13 oster /* XXX should check to see if we're only open for reading
451 1.13 oster here... If so, we needn't do this, but then need some
452 1.13 oster other way of keeping track of what's happened.. */
453 1.13 oster
454 1.13 oster rf_markalldirty( raidPtrs[unit] );
455 1.13 oster }
456 1.13 oster
457 1.13 oster
458 1.1 oster rs->sc_dkdev.dk_openmask =
459 1.1 oster rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
460 1.1 oster
461 1.1 oster raidunlock(rs);
462 1.1 oster
463 1.9 oster return (error);
464 1.1 oster
465 1.1 oster
466 1.1 oster }
467 1.1 oster /* ARGSUSED */
468 1.1 oster int
469 1.1 oster raidclose(dev, flags, fmt, p)
470 1.9 oster dev_t dev;
471 1.9 oster int flags, fmt;
472 1.1 oster struct proc *p;
473 1.1 oster {
474 1.9 oster int unit = raidunit(dev);
475 1.1 oster struct raid_softc *rs;
476 1.9 oster int error = 0;
477 1.9 oster int part;
478 1.1 oster
479 1.1 oster if (unit >= numraid)
480 1.1 oster return (ENXIO);
481 1.1 oster rs = &raid_softc[unit];
482 1.1 oster
483 1.1 oster if ((error = raidlock(rs)) != 0)
484 1.1 oster return (error);
485 1.1 oster
486 1.1 oster part = DISKPART(dev);
487 1.1 oster
488 1.1 oster /* ...that much closer to allowing unconfiguration... */
489 1.1 oster switch (fmt) {
490 1.1 oster case S_IFCHR:
491 1.1 oster rs->sc_dkdev.dk_copenmask &= ~(1 << part);
492 1.1 oster break;
493 1.1 oster
494 1.1 oster case S_IFBLK:
495 1.1 oster rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
496 1.1 oster break;
497 1.1 oster }
498 1.1 oster rs->sc_dkdev.dk_openmask =
499 1.1 oster rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
500 1.13 oster
501 1.13 oster if ((rs->sc_dkdev.dk_openmask == 0) &&
502 1.13 oster ((rs->sc_flags & RAIDF_INITED) != 0)) {
503 1.13 oster /* Last one... device is not unconfigured yet.
504 1.13 oster Device shutdown has taken care of setting the
505 1.13 oster clean bits if RAIDF_INITED is not set.
506 1.13 oster Mark things as clean... */
507 1.13 oster rf_update_component_labels( raidPtrs[unit] );
508 1.13 oster }
509 1.1 oster
510 1.1 oster raidunlock(rs);
511 1.1 oster return (0);
512 1.1 oster
513 1.1 oster }
514 1.1 oster
515 1.1 oster void
516 1.1 oster raidstrategy(bp)
517 1.1 oster register struct buf *bp;
518 1.1 oster {
519 1.1 oster register int s;
520 1.1 oster
521 1.1 oster unsigned int raidID = raidunit(bp->b_dev);
522 1.1 oster RF_Raid_t *raidPtr;
523 1.1 oster struct raid_softc *rs = &raid_softc[raidID];
524 1.1 oster struct disklabel *lp;
525 1.34 oster struct buf *dp;
526 1.9 oster int wlabel;
527 1.1 oster
528 1.30 oster if ((rs->sc_flags & RAIDF_INITED) == 0) {
529 1.30 oster bp->b_error = ENXIO;
530 1.30 oster bp->b_flags |= B_ERROR;
531 1.30 oster bp->b_resid = bp->b_bcount;
532 1.30 oster biodone(bp);
533 1.1 oster return;
534 1.30 oster }
535 1.1 oster if (raidID >= numraid || !raidPtrs[raidID]) {
536 1.1 oster bp->b_error = ENODEV;
537 1.1 oster bp->b_flags |= B_ERROR;
538 1.1 oster bp->b_resid = bp->b_bcount;
539 1.1 oster biodone(bp);
540 1.1 oster return;
541 1.1 oster }
542 1.1 oster raidPtr = raidPtrs[raidID];
543 1.1 oster if (!raidPtr->valid) {
544 1.1 oster bp->b_error = ENODEV;
545 1.1 oster bp->b_flags |= B_ERROR;
546 1.1 oster bp->b_resid = bp->b_bcount;
547 1.1 oster biodone(bp);
548 1.1 oster return;
549 1.1 oster }
550 1.1 oster if (bp->b_bcount == 0) {
551 1.1 oster db1_printf(("b_bcount is zero..\n"));
552 1.1 oster biodone(bp);
553 1.1 oster return;
554 1.1 oster }
555 1.1 oster lp = rs->sc_dkdev.dk_label;
556 1.1 oster
557 1.1 oster /*
558 1.1 oster * Do bounds checking and adjust transfer. If there's an
559 1.1 oster * error, the bounds check will flag that for us.
560 1.1 oster */
561 1.1 oster
562 1.9 oster wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
563 1.1 oster if (DISKPART(bp->b_dev) != RAW_PART)
564 1.1 oster if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
565 1.1 oster db1_printf(("Bounds check failed!!:%d %d\n",
566 1.9 oster (int) bp->b_blkno, (int) wlabel));
567 1.1 oster biodone(bp);
568 1.1 oster return;
569 1.1 oster }
570 1.34 oster s = splbio();
571 1.1 oster
572 1.1 oster bp->b_resid = 0;
573 1.34 oster
574 1.34 oster /* stuff it onto our queue */
575 1.34 oster
576 1.34 oster dp = &rs->buf_queue;
577 1.34 oster bp->b_actf = NULL;
578 1.34 oster bp->b_actb = dp->b_actb;
579 1.34 oster *dp->b_actb = bp;
580 1.34 oster dp->b_actb = &bp->b_actf;
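	/*
	 * bp is now linked at the tail of this unit's buf_queue: b_actf
	 * chains the requests forward, and each b_actb points back at
	 * the pointer that references the buf, so removal is O(1).
	 * raidstart() pulls requests off the head of this queue as
	 * raidPtr->openings allows.
	 */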
581 1.34 oster
582 1.34 oster raidstart(raidPtrs[raidID]);
583 1.34 oster
584 1.1 oster splx(s);
585 1.1 oster }
586 1.1 oster /* ARGSUSED */
587 1.1 oster int
588 1.1 oster raidread(dev, uio, flags)
589 1.9 oster dev_t dev;
590 1.1 oster struct uio *uio;
591 1.9 oster int flags;
592 1.1 oster {
593 1.9 oster int unit = raidunit(dev);
594 1.1 oster struct raid_softc *rs;
595 1.9 oster int part;
596 1.1 oster
597 1.1 oster if (unit >= numraid)
598 1.1 oster return (ENXIO);
599 1.1 oster rs = &raid_softc[unit];
600 1.1 oster
601 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
602 1.1 oster return (ENXIO);
603 1.1 oster part = DISKPART(dev);
604 1.1 oster
605 1.9 oster db1_printf(("raidread: unit: %d partition: %d\n", unit, part));
606 1.1 oster
607 1.1 oster return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
608 1.1 oster
609 1.1 oster }
610 1.1 oster /* ARGSUSED */
611 1.1 oster int
612 1.1 oster raidwrite(dev, uio, flags)
613 1.9 oster dev_t dev;
614 1.1 oster struct uio *uio;
615 1.9 oster int flags;
616 1.1 oster {
617 1.9 oster int unit = raidunit(dev);
618 1.1 oster struct raid_softc *rs;
619 1.1 oster
620 1.1 oster if (unit >= numraid)
621 1.1 oster return (ENXIO);
622 1.1 oster rs = &raid_softc[unit];
623 1.1 oster
624 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
625 1.1 oster return (ENXIO);
626 1.1 oster db1_printf(("raidwrite\n"));
627 1.1 oster return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
628 1.1 oster
629 1.1 oster }
630 1.1 oster
631 1.1 oster int
632 1.1 oster raidioctl(dev, cmd, data, flag, p)
633 1.9 oster dev_t dev;
634 1.9 oster u_long cmd;
635 1.1 oster caddr_t data;
636 1.9 oster int flag;
637 1.1 oster struct proc *p;
638 1.1 oster {
639 1.9 oster int unit = raidunit(dev);
640 1.9 oster int error = 0;
641 1.9 oster int part, pmask;
642 1.1 oster struct raid_softc *rs;
643 1.1 oster RF_Config_t *k_cfg, *u_cfg;
644 1.42 oster RF_Raid_t *raidPtr;
645 1.41 oster RF_AccTotals_t *totals;
646 1.41 oster RF_DeviceConfig_t *d_cfg, **ucfgp;
647 1.1 oster u_char *specific_buf;
648 1.11 oster int retcode = 0;
649 1.11 oster int row;
650 1.11 oster int column;
651 1.1 oster struct rf_recon_req *rrcopy, *rr;
652 1.11 oster RF_ComponentLabel_t *component_label;
653 1.11 oster RF_ComponentLabel_t ci_label;
654 1.11 oster RF_ComponentLabel_t **c_label_ptr;
655 1.12 oster RF_SingleComponent_t *sparePtr,*componentPtr;
656 1.12 oster RF_SingleComponent_t hot_spare;
657 1.12 oster RF_SingleComponent_t component;
658 1.41 oster int i, j, d;
659 1.1 oster
660 1.1 oster if (unit >= numraid)
661 1.1 oster return (ENXIO);
662 1.1 oster rs = &raid_softc[unit];
663 1.42 oster raidPtr = raidPtrs[unit];
664 1.1 oster
665 1.9 oster db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
666 1.9 oster (int) DISKPART(dev), (int) unit, (int) cmd));
667 1.1 oster
668 1.1 oster /* Must be open for writes for these commands... */
669 1.1 oster switch (cmd) {
670 1.1 oster case DIOCSDINFO:
671 1.1 oster case DIOCWDINFO:
672 1.1 oster case DIOCWLABEL:
673 1.1 oster if ((flag & FWRITE) == 0)
674 1.1 oster return (EBADF);
675 1.1 oster }
676 1.1 oster
677 1.1 oster /* Must be initialized for these... */
678 1.1 oster switch (cmd) {
679 1.1 oster case DIOCGDINFO:
680 1.1 oster case DIOCSDINFO:
681 1.1 oster case DIOCWDINFO:
682 1.1 oster case DIOCGPART:
683 1.1 oster case DIOCWLABEL:
684 1.1 oster case DIOCGDEFLABEL:
685 1.1 oster case RAIDFRAME_SHUTDOWN:
686 1.1 oster case RAIDFRAME_REWRITEPARITY:
687 1.1 oster case RAIDFRAME_GET_INFO:
688 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
689 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
690 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
691 1.1 oster case RAIDFRAME_GET_SIZE:
692 1.1 oster case RAIDFRAME_FAIL_DISK:
693 1.1 oster case RAIDFRAME_COPYBACK:
694 1.37 oster case RAIDFRAME_CHECK_RECON_STATUS:
695 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
696 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
697 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
698 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
699 1.11 oster case RAIDFRAME_INIT_LABELS:
700 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
701 1.23 oster case RAIDFRAME_CHECK_PARITY:
702 1.37 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
703 1.37 oster case RAIDFRAME_CHECK_COPYBACK_STATUS:
704 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
705 1.1 oster return (ENXIO);
706 1.1 oster }
707 1.9 oster
708 1.1 oster switch (cmd) {
709 1.1 oster
710 1.1 oster /* configure the system */
711 1.1 oster case RAIDFRAME_CONFIGURE:
712 1.1 oster /* copy-in the configuration information */
713 1.1 oster /* data points to a pointer to the configuration structure */
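		/*
		 * (Illustrative only: from userland, assuming fd is an
		 * open raid control device and cfg is a filled-in
		 * RF_Config_t, the call would look roughly like
		 *
		 *	RF_Config_t *cfgp = &cfg;
		 *	ioctl(fd, RAIDFRAME_CONFIGURE, &cfgp);
		 *
		 * i.e. the ioctl argument is the address of a pointer,
		 * which is why data is cast to RF_Config_t ** below.)
		 */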
714 1.43 oster
715 1.9 oster u_cfg = *((RF_Config_t **) data);
716 1.9 oster RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
717 1.1 oster if (k_cfg == NULL) {
718 1.9 oster return (ENOMEM);
719 1.1 oster }
720 1.9 oster retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
721 1.9 oster sizeof(RF_Config_t));
722 1.1 oster if (retcode) {
723 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
724 1.46 oster db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
725 1.9 oster retcode));
726 1.9 oster return (retcode);
727 1.1 oster }
728 1.9 oster /* allocate a buffer for the layout-specific data, and copy it
729 1.9 oster * in */
730 1.1 oster if (k_cfg->layoutSpecificSize) {
731 1.9 oster if (k_cfg->layoutSpecificSize > 10000) {
732 1.1 oster /* sanity check */
733 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
734 1.9 oster return (EINVAL);
735 1.1 oster }
736 1.9 oster RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
737 1.9 oster (u_char *));
738 1.1 oster if (specific_buf == NULL) {
739 1.9 oster RF_Free(k_cfg, sizeof(RF_Config_t));
740 1.9 oster return (ENOMEM);
741 1.1 oster }
742 1.9 oster retcode = copyin(k_cfg->layoutSpecific,
743 1.9 oster (caddr_t) specific_buf,
744 1.9 oster k_cfg->layoutSpecificSize);
745 1.1 oster if (retcode) {
746 1.33 oster RF_Free(k_cfg, sizeof(RF_Config_t));
747 1.42 oster RF_Free(specific_buf,
748 1.42 oster k_cfg->layoutSpecificSize);
749 1.46 oster db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
750 1.9 oster retcode));
751 1.9 oster return (retcode);
752 1.1 oster }
753 1.9 oster } else
754 1.9 oster specific_buf = NULL;
755 1.1 oster k_cfg->layoutSpecific = specific_buf;
756 1.9 oster
757 1.9 oster /* should do some kind of sanity check on the configuration.
758 1.9 oster * Store the sum of all the bytes in the last byte? */
759 1.1 oster
760 1.1 oster /* configure the system */
761 1.1 oster
762 1.42 oster raidPtr->raidid = unit;
763 1.20 oster
764 1.42 oster retcode = rf_Configure(raidPtr, k_cfg);
765 1.1 oster
766 1.40 oster if (retcode == 0) {
767 1.37 oster
768 1.40 oster /* allow this many simultaneous IO's to
769 1.40 oster this RAID device */
770 1.42 oster raidPtr->openings = RAIDOUTSTANDING;
771 1.40 oster
772 1.40 oster /* XXX should be moved to rf_Configure() */
773 1.40 oster
774 1.42 oster raidPtr->copyback_in_progress = 0;
775 1.42 oster raidPtr->parity_rewrite_in_progress = 0;
776 1.42 oster raidPtr->recon_in_progress = 0;
777 1.37 oster
778 1.42 oster retcode = raidinit(dev, raidPtr, unit);
779 1.42 oster rf_markalldirty( raidPtr );
780 1.9 oster }
781 1.1 oster /* free the buffers. No return code here. */
782 1.1 oster if (k_cfg->layoutSpecificSize) {
783 1.9 oster RF_Free(specific_buf, k_cfg->layoutSpecificSize);
784 1.1 oster }
785 1.9 oster RF_Free(k_cfg, sizeof(RF_Config_t));
786 1.9 oster
787 1.9 oster return (retcode);
788 1.9 oster
789 1.9 oster /* shutdown the system */
790 1.1 oster case RAIDFRAME_SHUTDOWN:
791 1.9 oster
792 1.9 oster if ((error = raidlock(rs)) != 0)
793 1.9 oster return (error);
794 1.1 oster
795 1.1 oster /*
796 1.1 oster * If somebody has a partition mounted, we shouldn't
797 1.1 oster * shutdown.
798 1.1 oster */
799 1.1 oster
800 1.1 oster part = DISKPART(dev);
801 1.1 oster pmask = (1 << part);
802 1.9 oster if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
803 1.9 oster ((rs->sc_dkdev.dk_bopenmask & pmask) &&
804 1.9 oster (rs->sc_dkdev.dk_copenmask & pmask))) {
805 1.9 oster raidunlock(rs);
806 1.9 oster return (EBUSY);
807 1.9 oster }
808 1.11 oster
809 1.42 oster retcode = rf_Shutdown(raidPtr);
810 1.1 oster
811 1.1 oster pool_destroy(&rs->sc_cbufpool);
812 1.1 oster
813 1.1 oster /* It's no longer initialized... */
814 1.1 oster rs->sc_flags &= ~RAIDF_INITED;
815 1.16 oster
816 1.9 oster /* Detach the disk. */
817 1.9 oster disk_detach(&rs->sc_dkdev);
818 1.1 oster
819 1.1 oster raidunlock(rs);
820 1.1 oster
821 1.9 oster return (retcode);
822 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
823 1.11 oster c_label_ptr = (RF_ComponentLabel_t **) data;
824 1.11 oster /* need to read the component label for the disk indicated
825 1.43 oster by row,column in component_label */
826 1.11 oster
827 1.11 oster /* For practice, let's get it directly from disk, rather
828 1.11 oster than from the in-core copy */
829 1.11 oster RF_Malloc( component_label, sizeof( RF_ComponentLabel_t ),
830 1.11 oster (RF_ComponentLabel_t *));
831 1.11 oster if (component_label == NULL)
832 1.11 oster return (ENOMEM);
833 1.11 oster
834 1.11 oster bzero((char *) component_label, sizeof(RF_ComponentLabel_t));
835 1.11 oster
836 1.11 oster retcode = copyin( *c_label_ptr, component_label,
837 1.11 oster sizeof(RF_ComponentLabel_t));
838 1.11 oster
839 1.11 oster if (retcode) {
840 1.33 oster RF_Free( component_label, sizeof(RF_ComponentLabel_t));
841 1.11 oster return(retcode);
842 1.11 oster }
843 1.11 oster
844 1.11 oster row = component_label->row;
845 1.11 oster column = component_label->column;
846 1.26 oster
847 1.42 oster if ((row < 0) || (row >= raidPtr->numRow) ||
848 1.42 oster (column < 0) || (column >= raidPtr->numCol)) {
849 1.33 oster RF_Free( component_label, sizeof(RF_ComponentLabel_t));
850 1.26 oster return(EINVAL);
851 1.11 oster }
852 1.11 oster
853 1.11 oster raidread_component_label(
854 1.42 oster raidPtr->Disks[row][column].dev,
855 1.42 oster raidPtr->raid_cinfo[row][column].ci_vp,
856 1.11 oster component_label );
857 1.11 oster
858 1.11 oster retcode = copyout((caddr_t) component_label,
859 1.11 oster (caddr_t) *c_label_ptr,
860 1.11 oster sizeof(RF_ComponentLabel_t));
861 1.11 oster RF_Free( component_label, sizeof(RF_ComponentLabel_t));
862 1.11 oster return (retcode);
863 1.11 oster
864 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
865 1.11 oster component_label = (RF_ComponentLabel_t *) data;
866 1.11 oster
867 1.11 oster /* XXX check the label for valid stuff... */
868 1.11 oster /* Note that some things *should not* get modified --
869 1.11 oster the user should be re-initing the labels instead of
870 1.11 oster trying to patch things.
871 1.11 oster */
872 1.11 oster
873 1.11 oster printf("Got component label:\n");
874 1.11 oster printf("Version: %d\n",component_label->version);
875 1.11 oster printf("Serial Number: %d\n",component_label->serial_number);
876 1.11 oster printf("Mod counter: %d\n",component_label->mod_counter);
877 1.11 oster printf("Row: %d\n", component_label->row);
878 1.11 oster printf("Column: %d\n", component_label->column);
879 1.11 oster printf("Num Rows: %d\n", component_label->num_rows);
880 1.11 oster printf("Num Columns: %d\n", component_label->num_columns);
881 1.11 oster printf("Clean: %d\n", component_label->clean);
882 1.11 oster printf("Status: %d\n", component_label->status);
883 1.11 oster
884 1.11 oster row = component_label->row;
885 1.11 oster column = component_label->column;
886 1.12 oster
887 1.42 oster if ((row < 0) || (row >= raidPtr->numRow) ||
888 1.42 oster (column < 0) || (column >= raidPtr->numCol)) {
889 1.12 oster return(EINVAL);
890 1.11 oster }
891 1.12 oster
892 1.12 oster /* XXX this isn't allowed to do anything for now :-) */
893 1.12 oster #if 0
894 1.11 oster raidwrite_component_label(
895 1.42 oster raidPtr->Disks[row][column].dev,
896 1.42 oster raidPtr->raid_cinfo[row][column].ci_vp,
897 1.11 oster component_label );
898 1.12 oster #endif
899 1.12 oster return (0);
900 1.11 oster
901 1.11 oster case RAIDFRAME_INIT_LABELS:
902 1.11 oster component_label = (RF_ComponentLabel_t *) data;
903 1.11 oster /*
904 1.11 oster we only want the serial number from
905 1.11 oster the above. We get all the rest of the information
906 1.11 oster from the config that was used to create this RAID
907 1.11 oster set.
908 1.11 oster */
909 1.12 oster
910 1.42 oster raidPtr->serial_number = component_label->serial_number;
911 1.12 oster /* current version number */
912 1.12 oster ci_label.version = RF_COMPONENT_LABEL_VERSION;
913 1.11 oster ci_label.serial_number = component_label->serial_number;
914 1.42 oster ci_label.mod_counter = raidPtr->mod_counter;
915 1.42 oster ci_label.num_rows = raidPtr->numRow;
916 1.42 oster ci_label.num_columns = raidPtr->numCol;
917 1.11 oster ci_label.clean = RF_RAID_DIRTY; /* not clean */
918 1.11 oster ci_label.status = rf_ds_optimal; /* "It's good!" */
919 1.11 oster
920 1.42 oster for(row=0;row<raidPtr->numRow;row++) {
921 1.11 oster ci_label.row = row;
922 1.42 oster for(column=0;column<raidPtr->numCol;column++) {
923 1.11 oster ci_label.column = column;
924 1.11 oster raidwrite_component_label(
925 1.42 oster raidPtr->Disks[row][column].dev,
926 1.42 oster raidPtr->raid_cinfo[row][column].ci_vp,
927 1.11 oster &ci_label );
928 1.11 oster }
929 1.11 oster }
930 1.11 oster
931 1.11 oster return (retcode);
932 1.9 oster
933 1.1 oster /* initialize all parity */
934 1.1 oster case RAIDFRAME_REWRITEPARITY:
935 1.1 oster
936 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
937 1.17 oster /* Parity for RAID 0 is trivially correct */
938 1.42 oster raidPtr->parity_good = RF_RAID_CLEAN;
939 1.17 oster return(0);
940 1.17 oster }
941 1.37 oster
942 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
943 1.37 oster /* Re-write is already in progress! */
944 1.37 oster return(EINVAL);
945 1.37 oster }
946 1.27 oster
947 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
948 1.37 oster rf_RewriteParityThread,
949 1.42 oster raidPtr,"raid_parity");
950 1.9 oster return (retcode);
951 1.9 oster
952 1.11 oster
953 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
954 1.12 oster sparePtr = (RF_SingleComponent_t *) data;
955 1.12 oster memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
956 1.12 oster printf("Adding spare\n");
957 1.42 oster retcode = rf_add_hot_spare(raidPtr, &hot_spare);
958 1.11 oster return(retcode);
959 1.11 oster
960 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
961 1.11 oster return(retcode);
962 1.11 oster
963 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
964 1.24 oster
965 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
966 1.24 oster /* Can't do this on a RAID 0!! */
967 1.24 oster return(EINVAL);
968 1.24 oster }
969 1.24 oster
970 1.42 oster if (raidPtr->recon_in_progress == 1) {
971 1.37 oster /* a reconstruct is already in progress! */
972 1.37 oster return(EINVAL);
973 1.37 oster }
974 1.37 oster
975 1.12 oster componentPtr = (RF_SingleComponent_t *) data;
976 1.12 oster memcpy( &component, componentPtr,
977 1.12 oster sizeof(RF_SingleComponent_t));
978 1.12 oster row = component.row;
979 1.12 oster column = component.column;
980 1.12 oster printf("Rebuild: %d %d\n",row, column);
981 1.42 oster if ((row < 0) || (row >= raidPtr->numRow) ||
982 1.42 oster (column < 0) || (column >= raidPtr->numCol)) {
983 1.12 oster return(EINVAL);
984 1.12 oster }
985 1.37 oster
986 1.37 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
987 1.38 oster if (rrcopy == NULL)
988 1.38 oster return(ENOMEM);
989 1.37 oster
990 1.42 oster rrcopy->raidPtr = (void *) raidPtr;
991 1.37 oster rrcopy->row = row;
992 1.37 oster rrcopy->col = column;
993 1.37 oster
994 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
995 1.37 oster rf_ReconstructInPlaceThread,
996 1.37 oster rrcopy,"raid_reconip");
997 1.12 oster return(retcode);
998 1.12 oster
999 1.1 oster case RAIDFRAME_GET_INFO:
1000 1.42 oster if (!raidPtr->valid)
1001 1.41 oster return (ENODEV);
1002 1.41 oster ucfgp = (RF_DeviceConfig_t **) data;
1003 1.41 oster RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1004 1.41 oster (RF_DeviceConfig_t *));
1005 1.41 oster if (d_cfg == NULL)
1006 1.41 oster return (ENOMEM);
1007 1.41 oster bzero((char *) d_cfg, sizeof(RF_DeviceConfig_t));
1008 1.42 oster d_cfg->rows = raidPtr->numRow;
1009 1.42 oster d_cfg->cols = raidPtr->numCol;
1010 1.42 oster d_cfg->ndevs = raidPtr->numRow * raidPtr->numCol;
1011 1.41 oster if (d_cfg->ndevs >= RF_MAX_DISKS) {
1012 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1013 1.41 oster return (ENOMEM);
1014 1.41 oster }
1015 1.42 oster d_cfg->nspares = raidPtr->numSpare;
1016 1.41 oster if (d_cfg->nspares >= RF_MAX_DISKS) {
1017 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1018 1.41 oster return (ENOMEM);
1019 1.41 oster }
1020 1.42 oster d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1021 1.41 oster d = 0;
1022 1.41 oster for (i = 0; i < d_cfg->rows; i++) {
1023 1.41 oster for (j = 0; j < d_cfg->cols; j++) {
1024 1.42 oster d_cfg->devs[d] = raidPtr->Disks[i][j];
1025 1.41 oster d++;
1026 1.1 oster }
1027 1.41 oster }
1028 1.41 oster for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1029 1.42 oster d_cfg->spares[i] = raidPtr->Disks[0][j];
1030 1.41 oster }
1031 1.41 oster retcode = copyout((caddr_t) d_cfg, (caddr_t) * ucfgp,
1032 1.41 oster sizeof(RF_DeviceConfig_t));
1033 1.41 oster RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1034 1.41 oster
1035 1.41 oster return (retcode);
1036 1.9 oster
1037 1.22 oster case RAIDFRAME_CHECK_PARITY:
1038 1.42 oster *(int *) data = raidPtr->parity_good;
1039 1.22 oster return (0);
1040 1.41 oster
1041 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
1042 1.42 oster bzero(&raidPtr->acc_totals, sizeof(raidPtr->acc_totals));
1043 1.41 oster return (0);
1044 1.9 oster
1045 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
1046 1.41 oster totals = (RF_AccTotals_t *) data;
1047 1.42 oster *totals = raidPtr->acc_totals;
1048 1.41 oster return (0);
1049 1.9 oster
1050 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
1051 1.42 oster raidPtr->keep_acc_totals = *(int *)data;
1052 1.41 oster return (0);
1053 1.9 oster
1054 1.1 oster case RAIDFRAME_GET_SIZE:
1055 1.42 oster *(int *) data = raidPtr->totalSectors;
1056 1.9 oster return (0);
1057 1.1 oster
1058 1.1 oster /* fail a disk & optionally start reconstruction */
1059 1.1 oster case RAIDFRAME_FAIL_DISK:
1060 1.24 oster
1061 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1062 1.24 oster /* Can't do this on a RAID 0!! */
1063 1.24 oster return(EINVAL);
1064 1.24 oster }
1065 1.24 oster
1066 1.1 oster rr = (struct rf_recon_req *) data;
1067 1.9 oster
1068 1.42 oster if (rr->row < 0 || rr->row >= raidPtr->numRow
1069 1.42 oster || rr->col < 0 || rr->col >= raidPtr->numCol)
1070 1.9 oster return (EINVAL);
1071 1.1 oster
1072 1.12 oster printf("raid%d: Failing the disk: row: %d col: %d\n",
1073 1.12 oster unit, rr->row, rr->col);
1074 1.9 oster
1075 1.9 oster /* make a copy of the recon request so that we don't rely on
1076 1.9 oster * the user's buffer */
1077 1.1 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1078 1.38 oster if (rrcopy == NULL)
1079 1.38 oster return(ENOMEM);
1080 1.1 oster bcopy(rr, rrcopy, sizeof(*rr));
1081 1.42 oster rrcopy->raidPtr = (void *) raidPtr;
1082 1.1 oster
1083 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1084 1.37 oster rf_ReconThread,
1085 1.37 oster rrcopy,"raid_recon");
1086 1.9 oster return (0);
1087 1.9 oster
1088 1.9 oster /* invoke a copyback operation after recon on whatever disk
1089 1.9 oster * needs it, if any */
1090 1.9 oster case RAIDFRAME_COPYBACK:
1091 1.24 oster
1092 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1093 1.24 oster /* This makes no sense on a RAID 0!! */
1094 1.24 oster return(EINVAL);
1095 1.24 oster }
1096 1.24 oster
1097 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1098 1.37 oster /* Copyback is already in progress! */
1099 1.37 oster return(EINVAL);
1100 1.37 oster }
1101 1.27 oster
1102 1.42 oster retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1103 1.37 oster rf_CopybackThread,
1104 1.42 oster raidPtr,"raid_copyback");
1105 1.37 oster return (retcode);
1106 1.9 oster
1107 1.1 oster /* return the percentage completion of reconstruction */
1108 1.37 oster case RAIDFRAME_CHECK_RECON_STATUS:
1109 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1110 1.24 oster /* This makes no sense on a RAID 0 */
1111 1.24 oster return(EINVAL);
1112 1.24 oster }
1113 1.37 oster row = 0; /* XXX we only consider a single row... */
1114 1.42 oster if (raidPtr->status[row] != rf_rs_reconstructing)
1115 1.1 oster *(int *) data = 100;
1116 1.9 oster else
1117 1.42 oster *(int *) data = raidPtr->reconControl[row]->percentComplete;
1118 1.9 oster return (0);
1119 1.9 oster
1120 1.37 oster case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1121 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1122 1.37 oster /* This makes no sense on a RAID 0 */
1123 1.37 oster return(EINVAL);
1124 1.37 oster }
1125 1.42 oster if (raidPtr->parity_rewrite_in_progress == 1) {
1126 1.42 oster *(int *) data = 100 * raidPtr->parity_rewrite_stripes_done / raidPtr->Layout.numStripe;
1127 1.37 oster } else {
1128 1.37 oster *(int *) data = 100;
1129 1.37 oster }
1130 1.37 oster return (0);
1131 1.37 oster
1132 1.37 oster case RAIDFRAME_CHECK_COPYBACK_STATUS:
1133 1.42 oster if (raidPtr->Layout.map->faultsTolerated == 0) {
1134 1.37 oster /* This makes no sense on a RAID 0 */
1135 1.37 oster return(EINVAL);
1136 1.37 oster }
1137 1.42 oster if (raidPtr->copyback_in_progress == 1) {
1138 1.42 oster *(int *) data = 100 * raidPtr->copyback_stripes_done /
1139 1.42 oster raidPtr->Layout.numStripe;
1140 1.37 oster } else {
1141 1.37 oster *(int *) data = 100;
1142 1.37 oster }
1143 1.37 oster return (0);
1144 1.37 oster
1145 1.37 oster
1146 1.9 oster /* the sparetable daemon calls this to wait for the kernel to
1147 1.9 oster * need a spare table. this ioctl does not return until a
1148 1.9 oster * spare table is needed. XXX -- calling mpsleep here in the
1149 1.9 oster * ioctl code is almost certainly wrong and evil. -- XXX XXX
1150 1.9 oster * -- I should either compute the spare table in the kernel,
1151 1.9 oster * or have a different -- XXX XXX -- interface (a different
1152 1.42 oster * character device) for delivering the table -- XXX */
1153 1.1 oster #if 0
1154 1.1 oster case RAIDFRAME_SPARET_WAIT:
1155 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1156 1.9 oster while (!rf_sparet_wait_queue)
1157 1.9 oster mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1158 1.1 oster waitreq = rf_sparet_wait_queue;
1159 1.1 oster rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1160 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1161 1.9 oster
1162 1.42 oster /* structure assignment */
1163 1.42 oster *((RF_SparetWait_t *) data) = *waitreq;
1164 1.9 oster
1165 1.1 oster RF_Free(waitreq, sizeof(*waitreq));
1166 1.9 oster return (0);
1167 1.9 oster
1168 1.9 oster /* wakes up a process waiting on SPARET_WAIT and puts an error
1169 1.9 oster * code in it that will cause the daemon to exit */
1170 1.1 oster case RAIDFRAME_ABORT_SPARET_WAIT:
1171 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1172 1.1 oster waitreq->fcol = -1;
1173 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1174 1.1 oster waitreq->next = rf_sparet_wait_queue;
1175 1.1 oster rf_sparet_wait_queue = waitreq;
1176 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1177 1.1 oster wakeup(&rf_sparet_wait_queue);
1178 1.9 oster return (0);
1179 1.1 oster
1180 1.9 oster /* used by the spare table daemon to deliver a spare table
1181 1.9 oster * into the kernel */
1182 1.1 oster case RAIDFRAME_SEND_SPARET:
1183 1.9 oster
1184 1.1 oster /* install the spare table */
1185 1.42 oster retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1186 1.9 oster
1187 1.9 oster /* respond to the requestor. the return status of the spare
1188 1.9 oster * table installation is passed in the "fcol" field */
1189 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1190 1.1 oster waitreq->fcol = retcode;
1191 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1192 1.1 oster waitreq->next = rf_sparet_resp_queue;
1193 1.1 oster rf_sparet_resp_queue = waitreq;
1194 1.1 oster wakeup(&rf_sparet_resp_queue);
1195 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1196 1.9 oster
1197 1.9 oster return (retcode);
1198 1.1 oster #endif
1199 1.1 oster
1200 1.9 oster default:
1201 1.36 oster break; /* fall through to the os-specific code below */
1202 1.1 oster
1203 1.1 oster }
1204 1.9 oster
1205 1.42 oster if (!raidPtr->valid)
1206 1.9 oster return (EINVAL);
1207 1.9 oster
1208 1.1 oster /*
1209 1.1 oster * Add support for "regular" device ioctls here.
1210 1.1 oster */
1211 1.9 oster
1212 1.1 oster switch (cmd) {
1213 1.1 oster case DIOCGDINFO:
1214 1.9 oster *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1215 1.1 oster break;
1216 1.1 oster
1217 1.1 oster case DIOCGPART:
1218 1.9 oster ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1219 1.9 oster ((struct partinfo *) data)->part =
1220 1.1 oster &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1221 1.1 oster break;
1222 1.1 oster
1223 1.1 oster case DIOCWDINFO:
1224 1.1 oster case DIOCSDINFO:
1225 1.1 oster if ((error = raidlock(rs)) != 0)
1226 1.1 oster return (error);
1227 1.1 oster
1228 1.1 oster rs->sc_flags |= RAIDF_LABELLING;
1229 1.1 oster
1230 1.1 oster error = setdisklabel(rs->sc_dkdev.dk_label,
1231 1.9 oster (struct disklabel *) data, 0, rs->sc_dkdev.dk_cpulabel);
1232 1.1 oster if (error == 0) {
1233 1.1 oster if (cmd == DIOCWDINFO)
1234 1.1 oster error = writedisklabel(RAIDLABELDEV(dev),
1235 1.1 oster raidstrategy, rs->sc_dkdev.dk_label,
1236 1.1 oster rs->sc_dkdev.dk_cpulabel);
1237 1.1 oster }
1238 1.1 oster rs->sc_flags &= ~RAIDF_LABELLING;
1239 1.1 oster
1240 1.1 oster raidunlock(rs);
1241 1.1 oster
1242 1.1 oster if (error)
1243 1.1 oster return (error);
1244 1.1 oster break;
1245 1.1 oster
1246 1.1 oster case DIOCWLABEL:
1247 1.9 oster if (*(int *) data != 0)
1248 1.1 oster rs->sc_flags |= RAIDF_WLABEL;
1249 1.1 oster else
1250 1.1 oster rs->sc_flags &= ~RAIDF_WLABEL;
1251 1.1 oster break;
1252 1.1 oster
1253 1.1 oster case DIOCGDEFLABEL:
1254 1.42 oster raidgetdefaultlabel(raidPtr, rs,
1255 1.9 oster (struct disklabel *) data);
1256 1.1 oster break;
1257 1.1 oster
1258 1.1 oster default:
1259 1.39 oster retcode = ENOTTY;
1260 1.1 oster }
1261 1.9 oster return (retcode);
1262 1.1 oster
1263 1.1 oster }
1264 1.1 oster
1265 1.1 oster
1266 1.9 oster /* raidinit -- complete the rest of the initialization for the
1267 1.1 oster RAIDframe device. */
1268 1.1 oster
1269 1.1 oster
1270 1.1 oster static int
1271 1.9 oster raidinit(dev, raidPtr, unit)
1272 1.9 oster dev_t dev;
1273 1.1 oster RF_Raid_t *raidPtr;
1274 1.9 oster int unit;
1275 1.1 oster {
1276 1.9 oster int retcode;
1277 1.1 oster struct raid_softc *rs;
1278 1.1 oster
1279 1.1 oster retcode = 0;
1280 1.1 oster
1281 1.1 oster rs = &raid_softc[unit];
1282 1.1 oster pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
1283 1.11 oster 0, 0, "raidpl", 0, NULL, NULL, M_RAIDFRAME);
1284 1.9 oster
1285 1.1 oster
1286 1.1 oster /* XXX should check return code first... */
1287 1.1 oster rs->sc_flags |= RAIDF_INITED;
1288 1.1 oster
1289 1.9 oster sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds. */
1290 1.1 oster
1291 1.9 oster rs->sc_dkdev.dk_name = rs->sc_xname;
1292 1.11 oster
1293 1.1 oster /* disk_attach actually creates space for the CPU disklabel, among
1294 1.9 oster * other things, so it's critical to call this *BEFORE* we try putzing
1295 1.9 oster * with disklabels. */
1296 1.11 oster
1297 1.1 oster disk_attach(&rs->sc_dkdev);
1298 1.1 oster
1299 1.1 oster /* XXX There may be a weird interaction here between this, and
1300 1.9 oster * protectedSectors, as used in RAIDframe. */
1301 1.11 oster
1302 1.9 oster rs->sc_size = raidPtr->totalSectors;
1303 1.1 oster rs->sc_dev = dev;
1304 1.11 oster
1305 1.9 oster return (retcode);
1306 1.1 oster }
1307 1.1 oster
1308 1.1 oster /* wake up the daemon & tell it to get us a spare table
1309 1.1 oster * XXX
1310 1.9 oster * the entries in the queues should be tagged with the raidPtr
1311 1.11 oster * so that in the extremely rare case that two recons happen at once,
1312 1.11 oster * we know for which device we're requesting a spare table
1313 1.1 oster * XXX
1314 1.39 oster *
1315 1.39 oster * XXX This code is not currently used. GO
1316 1.1 oster */
1317 1.9 oster int
1318 1.9 oster rf_GetSpareTableFromDaemon(req)
1319 1.9 oster RF_SparetWait_t *req;
1320 1.9 oster {
1321 1.9 oster int retcode;
1322 1.9 oster
1323 1.9 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1324 1.9 oster req->next = rf_sparet_wait_queue;
1325 1.9 oster rf_sparet_wait_queue = req;
1326 1.9 oster wakeup(&rf_sparet_wait_queue);
1327 1.9 oster
1328 1.9 oster /* mpsleep unlocks the mutex */
1329 1.9 oster while (!rf_sparet_resp_queue) {
1330 1.15 oster tsleep(&rf_sparet_resp_queue, PRIBIO,
1331 1.9 oster "raidframe getsparetable", 0);
1332 1.9 oster }
1333 1.9 oster req = rf_sparet_resp_queue;
1334 1.9 oster rf_sparet_resp_queue = req->next;
1335 1.9 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1336 1.9 oster
1337 1.9 oster retcode = req->fcol;
1338 1.9 oster RF_Free(req, sizeof(*req)); /* this is not the same req as we
1339 1.9 oster * alloc'd */
1340 1.9 oster return (retcode);
1341 1.1 oster }
1342 1.39 oster
1343 1.11 oster /* a wrapper around rf_DoAccess that extracts appropriate info from the
1344 1.11 oster * bp & passes it down.
1345 1.1 oster * any calls originating in the kernel must use non-blocking I/O
1346 1.1 oster * do some extra sanity checking to return "appropriate" error values for
1347 1.1 oster * certain conditions (to make some standard utilities work)
1348 1.34 oster *
1349 1.34 oster * Formerly known as: rf_DoAccessKernel
1350 1.1 oster */
1351 1.34 oster void
1352 1.34 oster raidstart(raidPtr)
1353 1.9 oster RF_Raid_t *raidPtr;
1354 1.1 oster {
1355 1.1 oster RF_SectorCount_t num_blocks, pb, sum;
1356 1.1 oster RF_RaidAddr_t raid_addr;
1357 1.9 oster int retcode;
1358 1.1 oster struct partition *pp;
1359 1.9 oster daddr_t blocknum;
1360 1.9 oster int unit;
1361 1.1 oster struct raid_softc *rs;
1362 1.9 oster int do_async;
1363 1.34 oster struct buf *bp;
1364 1.34 oster struct buf *dp;
1365 1.1 oster
1366 1.1 oster unit = raidPtr->raidid;
1367 1.1 oster rs = &raid_softc[unit];
1368 1.34 oster
1369 1.34 oster /* Check to see if we're at the limit... */
1370 1.34 oster RF_LOCK_MUTEX(raidPtr->mutex);
1371 1.34 oster while (raidPtr->openings > 0) {
1372 1.34 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1373 1.34 oster
1374 1.34 oster /* get the next item, if any, from the queue */
1375 1.34 oster dp = &rs->buf_queue;
1376 1.34 oster bp = dp->b_actf;
1377 1.34 oster if (bp == NULL) {
1378 1.34 oster /* nothing more to do */
1379 1.34 oster return;
1380 1.34 oster }
1381 1.34 oster
1382 1.34 oster /* update structures */
1383 1.34 oster dp = bp->b_actf;
1384 1.34 oster if (dp != NULL) {
1385 1.34 oster dp->b_actb = bp->b_actb;
1386 1.34 oster } else {
1387 1.34 oster rs->buf_queue.b_actb = bp->b_actb;
1388 1.34 oster }
1389 1.34 oster *bp->b_actb = dp;
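		/*
		 * bp has now been unlinked from the head of buf_queue:
		 * the pointer that used to reference bp (*bp->b_actb)
		 * now references its successor dp, which may be NULL.
		 */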
1390 1.34 oster
1391 1.34 oster /* Ok, for the bp we have here, bp->b_blkno is relative to the
1392 1.34 oster * partition.. Need to make it absolute to the underlying
1393 1.34 oster * device.. */
1394 1.1 oster
1395 1.34 oster blocknum = bp->b_blkno;
1396 1.34 oster if (DISKPART(bp->b_dev) != RAW_PART) {
1397 1.34 oster pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1398 1.34 oster blocknum += pp->p_offset;
1399 1.34 oster }
1400 1.1 oster
1401 1.34 oster db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1402 1.34 oster (int) blocknum));
1403 1.34 oster
1404 1.34 oster db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1405 1.34 oster db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1406 1.34 oster
1407 1.34 oster /* *THIS* is where we adjust what block we're going to...
1408 1.34 oster * but DO NOT TOUCH bp->b_blkno!!! */
1409 1.34 oster raid_addr = blocknum;
1410 1.34 oster
1411 1.34 oster num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1412 1.34 oster pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1413 1.34 oster sum = raid_addr + num_blocks + pb;
1414 1.34 oster if (1 || rf_debugKernelAccess) {
1415 1.34 oster db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1416 1.34 oster (int) raid_addr, (int) sum, (int) num_blocks,
1417 1.34 oster (int) pb, (int) bp->b_resid));
1418 1.34 oster }
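		/* reject the request if it runs past the end of the array or
		 * if the sum wrapped around (each "sum < x" test below
		 * catches unsigned overflow) */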
1419 1.34 oster if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1420 1.34 oster || (sum < num_blocks) || (sum < pb)) {
1421 1.34 oster bp->b_error = ENOSPC;
1422 1.34 oster bp->b_flags |= B_ERROR;
1423 1.34 oster bp->b_resid = bp->b_bcount;
1424 1.34 oster biodone(bp);
1425 1.34 oster RF_LOCK_MUTEX(raidPtr->mutex);
1426 1.34 oster continue;
1427 1.34 oster }
1428 1.34 oster /*
1429 1.34 oster * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1430 1.34 oster */
1431 1.34 oster
1432 1.34 oster if (bp->b_bcount & raidPtr->sectorMask) {
1433 1.34 oster bp->b_error = EINVAL;
1434 1.34 oster bp->b_flags |= B_ERROR;
1435 1.34 oster bp->b_resid = bp->b_bcount;
1436 1.34 oster biodone(bp);
1437 1.34 oster RF_LOCK_MUTEX(raidPtr->mutex);
1438 1.34 oster continue;
1439 1.34 oster
1440 1.34 oster }
1441 1.34 oster db1_printf(("Calling DoAccess..\n"));
1442 1.34 oster
1443 1.1 oster
1444 1.34 oster RF_LOCK_MUTEX(raidPtr->mutex);
1445 1.34 oster raidPtr->openings--;
1446 1.34 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1447 1.1 oster
1448 1.34 oster /*
1449 1.34 oster * Everything is async.
1450 1.34 oster */
1451 1.34 oster do_async = 1;
1452 1.34 oster
1453 1.34 oster 		/* don't ever condition on bp->b_flags & B_WRITE; B_WRITE is
1454 1.34 oster 		 * a pseudo-flag (zero), so always condition on B_READ instead */
1455 1.34 oster
1456 1.34 oster /* XXX we're still at splbio() here... do we *really*
1457 1.34 oster need to be? */
1458 1.20 oster
1459 1.37 oster
1460 1.34 oster retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1461 1.34 oster RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1462 1.34 oster do_async, raid_addr, num_blocks,
1463 1.34 oster bp->b_un.b_addr, bp, NULL, NULL,
1464 1.34 oster RF_DAG_NONBLOCKING_IO, NULL, NULL, NULL);
1465 1.20 oster
1466 1.20 oster
1467 1.20 oster RF_LOCK_MUTEX(raidPtr->mutex);
1468 1.20 oster }
1469 1.34 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1470 1.34 oster }
1471 1.20 oster
1472 1.20 oster
1473 1.7 explorer
1474 1.7 explorer
1475 1.1 oster /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1476 1.1 oster
1477 1.9 oster int
1478 1.9 oster rf_DispatchKernelIO(queue, req)
1479 1.9 oster RF_DiskQueue_t *queue;
1480 1.9 oster RF_DiskQueueData_t *req;
1481 1.1 oster {
1482 1.9 oster int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1483 1.1 oster struct buf *bp;
1484 1.9 oster struct raidbuf *raidbp = NULL;
1485 1.1 oster struct raid_softc *rs;
1486 1.9 oster int unit;
1487 1.37 oster int s;
1488 1.9 oster
1489 1.37 oster 	s = 0;
1490 1.37 oster /* s = splbio();*/ /* want to test this */
1491 1.1 oster /* XXX along with the vnode, we also need the softc associated with
1492 1.9 oster * this device.. */
1493 1.9 oster
1494 1.1 oster req->queue = queue;
1495 1.9 oster
1496 1.1 oster unit = queue->raidPtr->raidid;
1497 1.1 oster
1498 1.9 oster db1_printf(("DispatchKernelIO unit: %d\n", unit));
1499 1.1 oster
1500 1.9 oster if (unit >= numraid) {
1501 1.9 oster printf("Invalid unit number: %d %d\n", unit, numraid);
1502 1.1 oster panic("Invalid Unit number in rf_DispatchKernelIO\n");
1503 1.1 oster }
1504 1.1 oster rs = &raid_softc[unit];
1505 1.1 oster
1506 1.1 oster /* XXX is this the right place? */
1507 1.9 oster disk_busy(&rs->sc_dkdev);
1508 1.1 oster
1509 1.1 oster bp = req->bp;
1510 1.16 oster #if 1
1511 1.9 oster /* XXX when there is a physical disk failure, someone is passing us a
1512 1.9 oster * buffer that contains old stuff!! Attempt to deal with this problem
1513 1.9 oster * without taking a performance hit... (not sure where the real bug
1514 1.9 oster * is. It's buried in RAIDframe somewhere) :-( GO ) */
1515 1.4 oster
1516 1.4 oster if (bp->b_flags & B_ERROR) {
1517 1.4 oster bp->b_flags &= ~B_ERROR;
1518 1.4 oster }
1519 1.9 oster if (bp->b_error != 0) {
1520 1.4 oster bp->b_error = 0;
1521 1.4 oster }
1522 1.16 oster #endif
1523 1.1 oster raidbp = RAIDGETBUF(rs);
1524 1.1 oster
1525 1.9 oster raidbp->rf_flags = 0; /* XXX not really used anywhere... */
1526 1.1 oster
1527 1.1 oster /*
1528 1.1 oster * context for raidiodone
1529 1.1 oster */
1530 1.1 oster raidbp->rf_obp = bp;
1531 1.1 oster raidbp->req = req;
1532 1.1 oster
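	/* this buf did not come from the buffer cache, so initialize its
	 * dependency list by hand before handing it to the disk driver */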
1533 1.32 oster LIST_INIT(&raidbp->rf_buf.b_dep);
1534 1.32 oster
1535 1.1 oster switch (req->type) {
1536 1.9 oster case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1537 1.1 oster /* XXX need to do something extra here.. */
1538 1.9 oster /* I'm leaving this in, as I've never actually seen it used,
1539 1.9 oster * and I'd like folks to report it... GO */
1540 1.1 oster 		printf("WAKEUP CALLED\n");
1541 1.1 oster queue->numOutstanding++;
1542 1.1 oster
1543 1.1 oster /* XXX need to glue the original buffer into this?? */
1544 1.1 oster
1545 1.1 oster KernelWakeupFunc(&raidbp->rf_buf);
1546 1.1 oster break;
1547 1.9 oster
1548 1.1 oster case RF_IO_TYPE_READ:
1549 1.1 oster case RF_IO_TYPE_WRITE:
1550 1.9 oster
1551 1.1 oster if (req->tracerec) {
1552 1.1 oster RF_ETIMER_START(req->tracerec->timer);
1553 1.1 oster }
1554 1.9 oster InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1555 1.9 oster op | bp->b_flags, queue->rf_cinfo->ci_dev,
1556 1.9 oster req->sectorOffset, req->numSector,
1557 1.9 oster req->buf, KernelWakeupFunc, (void *) req,
1558 1.9 oster queue->raidPtr->logBytesPerSector, req->b_proc);
1559 1.1 oster
1560 1.1 oster if (rf_debugKernelAccess) {
1561 1.9 oster db1_printf(("dispatch: bp->b_blkno = %ld\n",
1562 1.9 oster (long) bp->b_blkno));
1563 1.1 oster }
1564 1.1 oster queue->numOutstanding++;
1565 1.1 oster queue->last_deq_sector = req->sectorOffset;
1566 1.9 oster 		/* the access wouldn't have been let in if there were any
1567 1.9 oster 		 * pending requests at any other priority */
1568 1.1 oster queue->curPriority = req->priority;
1569 1.1 oster
1570 1.1 oster db1_printf(("Going for %c to unit %d row %d col %d\n",
1571 1.9 oster req->type, unit, queue->row, queue->col));
1572 1.1 oster db1_printf(("sector %d count %d (%d bytes) %d\n",
1573 1.9 oster (int) req->sectorOffset, (int) req->numSector,
1574 1.9 oster (int) (req->numSector <<
1575 1.9 oster queue->raidPtr->logBytesPerSector),
1576 1.9 oster (int) queue->raidPtr->logBytesPerSector));
1577 1.1 oster if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1578 1.1 oster raidbp->rf_buf.b_vp->v_numoutput++;
1579 1.1 oster }
1580 1.9 oster VOP_STRATEGY(&raidbp->rf_buf);
1581 1.1 oster
1582 1.1 oster break;
1583 1.9 oster
1584 1.1 oster default:
1585 1.1 oster panic("bad req->type in rf_DispatchKernelIO");
1586 1.1 oster }
1587 1.1 oster db1_printf(("Exiting from DispatchKernelIO\n"));
1588 1.37 oster /* splx(s); */ /* want to test this */
1589 1.9 oster return (0);
1590 1.1 oster }
1591 1.9 oster /* this is the callback function associated with an I/O invoked from
1592 1.1 oster kernel code.
1593 1.1 oster */
1594 1.9 oster static void
1595 1.9 oster KernelWakeupFunc(vbp)
1596 1.9 oster struct buf *vbp;
1597 1.9 oster {
1598 1.9 oster RF_DiskQueueData_t *req = NULL;
1599 1.9 oster RF_DiskQueue_t *queue;
1600 1.9 oster struct raidbuf *raidbp = (struct raidbuf *) vbp;
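	/* this cast back to the containing raidbuf assumes rf_buf is the
	 * first member of struct raidbuf; vbp is the &raidbp->rf_buf that
	 * was handed to VOP_STRATEGY in rf_DispatchKernelIO */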
1601 1.9 oster struct buf *bp;
1602 1.9 oster struct raid_softc *rs;
1603 1.9 oster int unit;
1604 1.9 oster register int s;
1605 1.9 oster
1606 1.36 oster s = splbio();
1607 1.9 oster db1_printf(("recovering the request queue:\n"));
1608 1.9 oster req = raidbp->req;
1609 1.1 oster
1610 1.9 oster bp = raidbp->rf_obp;
1611 1.1 oster
1612 1.9 oster queue = (RF_DiskQueue_t *) req->queue;
1613 1.1 oster
1614 1.9 oster if (raidbp->rf_buf.b_flags & B_ERROR) {
1615 1.9 oster bp->b_flags |= B_ERROR;
1616 1.9 oster bp->b_error = raidbp->rf_buf.b_error ?
1617 1.9 oster raidbp->rf_buf.b_error : EIO;
1618 1.9 oster }
1619 1.1 oster
1620 1.9 oster /* XXX methinks this could be wrong... */
1621 1.1 oster #if 1
1622 1.9 oster bp->b_resid = raidbp->rf_buf.b_resid;
1623 1.1 oster #endif
1624 1.1 oster
1625 1.9 oster if (req->tracerec) {
1626 1.9 oster RF_ETIMER_STOP(req->tracerec->timer);
1627 1.9 oster RF_ETIMER_EVAL(req->tracerec->timer);
1628 1.9 oster RF_LOCK_MUTEX(rf_tracing_mutex);
1629 1.9 oster req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1630 1.9 oster req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1631 1.9 oster req->tracerec->num_phys_ios++;
1632 1.9 oster RF_UNLOCK_MUTEX(rf_tracing_mutex);
1633 1.9 oster }
1634 1.9 oster bp->b_bcount = raidbp->rf_buf.b_bcount; /* XXXX ?? */
1635 1.1 oster
1636 1.9 oster unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
1637 1.1 oster
1638 1.1 oster
1639 1.9 oster /* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
1640 1.9 oster * ballistic, and mark the component as hosed... */
1641 1.36 oster
1642 1.9 oster if (bp->b_flags & B_ERROR) {
1643 1.9 oster /* Mark the disk as dead */
1644 1.9 oster /* but only mark it once... */
1645 1.9 oster if (queue->raidPtr->Disks[queue->row][queue->col].status ==
1646 1.9 oster rf_ds_optimal) {
1647 1.9 oster printf("raid%d: IO Error. Marking %s as failed.\n",
1648 1.9 oster unit, queue->raidPtr->Disks[queue->row][queue->col].devname);
1649 1.9 oster queue->raidPtr->Disks[queue->row][queue->col].status =
1650 1.9 oster rf_ds_failed;
1651 1.9 oster queue->raidPtr->status[queue->row] = rf_rs_degraded;
1652 1.9 oster queue->raidPtr->numFailures++;
1653 1.11 oster /* XXX here we should bump the version number for each component, and write that data out */
1654 1.9 oster } else { /* Disk is already dead... */
1655 1.9 oster /* printf("Disk already marked as dead!\n"); */
1656 1.9 oster }
1657 1.4 oster
1658 1.9 oster }
1659 1.4 oster
1660 1.9 oster rs = &raid_softc[unit];
1661 1.9 oster RAIDPUTBUF(rs, raidbp);
1662 1.9 oster
1663 1.4 oster
1664 1.9 oster if (bp->b_resid == 0) {
1665 1.9 oster /* XXX is this the right place for a disk_unbusy()??!??!?!? */
1666 1.9 oster disk_unbusy(&rs->sc_dkdev, (bp->b_bcount - bp->b_resid));
1667 1.36 oster }
1668 1.1 oster
1669 1.9 oster rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
1670 1.9 oster (req->CompleteFunc) (req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
1671 1.1 oster
1672 1.36 oster splx(s);
1673 1.1 oster }
1674 1.1 oster
1675 1.1 oster
1676 1.1 oster
1677 1.1 oster /*
1678 1.1 oster * initialize a buf structure for doing an I/O in the kernel.
1679 1.1 oster */
1680 1.9 oster static void
1681 1.9 oster InitBP(
1682 1.9 oster struct buf * bp,
1683 1.9 oster struct vnode * b_vp,
1684 1.9 oster unsigned rw_flag,
1685 1.9 oster dev_t dev,
1686 1.9 oster RF_SectorNum_t startSect,
1687 1.9 oster RF_SectorCount_t numSect,
1688 1.9 oster caddr_t buf,
1689 1.9 oster void (*cbFunc) (struct buf *),
1690 1.9 oster void *cbArg,
1691 1.9 oster int logBytesPerSector,
1692 1.9 oster struct proc * b_proc)
1693 1.9 oster {
1694 1.9 oster /* bp->b_flags = B_PHYS | rw_flag; */
1695 1.9 oster bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1696 1.9 oster bp->b_bcount = numSect << logBytesPerSector;
1697 1.9 oster bp->b_bufsize = bp->b_bcount;
1698 1.9 oster bp->b_error = 0;
1699 1.9 oster bp->b_dev = dev;
1700 1.9 oster bp->b_un.b_addr = buf;
1701 1.9 oster bp->b_blkno = startSect;
1702 1.9 oster bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1703 1.1 oster if (bp->b_bcount == 0) {
1704 1.1 oster panic("bp->b_bcount is zero in InitBP!!\n");
1705 1.1 oster }
1706 1.9 oster bp->b_proc = b_proc;
1707 1.9 oster bp->b_iodone = cbFunc;
1708 1.9 oster bp->b_vp = b_vp;
1709 1.9 oster
1710 1.1 oster }
1711 1.1 oster
1712 1.1 oster static void
1713 1.1 oster raidgetdefaultlabel(raidPtr, rs, lp)
1714 1.1 oster RF_Raid_t *raidPtr;
1715 1.1 oster struct raid_softc *rs;
1716 1.1 oster struct disklabel *lp;
1717 1.1 oster {
1718 1.1 oster db1_printf(("Building a default label...\n"));
1719 1.1 oster bzero(lp, sizeof(*lp));
1720 1.1 oster
1721 1.1 oster /* fabricate a label... */
1722 1.1 oster lp->d_secperunit = raidPtr->totalSectors;
1723 1.1 oster lp->d_secsize = raidPtr->bytesPerSector;
1724 1.45 oster lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
1725 1.1 oster lp->d_ntracks = 1;
1726 1.45 oster lp->d_ncylinders = raidPtr->totalSectors /
1727 1.45 oster (lp->d_nsectors * lp->d_ntracks);
1728 1.1 oster lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1729 1.1 oster
1730 1.1 oster strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
1731 1.9 oster lp->d_type = DTYPE_RAID;
1732 1.1 oster strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1733 1.1 oster lp->d_rpm = 3600;
1734 1.1 oster lp->d_interleave = 1;
1735 1.1 oster lp->d_flags = 0;
1736 1.1 oster
1737 1.1 oster lp->d_partitions[RAW_PART].p_offset = 0;
1738 1.1 oster lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
1739 1.1 oster lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1740 1.1 oster lp->d_npartitions = RAW_PART + 1;
1741 1.1 oster
1742 1.1 oster lp->d_magic = DISKMAGIC;
1743 1.1 oster lp->d_magic2 = DISKMAGIC;
1744 1.1 oster 	lp->d_checksum = dkcksum(lp);
1745 1.1 oster
1746 1.1 oster }
1747 1.1 oster /*
1748 1.1 oster * Read the disklabel from the raid device. If one is not present, fake one
1749 1.1 oster * up.
1750 1.1 oster */
1751 1.1 oster static void
1752 1.1 oster raidgetdisklabel(dev)
1753 1.9 oster dev_t dev;
1754 1.1 oster {
1755 1.9 oster int unit = raidunit(dev);
1756 1.1 oster struct raid_softc *rs = &raid_softc[unit];
1757 1.9 oster char *errstring;
1758 1.1 oster struct disklabel *lp = rs->sc_dkdev.dk_label;
1759 1.1 oster struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
1760 1.1 oster RF_Raid_t *raidPtr;
1761 1.1 oster
1762 1.1 oster db1_printf(("Getting the disklabel...\n"));
1763 1.1 oster
1764 1.1 oster bzero(clp, sizeof(*clp));
1765 1.1 oster
1766 1.1 oster raidPtr = raidPtrs[unit];
1767 1.1 oster
1768 1.1 oster raidgetdefaultlabel(raidPtr, rs, lp);
1769 1.1 oster
1770 1.1 oster /*
1771 1.1 oster * Call the generic disklabel extraction routine.
1772 1.1 oster */
1773 1.1 oster errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
1774 1.1 oster rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
1775 1.9 oster if (errstring)
1776 1.1 oster raidmakedisklabel(rs);
1777 1.1 oster else {
1778 1.9 oster int i;
1779 1.1 oster struct partition *pp;
1780 1.1 oster
1781 1.1 oster /*
1782 1.1 oster * Sanity check whether the found disklabel is valid.
1783 1.1 oster *
1784 1.1 oster 		 * This is necessary since the total size of the raid device
1785 1.1 oster 		 * may vary when the interleave is changed even though exactly
1786 1.1 oster 		 * the same components are used, and an old disklabel may be
1787 1.1 oster 		 * used if one is found.
1788 1.1 oster */
1789 1.1 oster if (lp->d_secperunit != rs->sc_size)
1790 1.1 oster printf("WARNING: %s: "
1791 1.1 oster "total sector size in disklabel (%d) != "
1792 1.18 oster "the size of raid (%ld)\n", rs->sc_xname,
1793 1.18 oster lp->d_secperunit, (long) rs->sc_size);
1794 1.1 oster for (i = 0; i < lp->d_npartitions; i++) {
1795 1.1 oster pp = &lp->d_partitions[i];
1796 1.1 oster if (pp->p_offset + pp->p_size > rs->sc_size)
1797 1.1 oster printf("WARNING: %s: end of partition `%c' "
1798 1.18 oster "exceeds the size of raid (%ld)\n",
1799 1.18 oster rs->sc_xname, 'a' + i, (long) rs->sc_size);
1800 1.1 oster }
1801 1.1 oster }
1802 1.1 oster
1803 1.1 oster }
1804 1.1 oster /*
1805 1.1 oster * Take care of things one might want to take care of in the event
1806 1.1 oster * that a disklabel isn't present.
1807 1.1 oster */
1808 1.1 oster static void
1809 1.1 oster raidmakedisklabel(rs)
1810 1.1 oster struct raid_softc *rs;
1811 1.1 oster {
1812 1.1 oster struct disklabel *lp = rs->sc_dkdev.dk_label;
1813 1.1 oster db1_printf(("Making a label..\n"));
1814 1.1 oster
1815 1.1 oster /*
1816 1.1 oster * For historical reasons, if there's no disklabel present
1817 1.1 oster * the raw partition must be marked FS_BSDFFS.
1818 1.1 oster */
1819 1.1 oster
1820 1.1 oster lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
1821 1.1 oster
1822 1.1 oster strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
1823 1.1 oster
1824 1.1 oster lp->d_checksum = dkcksum(lp);
1825 1.1 oster }
1826 1.1 oster /*
1827 1.1 oster * Lookup the provided name in the filesystem. If the file exists,
1828 1.1 oster * is a valid block device, and isn't being used by anyone else,
1829 1.1 oster * set *vpp to the file's vnode.
1830 1.9 oster * You'll find the original of this in ccd.c
1831 1.1 oster */
1832 1.1 oster int
1833 1.1 oster raidlookup(path, p, vpp)
1834 1.9 oster char *path;
1835 1.1 oster struct proc *p;
1836 1.1 oster struct vnode **vpp; /* result */
1837 1.1 oster {
1838 1.1 oster struct nameidata nd;
1839 1.1 oster struct vnode *vp;
1840 1.1 oster struct vattr va;
1841 1.9 oster int error;
1842 1.1 oster
1843 1.1 oster NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
1844 1.9 oster if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
1845 1.1 oster #ifdef DEBUG
1846 1.9 oster printf("RAIDframe: vn_open returned %d\n", error);
1847 1.1 oster #endif
1848 1.1 oster return (error);
1849 1.1 oster }
1850 1.1 oster vp = nd.ni_vp;
1851 1.1 oster if (vp->v_usecount > 1) {
1852 1.1 oster VOP_UNLOCK(vp, 0);
1853 1.9 oster (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
1854 1.1 oster return (EBUSY);
1855 1.1 oster }
1856 1.1 oster if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
1857 1.1 oster VOP_UNLOCK(vp, 0);
1858 1.9 oster (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
1859 1.1 oster return (error);
1860 1.1 oster }
1861 1.1 oster /* XXX: eventually we should handle VREG, too. */
1862 1.1 oster if (va.va_type != VBLK) {
1863 1.1 oster VOP_UNLOCK(vp, 0);
1864 1.9 oster (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
1865 1.1 oster return (ENOTBLK);
1866 1.1 oster }
1867 1.1 oster VOP_UNLOCK(vp, 0);
1868 1.1 oster *vpp = vp;
1869 1.1 oster return (0);
1870 1.1 oster }
1871 1.1 oster /*
1872 1.1 oster * Wait interruptibly for an exclusive lock.
1873 1.1 oster *
1874 1.1 oster * XXX
1875 1.1 oster * Several drivers do this; it should be abstracted and made MP-safe.
1876 1.1 oster * (Hmm... where have we seen this warning before :-> GO )
1877 1.1 oster */
1878 1.1 oster static int
1879 1.1 oster raidlock(rs)
1880 1.1 oster struct raid_softc *rs;
1881 1.1 oster {
1882 1.9 oster int error;
1883 1.1 oster
1884 1.1 oster while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
1885 1.1 oster rs->sc_flags |= RAIDF_WANTED;
1886 1.9 oster if ((error =
1887 1.9 oster tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
1888 1.1 oster return (error);
1889 1.1 oster }
1890 1.1 oster rs->sc_flags |= RAIDF_LOCKED;
1891 1.1 oster return (0);
1892 1.1 oster }
1893 1.1 oster /*
1894 1.1 oster * Unlock and wake up any waiters.
1895 1.1 oster */
1896 1.1 oster static void
1897 1.1 oster raidunlock(rs)
1898 1.1 oster struct raid_softc *rs;
1899 1.1 oster {
1900 1.1 oster
1901 1.1 oster rs->sc_flags &= ~RAIDF_LOCKED;
1902 1.1 oster if ((rs->sc_flags & RAIDF_WANTED) != 0) {
1903 1.1 oster rs->sc_flags &= ~RAIDF_WANTED;
1904 1.1 oster wakeup(rs);
1905 1.1 oster }
1906 1.11 oster }
1907 1.11 oster
1908 1.11 oster
1909 1.11 oster #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
1910 1.11 oster #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
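/*
 * Assumed layout: the component label lives RF_COMPONENT_INFO_OFFSET bytes
 * into the region reserved at the front of each component (see the
 * rf_protected_sectors note in raidread_component_label below), so normal
 * RAIDframe data never overlaps it.
 */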
1911 1.11 oster
1912 1.11 oster int
1913 1.12 oster raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
1914 1.12 oster {
1915 1.12 oster RF_ComponentLabel_t component_label;
1916 1.12 oster raidread_component_label(dev, b_vp, &component_label);
1917 1.12 oster component_label.mod_counter = mod_counter;
1918 1.12 oster component_label.clean = RF_RAID_CLEAN;
1919 1.12 oster raidwrite_component_label(dev, b_vp, &component_label);
1920 1.12 oster return(0);
1921 1.12 oster }
1922 1.12 oster
1923 1.12 oster
1924 1.12 oster int
1925 1.12 oster raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
1926 1.11 oster {
1927 1.12 oster RF_ComponentLabel_t component_label;
1928 1.12 oster raidread_component_label(dev, b_vp, &component_label);
1929 1.12 oster component_label.mod_counter = mod_counter;
1930 1.12 oster component_label.clean = RF_RAID_DIRTY;
1931 1.12 oster raidwrite_component_label(dev, b_vp, &component_label);
1932 1.11 oster return(0);
1933 1.11 oster }
1934 1.11 oster
1935 1.11 oster /* ARGSUSED */
1936 1.11 oster int
1937 1.11 oster raidread_component_label(dev, b_vp, component_label)
1938 1.11 oster dev_t dev;
1939 1.11 oster struct vnode *b_vp;
1940 1.11 oster RF_ComponentLabel_t *component_label;
1941 1.11 oster {
1942 1.11 oster struct buf *bp;
1943 1.11 oster int error;
1944 1.11 oster
1945 1.11 oster /* XXX should probably ensure that we don't try to do this if
1946 1.11 oster someone has changed rf_protected_sectors. */
1947 1.11 oster
1948 1.11 oster /* get a block of the appropriate size... */
1949 1.11 oster bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
1950 1.11 oster bp->b_dev = dev;
1951 1.11 oster
1952 1.11 oster /* get our ducks in a row for the read */
1953 1.11 oster bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
1954 1.11 oster bp->b_bcount = RF_COMPONENT_INFO_SIZE;
1955 1.11 oster bp->b_flags = B_BUSY | B_READ;
1956 1.11 oster bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
1957 1.11 oster
1958 1.11 oster (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
1959 1.11 oster
1960 1.11 oster error = biowait(bp);
1961 1.11 oster
1962 1.11 oster if (!error) {
1963 1.11 oster memcpy(component_label, bp->b_un.b_addr,
1964 1.11 oster sizeof(RF_ComponentLabel_t));
1965 1.12 oster #if 0
1966 1.11 oster printf("raidread_component_label: got component label:\n");
1967 1.11 oster printf("Version: %d\n",component_label->version);
1968 1.11 oster printf("Serial Number: %d\n",component_label->serial_number);
1969 1.11 oster printf("Mod counter: %d\n",component_label->mod_counter);
1970 1.11 oster printf("Row: %d\n", component_label->row);
1971 1.11 oster printf("Column: %d\n", component_label->column);
1972 1.11 oster printf("Num Rows: %d\n", component_label->num_rows);
1973 1.11 oster printf("Num Columns: %d\n", component_label->num_columns);
1974 1.11 oster printf("Clean: %d\n", component_label->clean);
1975 1.11 oster printf("Status: %d\n", component_label->status);
1976 1.11 oster #endif
1977 1.11 oster } else {
1978 1.11 oster printf("Failed to read RAID component label!\n");
1979 1.11 oster }
1980 1.11 oster
1981 1.11 oster bp->b_flags = B_INVAL | B_AGE;
1982 1.11 oster brelse(bp);
1983 1.11 oster return(error);
1984 1.11 oster }
1985 1.11 oster /* ARGSUSED */
1986 1.11 oster int
1987 1.11 oster raidwrite_component_label(dev, b_vp, component_label)
1988 1.11 oster dev_t dev;
1989 1.11 oster struct vnode *b_vp;
1990 1.11 oster RF_ComponentLabel_t *component_label;
1991 1.11 oster {
1992 1.11 oster struct buf *bp;
1993 1.11 oster int error;
1994 1.11 oster
1995 1.11 oster /* get a block of the appropriate size... */
1996 1.11 oster bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
1997 1.11 oster bp->b_dev = dev;
1998 1.11 oster
1999 1.11 oster /* get our ducks in a row for the write */
2000 1.11 oster bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2001 1.11 oster bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2002 1.11 oster bp->b_flags = B_BUSY | B_WRITE;
2003 1.11 oster bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2004 1.11 oster
2005 1.11 oster memset( bp->b_un.b_addr, 0, RF_COMPONENT_INFO_SIZE );
2006 1.11 oster
2007 1.11 oster memcpy( bp->b_un.b_addr, component_label, sizeof(RF_ComponentLabel_t));
2008 1.11 oster
2009 1.11 oster (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2010 1.11 oster error = biowait(bp);
2011 1.11 oster bp->b_flags = B_INVAL | B_AGE;
2012 1.11 oster brelse(bp);
2013 1.11 oster if (error) {
2014 1.11 oster printf("Failed to write RAID component info!\n");
2015 1.11 oster }
2016 1.11 oster
2017 1.11 oster return(error);
2018 1.1 oster }
2019 1.12 oster
2020 1.12 oster void
2021 1.12 oster rf_markalldirty( raidPtr )
2022 1.12 oster RF_Raid_t *raidPtr;
2023 1.12 oster {
2024 1.12 oster RF_ComponentLabel_t c_label;
2025 1.12 oster int r,c;
2026 1.12 oster
2027 1.12 oster raidPtr->mod_counter++;
2028 1.12 oster for (r = 0; r < raidPtr->numRow; r++) {
2029 1.12 oster for (c = 0; c < raidPtr->numCol; c++) {
2030 1.12 oster if (raidPtr->Disks[r][c].status != rf_ds_failed) {
2031 1.12 oster raidread_component_label(
2032 1.12 oster raidPtr->Disks[r][c].dev,
2033 1.12 oster raidPtr->raid_cinfo[r][c].ci_vp,
2034 1.12 oster &c_label);
2035 1.12 oster if (c_label.status == rf_ds_spared) {
2036 1.12 oster /* XXX do something special...
2037 1.12 oster but whatever you do, don't
2038 1.12 oster try to access it!! */
2039 1.12 oster } else {
2040 1.12 oster #if 0
2041 1.12 oster c_label.status =
2042 1.12 oster raidPtr->Disks[r][c].status;
2043 1.12 oster raidwrite_component_label(
2044 1.12 oster raidPtr->Disks[r][c].dev,
2045 1.12 oster raidPtr->raid_cinfo[r][c].ci_vp,
2046 1.12 oster &c_label);
2047 1.12 oster #endif
2048 1.12 oster raidmarkdirty(
2049 1.12 oster raidPtr->Disks[r][c].dev,
2050 1.12 oster raidPtr->raid_cinfo[r][c].ci_vp,
2051 1.12 oster raidPtr->mod_counter);
2052 1.12 oster }
2053 1.12 oster }
2054 1.12 oster }
2055 1.12 oster }
2056 1.13 oster /* printf("Component labels marked dirty.\n"); */
2057 1.12 oster #if 0
2058 1.12 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2059 1.12 oster sparecol = raidPtr->numCol + c;
2060 1.12 oster if (raidPtr->Disks[r][sparecol].status == rf_ds_used_spare) {
2061 1.12 oster /*
2062 1.12 oster
2063 1.12 oster XXX this is where we get fancy and map this spare
2064 1.12 oster 			   into its correct spot in the array.
2065 1.12 oster
2066 1.12 oster */
2067 1.12 oster /*
2068 1.12 oster
2069 1.12 oster we claim this disk is "optimal" if it's
2070 1.12 oster rf_ds_used_spare, as that means it should be
2071 1.12 oster directly substitutable for the disk it replaced.
2072 1.12 oster We note that too...
2073 1.12 oster
2074 1.12 oster */
2075 1.12 oster
2076 1.12 oster for(i=0;i<raidPtr->numRow;i++) {
2077 1.12 oster for(j=0;j<raidPtr->numCol;j++) {
2078 1.12 oster if ((raidPtr->Disks[i][j].spareRow ==
2079 1.12 oster r) &&
2080 1.12 oster (raidPtr->Disks[i][j].spareCol ==
2081 1.12 oster sparecol)) {
2082 1.12 oster srow = r;
2083 1.12 oster scol = sparecol;
2084 1.12 oster break;
2085 1.12 oster }
2086 1.12 oster }
2087 1.12 oster }
2088 1.12 oster
2089 1.12 oster raidread_component_label(
2090 1.12 oster raidPtr->Disks[r][sparecol].dev,
2091 1.12 oster raidPtr->raid_cinfo[r][sparecol].ci_vp,
2092 1.12 oster &c_label);
2093 1.12 oster /* make sure status is noted */
2094 1.12 oster c_label.version = RF_COMPONENT_LABEL_VERSION;
2095 1.12 oster c_label.mod_counter = raidPtr->mod_counter;
2096 1.12 oster c_label.serial_number = raidPtr->serial_number;
2097 1.12 oster c_label.row = srow;
2098 1.12 oster c_label.column = scol;
2099 1.12 oster c_label.num_rows = raidPtr->numRow;
2100 1.12 oster c_label.num_columns = raidPtr->numCol;
2101 1.12 oster c_label.clean = RF_RAID_DIRTY; /* changed in a bit*/
2102 1.12 oster c_label.status = rf_ds_optimal;
2103 1.12 oster raidwrite_component_label(
2104 1.12 oster raidPtr->Disks[r][sparecol].dev,
2105 1.12 oster raidPtr->raid_cinfo[r][sparecol].ci_vp,
2106 1.12 oster &c_label);
2107 1.12 oster 			raidmarkclean( raidPtr->Disks[r][sparecol].dev,
2108 1.12 oster 				       raidPtr->raid_cinfo[r][sparecol].ci_vp, raidPtr->mod_counter);
2109 1.12 oster }
2110 1.12 oster }
2111 1.12 oster
2112 1.12 oster #endif
2113 1.12 oster }
2114 1.12 oster
2115 1.13 oster
2116 1.13 oster void
2117 1.13 oster rf_update_component_labels( raidPtr )
2118 1.13 oster RF_Raid_t *raidPtr;
2119 1.13 oster {
2120 1.13 oster RF_ComponentLabel_t c_label;
2121 1.13 oster int sparecol;
2122 1.13 oster int r,c;
2123 1.13 oster int i,j;
2124 1.13 oster int srow, scol;
2125 1.13 oster
2126 1.13 oster srow = -1;
2127 1.13 oster scol = -1;
2128 1.13 oster
2129 1.13 oster /* XXX should do extra checks to make sure things really are clean,
2130 1.13 oster rather than blindly setting the clean bit... */
2131 1.13 oster
2132 1.13 oster raidPtr->mod_counter++;
2133 1.13 oster
2134 1.13 oster for (r = 0; r < raidPtr->numRow; r++) {
2135 1.13 oster for (c = 0; c < raidPtr->numCol; c++) {
2136 1.13 oster if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
2137 1.13 oster raidread_component_label(
2138 1.13 oster raidPtr->Disks[r][c].dev,
2139 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2140 1.13 oster &c_label);
2141 1.13 oster /* make sure status is noted */
2142 1.13 oster c_label.status = rf_ds_optimal;
2143 1.13 oster raidwrite_component_label(
2144 1.13 oster raidPtr->Disks[r][c].dev,
2145 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2146 1.13 oster &c_label);
2147 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2148 1.13 oster raidmarkclean(
2149 1.13 oster raidPtr->Disks[r][c].dev,
2150 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2151 1.13 oster raidPtr->mod_counter);
2152 1.13 oster }
2153 1.13 oster }
2154 1.13 oster /* else we don't touch it.. */
2155 1.13 oster #if 0
2156 1.13 oster else if (raidPtr->Disks[r][c].status !=
2157 1.13 oster rf_ds_failed) {
2158 1.13 oster raidread_component_label(
2159 1.13 oster raidPtr->Disks[r][c].dev,
2160 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2161 1.13 oster &c_label);
2162 1.13 oster /* make sure status is noted */
2163 1.13 oster c_label.status =
2164 1.13 oster raidPtr->Disks[r][c].status;
2165 1.13 oster raidwrite_component_label(
2166 1.13 oster raidPtr->Disks[r][c].dev,
2167 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2168 1.13 oster &c_label);
2169 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2170 1.13 oster raidmarkclean(
2171 1.13 oster raidPtr->Disks[r][c].dev,
2172 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2173 1.13 oster raidPtr->mod_counter);
2174 1.13 oster }
2175 1.13 oster }
2176 1.13 oster #endif
2177 1.13 oster }
2178 1.13 oster }
2179 1.13 oster
2180 1.13 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2181 1.13 oster sparecol = raidPtr->numCol + c;
2182 1.13 oster if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
2183 1.13 oster /*
2184 1.13 oster
2185 1.13 oster we claim this disk is "optimal" if it's
2186 1.13 oster rf_ds_used_spare, as that means it should be
2187 1.13 oster directly substitutable for the disk it replaced.
2188 1.13 oster We note that too...
2189 1.13 oster
2190 1.13 oster */
2191 1.13 oster
2192 1.13 oster for(i=0;i<raidPtr->numRow;i++) {
2193 1.13 oster for(j=0;j<raidPtr->numCol;j++) {
2194 1.13 oster if ((raidPtr->Disks[i][j].spareRow ==
2195 1.13 oster 0) &&
2196 1.13 oster (raidPtr->Disks[i][j].spareCol ==
2197 1.13 oster sparecol)) {
2198 1.13 oster srow = i;
2199 1.13 oster scol = j;
2200 1.13 oster break;
2201 1.13 oster }
2202 1.13 oster }
2203 1.13 oster }
2204 1.13 oster
2205 1.13 oster raidread_component_label(
2206 1.13 oster raidPtr->Disks[0][sparecol].dev,
2207 1.13 oster raidPtr->raid_cinfo[0][sparecol].ci_vp,
2208 1.13 oster &c_label);
2209 1.13 oster /* make sure status is noted */
2210 1.13 oster c_label.version = RF_COMPONENT_LABEL_VERSION;
2211 1.13 oster c_label.mod_counter = raidPtr->mod_counter;
2212 1.13 oster c_label.serial_number = raidPtr->serial_number;
2213 1.13 oster c_label.row = srow;
2214 1.13 oster c_label.column = scol;
2215 1.13 oster c_label.num_rows = raidPtr->numRow;
2216 1.13 oster c_label.num_columns = raidPtr->numCol;
2217 1.13 oster c_label.clean = RF_RAID_DIRTY; /* changed in a bit*/
2218 1.13 oster c_label.status = rf_ds_optimal;
2219 1.13 oster raidwrite_component_label(
2220 1.13 oster raidPtr->Disks[0][sparecol].dev,
2221 1.13 oster raidPtr->raid_cinfo[0][sparecol].ci_vp,
2222 1.13 oster &c_label);
2223 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2224 1.13 oster raidmarkclean( raidPtr->Disks[0][sparecol].dev,
2225 1.13 oster raidPtr->raid_cinfo[0][sparecol].ci_vp,
2226 1.13 oster raidPtr->mod_counter);
2227 1.13 oster }
2228 1.13 oster }
2229 1.13 oster }
2230 1.13 oster /* printf("Component labels updated\n"); */
2231 1.37 oster }
2232 1.37 oster
2233 1.37 oster void
2234 1.37 oster rf_ReconThread(req)
2235 1.37 oster struct rf_recon_req *req;
2236 1.37 oster {
2237 1.37 oster int s;
2238 1.37 oster RF_Raid_t *raidPtr;
2239 1.37 oster
2240 1.37 oster s = splbio();
2241 1.37 oster raidPtr = (RF_Raid_t *) req->raidPtr;
2242 1.37 oster raidPtr->recon_in_progress = 1;
2243 1.37 oster
2244 1.37 oster rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
2245 1.37 oster ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2246 1.37 oster
2247 1.37 oster /* XXX get rid of this! we don't need it at all.. */
2248 1.37 oster RF_Free(req, sizeof(*req));
2249 1.37 oster
2250 1.37 oster raidPtr->recon_in_progress = 0;
2251 1.37 oster splx(s);
2252 1.37 oster
2253 1.37 oster /* That's all... */
2254 1.37 oster kthread_exit(0); /* does not return */
2255 1.37 oster }
2256 1.37 oster
2257 1.37 oster void
2258 1.37 oster rf_RewriteParityThread(raidPtr)
2259 1.37 oster RF_Raid_t *raidPtr;
2260 1.37 oster {
2261 1.37 oster int retcode;
2262 1.37 oster int s;
2263 1.37 oster
2264 1.37 oster raidPtr->parity_rewrite_in_progress = 1;
2265 1.37 oster s = splbio();
2266 1.37 oster retcode = rf_RewriteParity(raidPtr);
2267 1.37 oster splx(s);
2268 1.37 oster if (retcode) {
2269 1.37 oster printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2270 1.37 oster } else {
2271 1.37 oster /* set the clean bit! If we shutdown correctly,
2272 1.37 oster the clean bit on each component label will get
2273 1.37 oster set */
2274 1.37 oster raidPtr->parity_good = RF_RAID_CLEAN;
2275 1.37 oster }
2276 1.37 oster raidPtr->parity_rewrite_in_progress = 0;
2277 1.37 oster
2278 1.37 oster /* That's all... */
2279 1.37 oster kthread_exit(0); /* does not return */
2280 1.37 oster }
2281 1.37 oster
2282 1.37 oster
2283 1.37 oster void
2284 1.37 oster rf_CopybackThread(raidPtr)
2285 1.37 oster RF_Raid_t *raidPtr;
2286 1.37 oster {
2287 1.37 oster int s;
2288 1.37 oster
2289 1.37 oster raidPtr->copyback_in_progress = 1;
2290 1.37 oster s = splbio();
2291 1.37 oster rf_CopybackReconstructedData(raidPtr);
2292 1.37 oster splx(s);
2293 1.37 oster raidPtr->copyback_in_progress = 0;
2294 1.37 oster
2295 1.37 oster /* That's all... */
2296 1.37 oster kthread_exit(0); /* does not return */
2297 1.37 oster }
2298 1.37 oster
2299 1.37 oster
2300 1.37 oster void
2301 1.37 oster rf_ReconstructInPlaceThread(req)
2302 1.37 oster struct rf_recon_req *req;
2303 1.37 oster {
2304 1.37 oster int retcode;
2305 1.37 oster int s;
2306 1.37 oster RF_Raid_t *raidPtr;
2307 1.37 oster
2308 1.37 oster s = splbio();
2309 1.37 oster raidPtr = req->raidPtr;
2310 1.37 oster raidPtr->recon_in_progress = 1;
2311 1.37 oster retcode = rf_ReconstructInPlace(raidPtr, req->row, req->col);
2312 1.37 oster RF_Free(req, sizeof(*req));
2313 1.37 oster raidPtr->recon_in_progress = 0;
2314 1.37 oster splx(s);
2315 1.37 oster
2316 1.37 oster /* That's all... */
2317 1.37 oster kthread_exit(0); /* does not return */
2318 1.13 oster }
2319