rf_netbsdkintf.c revision 1.20
1 1.20 oster /* $NetBSD: rf_netbsdkintf.c,v 1.20 1999/07/08 00:45:23 oster Exp $ */
2 1.1 oster /*-
3 1.1 oster * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 1.1 oster * All rights reserved.
5 1.1 oster *
6 1.1 oster * This code is derived from software contributed to The NetBSD Foundation
7 1.1 oster * by Greg Oster; Jason R. Thorpe.
8 1.1 oster *
9 1.1 oster * Redistribution and use in source and binary forms, with or without
10 1.1 oster * modification, are permitted provided that the following conditions
11 1.1 oster * are met:
12 1.1 oster * 1. Redistributions of source code must retain the above copyright
13 1.1 oster * notice, this list of conditions and the following disclaimer.
14 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
15 1.1 oster * notice, this list of conditions and the following disclaimer in the
16 1.1 oster * documentation and/or other materials provided with the distribution.
17 1.1 oster * 3. All advertising materials mentioning features or use of this software
18 1.1 oster * must display the following acknowledgement:
19 1.1 oster * This product includes software developed by the NetBSD
20 1.1 oster * Foundation, Inc. and its contributors.
21 1.1 oster * 4. Neither the name of The NetBSD Foundation nor the names of its
22 1.1 oster * contributors may be used to endorse or promote products derived
23 1.1 oster * from this software without specific prior written permission.
24 1.1 oster *
25 1.1 oster * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 1.1 oster * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.1 oster * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.1 oster * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 1.1 oster * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.1 oster * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.1 oster * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.1 oster * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.1 oster * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.1 oster * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.1 oster * POSSIBILITY OF SUCH DAMAGE.
36 1.1 oster */
37 1.1 oster
38 1.1 oster /*
39 1.1 oster * Copyright (c) 1988 University of Utah.
40 1.1 oster * Copyright (c) 1990, 1993
41 1.1 oster * The Regents of the University of California. All rights reserved.
42 1.1 oster *
43 1.1 oster * This code is derived from software contributed to Berkeley by
44 1.1 oster * the Systems Programming Group of the University of Utah Computer
45 1.1 oster * Science Department.
46 1.1 oster *
47 1.1 oster * Redistribution and use in source and binary forms, with or without
48 1.1 oster * modification, are permitted provided that the following conditions
49 1.1 oster * are met:
50 1.1 oster * 1. Redistributions of source code must retain the above copyright
51 1.1 oster * notice, this list of conditions and the following disclaimer.
52 1.1 oster * 2. Redistributions in binary form must reproduce the above copyright
53 1.1 oster * notice, this list of conditions and the following disclaimer in the
54 1.1 oster * documentation and/or other materials provided with the distribution.
55 1.1 oster * 3. All advertising materials mentioning features or use of this software
56 1.1 oster * must display the following acknowledgement:
57 1.1 oster * This product includes software developed by the University of
58 1.1 oster * California, Berkeley and its contributors.
59 1.1 oster * 4. Neither the name of the University nor the names of its contributors
60 1.1 oster * may be used to endorse or promote products derived from this software
61 1.1 oster * without specific prior written permission.
62 1.1 oster *
63 1.1 oster * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 1.1 oster * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 1.1 oster * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 1.1 oster * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 1.1 oster * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 1.1 oster * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 1.1 oster * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 1.1 oster * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 1.1 oster * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 1.1 oster * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 1.1 oster * SUCH DAMAGE.
74 1.1 oster *
75 1.1 oster * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 1.1 oster *
77 1.1 oster * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 1.1 oster */
79 1.1 oster
80 1.1 oster
81 1.1 oster
82 1.1 oster
83 1.1 oster /*
84 1.1 oster * Copyright (c) 1995 Carnegie-Mellon University.
85 1.1 oster * All rights reserved.
86 1.1 oster *
87 1.1 oster * Authors: Mark Holland, Jim Zelenka
88 1.1 oster *
89 1.1 oster * Permission to use, copy, modify and distribute this software and
90 1.1 oster * its documentation is hereby granted, provided that both the copyright
91 1.1 oster * notice and this permission notice appear in all copies of the
92 1.1 oster * software, derivative works or modified versions, and any portions
93 1.1 oster * thereof, and that both notices appear in supporting documentation.
94 1.1 oster *
95 1.1 oster * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
96 1.1 oster * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
97 1.1 oster * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
98 1.1 oster *
99 1.1 oster * Carnegie Mellon requests users of this software to return to
100 1.1 oster *
101 1.1 oster * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
102 1.1 oster * School of Computer Science
103 1.1 oster * Carnegie Mellon University
104 1.1 oster * Pittsburgh PA 15213-3890
105 1.1 oster *
106 1.1 oster * any improvements or extensions that they make and grant Carnegie the
107 1.1 oster * rights to redistribute these changes.
108 1.1 oster */
109 1.1 oster
110 1.1 oster /***********************************************************
111 1.1 oster *
112 1.1 oster * rf_kintf.c -- the kernel interface routines for RAIDframe
113 1.1 oster *
114 1.1 oster ***********************************************************/
115 1.1 oster
116 1.1 oster #include <sys/errno.h>
117 1.1 oster #include <sys/param.h>
118 1.1 oster #include <sys/pool.h>
119 1.1 oster #include <sys/queue.h>
120 1.1 oster #include <sys/disk.h>
121 1.1 oster #include <sys/device.h>
122 1.1 oster #include <sys/stat.h>
123 1.1 oster #include <sys/ioctl.h>
124 1.1 oster #include <sys/fcntl.h>
125 1.1 oster #include <sys/systm.h>
126 1.1 oster #include <sys/namei.h>
127 1.1 oster #include <sys/vnode.h>
128 1.1 oster #include <sys/param.h>
129 1.1 oster #include <sys/types.h>
130 1.1 oster #include <machine/types.h>
131 1.1 oster #include <sys/disklabel.h>
132 1.1 oster #include <sys/conf.h>
133 1.1 oster #include <sys/lock.h>
134 1.1 oster #include <sys/buf.h>
135 1.1 oster #include <sys/user.h>
136 1.8 oster
137 1.8 oster #include "raid.h"
138 1.1 oster #include "rf_raid.h"
139 1.1 oster #include "rf_raidframe.h"
140 1.1 oster #include "rf_dag.h"
141 1.1 oster #include "rf_dagflags.h"
142 1.1 oster #include "rf_diskqueue.h"
143 1.1 oster #include "rf_acctrace.h"
144 1.1 oster #include "rf_etimer.h"
145 1.1 oster #include "rf_general.h"
146 1.1 oster #include "rf_debugMem.h"
147 1.1 oster #include "rf_kintf.h"
148 1.1 oster #include "rf_options.h"
149 1.1 oster #include "rf_driver.h"
150 1.1 oster #include "rf_parityscan.h"
151 1.1 oster #include "rf_debugprint.h"
152 1.1 oster #include "rf_threadstuff.h"
153 1.1 oster
154 1.9 oster int rf_kdebug_level = 0;
155 1.1 oster
156 1.1 oster #define RFK_BOOT_NONE 0
157 1.1 oster #define RFK_BOOT_GOOD 1
158 1.1 oster #define RFK_BOOT_BAD 2
159 1.1 oster static int rf_kbooted = RFK_BOOT_NONE;
160 1.1 oster
161 1.1 oster #ifdef DEBUG
162 1.1 oster #define db0_printf(a) printf a
163 1.1 oster #define db_printf(a) if (rf_kdebug_level > 0) printf a
164 1.1 oster #define db1_printf(a) if (rf_kdebug_level > 0) printf a
165 1.1 oster #define db2_printf(a) if (rf_kdebug_level > 1) printf a
166 1.1 oster #define db3_printf(a) if (rf_kdebug_level > 2) printf a
167 1.1 oster #define db4_printf(a) if (rf_kdebug_level > 3) printf a
168 1.1 oster #define db5_printf(a) if (rf_kdebug_level > 4) printf a
169 1.9 oster #else /* DEBUG */
170 1.1 oster #define db0_printf(a) printf a
171 1.1 oster #define db1_printf(a) { }
172 1.1 oster #define db2_printf(a) { }
173 1.1 oster #define db3_printf(a) { }
174 1.1 oster #define db4_printf(a) { }
175 1.1 oster #define db5_printf(a) { }
176 1.9 oster #endif /* DEBUG */
177 1.1 oster
178 1.9 oster static RF_Raid_t **raidPtrs; /* global raid device descriptors */
179 1.1 oster
180 1.11 oster RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
181 1.1 oster
182 1.10 oster static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
183 1.10 oster * spare table */
184 1.10 oster static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
185 1.10 oster * installation process */
186 1.10 oster
187 1.10 oster static struct rf_recon_req *recon_queue = NULL; /* used to communicate
188 1.10 oster * reconstruction
189 1.10 oster * requests */
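/* (Entries are queued on recon_queue by the RAIDFRAME_FAIL_DISK ioctl
 * and drained by rf_ReconKernelThread() below.) */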
190 1.1 oster
191 1.1 oster
192 1.9 oster decl_simple_lock_data(, recon_queue_mutex)
193 1.1 oster #define LOCK_RECON_Q_MUTEX() simple_lock(&recon_queue_mutex)
194 1.1 oster #define UNLOCK_RECON_Q_MUTEX() simple_unlock(&recon_queue_mutex)
195 1.1 oster
196 1.1 oster /* prototypes */
197 1.10 oster static void KernelWakeupFunc(struct buf * bp);
198 1.10 oster static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
199 1.10 oster dev_t dev, RF_SectorNum_t startSect,
200 1.10 oster RF_SectorCount_t numSect, caddr_t buf,
201 1.10 oster void (*cbFunc) (struct buf *), void *cbArg,
202 1.10 oster int logBytesPerSector, struct proc * b_proc);
203 1.1 oster
204 1.11 oster #define Dprintf0(s) if (rf_queueDebug) \
205 1.11 oster rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
206 1.11 oster #define Dprintf1(s,a) if (rf_queueDebug) \
207 1.11 oster rf_debug_printf(s,a,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
208 1.11 oster #define Dprintf2(s,a,b) if (rf_queueDebug) \
209 1.11 oster rf_debug_printf(s,a,b,NULL,NULL,NULL,NULL,NULL,NULL)
210 1.11 oster #define Dprintf3(s,a,b,c) if (rf_queueDebug) \
211 1.11 oster rf_debug_printf(s,a,b,c,NULL,NULL,NULL,NULL,NULL)
212 1.1 oster
213 1.12 oster int raidmarkclean(dev_t dev, struct vnode *b_vp, int);
214 1.12 oster int raidmarkdirty(dev_t dev, struct vnode *b_vp, int);
215 1.1 oster
216 1.10 oster void raidattach __P((int));
217 1.10 oster int raidsize __P((dev_t));
218 1.1 oster
219 1.10 oster void rf_DiskIOComplete(RF_DiskQueue_t *, RF_DiskQueueData_t *, int);
220 1.10 oster void rf_CopybackReconstructedData(RF_Raid_t * raidPtr);
221 1.10 oster static int raidinit __P((dev_t, RF_Raid_t *, int));
222 1.10 oster
223 1.10 oster int raidopen __P((dev_t, int, int, struct proc *));
224 1.10 oster int raidclose __P((dev_t, int, int, struct proc *));
225 1.10 oster int raidioctl __P((dev_t, u_long, caddr_t, int, struct proc *));
226 1.10 oster int raidwrite __P((dev_t, struct uio *, int));
227 1.10 oster int raidread __P((dev_t, struct uio *, int));
228 1.10 oster void raidstrategy __P((struct buf *));
229 1.10 oster int raiddump __P((dev_t, daddr_t, caddr_t, size_t));
230 1.1 oster
231 1.11 oster int raidwrite_component_label(dev_t, struct vnode *, RF_ComponentLabel_t *);
232 1.11 oster int raidread_component_label(dev_t, struct vnode *, RF_ComponentLabel_t *);
233 1.13 oster void rf_update_component_labels( RF_Raid_t *);
234 1.1 oster /*
235 1.1 oster * Pilfered from ccd.c
236 1.1 oster */
237 1.1 oster
238 1.10 oster struct raidbuf {
239 1.10 oster struct buf rf_buf; /* new I/O buf. MUST BE FIRST!!! */
240 1.10 oster struct buf *rf_obp; /* ptr. to original I/O buf */
241 1.10 oster int rf_flags; /* misc. flags */
242 1.11 oster RF_DiskQueueData_t *req;/* the request that this was part of.. */
243 1.10 oster };
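/* Keeping rf_buf first means a struct raidbuf * and a pointer to the
 * embedded struct buf can be converted with a simple cast, which is why
 * the comment above insists on the ordering. */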
244 1.1 oster
245 1.1 oster
246 1.1 oster #define RAIDGETBUF(rs) pool_get(&(rs)->sc_cbufpool, PR_NOWAIT)
247 1.1 oster #define RAIDPUTBUF(rs, cbp) pool_put(&(rs)->sc_cbufpool, cbp)
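/* These simply wrap pool_get()/pool_put() on the per-unit component
 * buffer pool (sc_cbufpool) that raidinit() initializes with pool_init(). */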
248 1.1 oster
249 1.9 oster /* XXX Not sure if the following should be replacing the raidPtrs above,
250 1.10 oster or if it should be used in conjunction with that... */
251 1.1 oster
252 1.10 oster struct raid_softc {
253 1.10 oster int sc_flags; /* flags */
254 1.10 oster int sc_cflags; /* configuration flags */
255 1.11 oster size_t sc_size; /* size of the raid device */
256 1.11 oster dev_t sc_dev; /* our device.. */
257 1.10 oster char sc_xname[20]; /* XXX external name */
258 1.10 oster struct disk sc_dkdev; /* generic disk device info */
259 1.10 oster struct pool sc_cbufpool; /* component buffer pool */
260 1.10 oster };
261 1.1 oster /* sc_flags */
262 1.1 oster #define RAIDF_INITED 0x01 /* unit has been initialized */
263 1.1 oster #define RAIDF_WLABEL 0x02 /* label area is writable */
264 1.1 oster #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
265 1.1 oster #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
266 1.1 oster #define RAIDF_LOCKED 0x80 /* unit is locked */
267 1.1 oster
268 1.1 oster #define raidunit(x) DISKUNIT(x)
269 1.10 oster static int numraid = 0;
270 1.1 oster
271 1.20 oster /*
272 1.20 oster * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
273 1.20 oster * Be aware that large numbers can allow the driver to consume a lot of
274 1.20 oster * kernel memory, especially on writes...
275 1.20 oster */
276 1.20 oster
277 1.20 oster #ifndef RAIDOUTSTANDING
278 1.20 oster #define RAIDOUTSTANDING 10
279 1.20 oster #endif
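/* Because of the #ifndef guard above, a kernel build may predefine
 * RAIDOUTSTANDING (e.g. from its configuration) to raise or lower this
 * default and trade kernel memory for I/O concurrency. */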
280 1.20 oster
281 1.1 oster #define RAIDLABELDEV(dev) \
282 1.1 oster (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
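/* i.e. the block device for this unit's raw partition; the disklabel is
 * read and written through it (see the DIOCWDINFO handling in raidioctl()). */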
283 1.1 oster
284 1.1 oster /* declared here, and made public, for the benefit of KVM stuff.. */
285 1.10 oster struct raid_softc *raid_softc;
286 1.9 oster
287 1.10 oster static void raidgetdefaultlabel __P((RF_Raid_t *, struct raid_softc *,
288 1.10 oster struct disklabel *));
289 1.10 oster static void raidgetdisklabel __P((dev_t));
290 1.10 oster static void raidmakedisklabel __P((struct raid_softc *));
291 1.1 oster
292 1.10 oster static int raidlock __P((struct raid_softc *));
293 1.10 oster static void raidunlock __P((struct raid_softc *));
294 1.10 oster int raidlookup __P((char *, struct proc * p, struct vnode **));
295 1.1 oster
296 1.12 oster static void rf_markalldirty __P((RF_Raid_t *));
297 1.1 oster
298 1.10 oster void
299 1.10 oster raidattach(num)
300 1.9 oster int num;
301 1.1 oster {
302 1.14 oster int raidID;
303 1.14 oster int i, rc;
304 1.1 oster
305 1.1 oster #ifdef DEBUG
306 1.9 oster printf("raidattach: Asked for %d units\n", num);
307 1.1 oster #endif
308 1.1 oster
309 1.1 oster if (num <= 0) {
310 1.1 oster #ifdef DIAGNOSTIC
311 1.1 oster panic("raidattach: count <= 0");
312 1.1 oster #endif
313 1.1 oster return;
314 1.1 oster }
315 1.9 oster /* This is where all the initialization stuff gets done. */
316 1.1 oster
317 1.1 oster /* Make some space for requested number of units... */
318 1.1 oster
319 1.1 oster RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
320 1.1 oster if (raidPtrs == NULL) {
321 1.1 oster panic("raidPtrs is NULL!!\n");
322 1.1 oster }
323 1.14 oster
324 1.14 oster rc = rf_mutex_init(&rf_sparet_wait_mutex);
325 1.14 oster if (rc) {
326 1.14 oster RF_PANIC();
327 1.14 oster }
328 1.14 oster
329 1.14 oster rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
330 1.14 oster recon_queue = NULL;
331 1.14 oster
332 1.14 oster for (i = 0; i < num; i++)
333 1.14 oster raidPtrs[i] = NULL;
334 1.14 oster rc = rf_BootRaidframe();
335 1.14 oster if (rc == 0)
336 1.14 oster printf("Kernelized RAIDframe activated\n");
337 1.14 oster else
338 1.1 oster panic("Serious error booting RAID!!\n");
339 1.14 oster
340 1.9 oster rf_kbooted = RFK_BOOT_GOOD;
341 1.9 oster
342 1.9 oster /* put together some data structures like the CCD device does.. This
343 1.9 oster * lets us lock the device and what-not when it gets opened. */
344 1.1 oster
345 1.1 oster raid_softc = (struct raid_softc *)
346 1.9 oster malloc(num * sizeof(struct raid_softc),
347 1.9 oster M_RAIDFRAME, M_NOWAIT);
348 1.1 oster if (raid_softc == NULL) {
349 1.1 oster printf("WARNING: no memory for RAIDframe driver\n");
350 1.1 oster return;
351 1.1 oster }
352 1.1 oster numraid = num;
353 1.1 oster bzero(raid_softc, num * sizeof(struct raid_softc));
354 1.11 oster
355 1.9 oster for (raidID = 0; raidID < num; raidID++) {
356 1.9 oster RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
357 1.11 oster (RF_Raid_t *));
358 1.9 oster if (raidPtrs[raidID] == NULL) {
359 1.9 oster printf("raidPtrs[%d] is NULL\n", raidID);
360 1.1 oster }
361 1.1 oster }
362 1.1 oster }
363 1.1 oster
364 1.1 oster
365 1.1 oster int
366 1.1 oster raidsize(dev)
367 1.9 oster dev_t dev;
368 1.1 oster {
369 1.1 oster struct raid_softc *rs;
370 1.1 oster struct disklabel *lp;
371 1.9 oster int part, unit, omask, size;
372 1.1 oster
373 1.1 oster unit = raidunit(dev);
374 1.1 oster if (unit >= numraid)
375 1.1 oster return (-1);
376 1.1 oster rs = &raid_softc[unit];
377 1.1 oster
378 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
379 1.1 oster return (-1);
380 1.1 oster
381 1.1 oster part = DISKPART(dev);
382 1.1 oster omask = rs->sc_dkdev.dk_openmask & (1 << part);
383 1.1 oster lp = rs->sc_dkdev.dk_label;
384 1.1 oster
385 1.1 oster if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
386 1.1 oster return (-1);
387 1.1 oster
388 1.1 oster if (lp->d_partitions[part].p_fstype != FS_SWAP)
389 1.1 oster size = -1;
390 1.1 oster else
391 1.1 oster size = lp->d_partitions[part].p_size *
392 1.1 oster (lp->d_secsize / DEV_BSIZE);
393 1.1 oster
394 1.1 oster if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
395 1.1 oster return (-1);
396 1.1 oster
397 1.1 oster return (size);
398 1.1 oster
399 1.1 oster }
400 1.1 oster
401 1.1 oster int
402 1.1 oster raiddump(dev, blkno, va, size)
403 1.9 oster dev_t dev;
404 1.1 oster daddr_t blkno;
405 1.1 oster caddr_t va;
406 1.9 oster size_t size;
407 1.1 oster {
408 1.1 oster /* Not implemented. */
409 1.1 oster return ENXIO;
410 1.1 oster }
411 1.1 oster /* ARGSUSED */
412 1.1 oster int
413 1.1 oster raidopen(dev, flags, fmt, p)
414 1.9 oster dev_t dev;
415 1.9 oster int flags, fmt;
416 1.1 oster struct proc *p;
417 1.1 oster {
418 1.9 oster int unit = raidunit(dev);
419 1.1 oster struct raid_softc *rs;
420 1.1 oster struct disklabel *lp;
421 1.9 oster int part, pmask;
422 1.9 oster int error = 0;
423 1.9 oster
424 1.1 oster if (unit >= numraid)
425 1.1 oster return (ENXIO);
426 1.1 oster rs = &raid_softc[unit];
427 1.1 oster
428 1.1 oster if ((error = raidlock(rs)) != 0)
429 1.9 oster return (error);
430 1.1 oster lp = rs->sc_dkdev.dk_label;
431 1.1 oster
432 1.1 oster part = DISKPART(dev);
433 1.1 oster pmask = (1 << part);
434 1.1 oster
435 1.1 oster db1_printf(("Opening raid device number: %d partition: %d\n",
436 1.14 oster unit, part));
437 1.1 oster
438 1.1 oster
439 1.1 oster if ((rs->sc_flags & RAIDF_INITED) &&
440 1.1 oster (rs->sc_dkdev.dk_openmask == 0))
441 1.9 oster raidgetdisklabel(dev);
442 1.1 oster
443 1.1 oster /* make sure that this partition exists */
444 1.1 oster
445 1.1 oster if (part != RAW_PART) {
446 1.1 oster db1_printf(("Not a raw partition..\n"));
447 1.1 oster if (((rs->sc_flags & RAIDF_INITED) == 0) ||
448 1.1 oster ((part >= lp->d_npartitions) ||
449 1.9 oster (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
450 1.1 oster error = ENXIO;
451 1.1 oster raidunlock(rs);
452 1.1 oster db1_printf(("Bailing out...\n"));
453 1.9 oster return (error);
454 1.1 oster }
455 1.1 oster }
456 1.1 oster /* Prevent this unit from being unconfigured while open. */
457 1.1 oster switch (fmt) {
458 1.1 oster case S_IFCHR:
459 1.1 oster rs->sc_dkdev.dk_copenmask |= pmask;
460 1.1 oster break;
461 1.1 oster
462 1.1 oster case S_IFBLK:
463 1.1 oster rs->sc_dkdev.dk_bopenmask |= pmask;
464 1.1 oster break;
465 1.1 oster }
466 1.13 oster
467 1.13 oster if ((rs->sc_dkdev.dk_openmask == 0) &&
468 1.13 oster ((rs->sc_flags & RAIDF_INITED) != 0)) {
469 1.13 oster /* First one... mark things as dirty... Note that we *MUST*
470 1.13 oster have done a configure before this. I DO NOT WANT TO BE
471 1.13 oster SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
472 1.13 oster THAT THEY BELONG TOGETHER!!!!! */
473 1.13 oster /* XXX should check to see if we're only open for reading
474 1.13 oster here... If so, we needn't do this, but then need some
475 1.13 oster other way of keeping track of what's happened.. */
476 1.13 oster
477 1.13 oster rf_markalldirty( raidPtrs[unit] );
478 1.13 oster }
479 1.13 oster
480 1.13 oster
481 1.1 oster rs->sc_dkdev.dk_openmask =
482 1.1 oster rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
483 1.1 oster
484 1.1 oster raidunlock(rs);
485 1.1 oster
486 1.9 oster return (error);
487 1.1 oster
488 1.1 oster
489 1.1 oster }
490 1.1 oster /* ARGSUSED */
491 1.1 oster int
492 1.1 oster raidclose(dev, flags, fmt, p)
493 1.9 oster dev_t dev;
494 1.9 oster int flags, fmt;
495 1.1 oster struct proc *p;
496 1.1 oster {
497 1.9 oster int unit = raidunit(dev);
498 1.1 oster struct raid_softc *rs;
499 1.9 oster int error = 0;
500 1.9 oster int part;
501 1.1 oster
502 1.1 oster if (unit >= numraid)
503 1.1 oster return (ENXIO);
504 1.1 oster rs = &raid_softc[unit];
505 1.1 oster
506 1.1 oster if ((error = raidlock(rs)) != 0)
507 1.1 oster return (error);
508 1.1 oster
509 1.1 oster part = DISKPART(dev);
510 1.1 oster
511 1.1 oster /* ...that much closer to allowing unconfiguration... */
512 1.1 oster switch (fmt) {
513 1.1 oster case S_IFCHR:
514 1.1 oster rs->sc_dkdev.dk_copenmask &= ~(1 << part);
515 1.1 oster break;
516 1.1 oster
517 1.1 oster case S_IFBLK:
518 1.1 oster rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
519 1.1 oster break;
520 1.1 oster }
521 1.1 oster rs->sc_dkdev.dk_openmask =
522 1.1 oster rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
523 1.13 oster
524 1.13 oster if ((rs->sc_dkdev.dk_openmask == 0) &&
525 1.13 oster ((rs->sc_flags & RAIDF_INITED) != 0)) {
526 1.13 oster /* Last one... device is not unconfigured yet.
527 1.13 oster (If RAIDF_INITED is not set, device shutdown
528 1.13 oster has already taken care of setting the clean
529 1.13 oster bits.) Mark things as clean... */
530 1.13 oster rf_update_component_labels( raidPtrs[unit] );
531 1.13 oster }
532 1.1 oster
533 1.1 oster raidunlock(rs);
534 1.1 oster return (0);
535 1.1 oster
536 1.1 oster }
537 1.1 oster
538 1.1 oster void
539 1.1 oster raidstrategy(bp)
540 1.1 oster register struct buf *bp;
541 1.1 oster {
542 1.1 oster register int s;
543 1.1 oster
544 1.1 oster unsigned int raidID = raidunit(bp->b_dev);
545 1.1 oster RF_Raid_t *raidPtr;
546 1.1 oster struct raid_softc *rs = &raid_softc[raidID];
547 1.1 oster struct disklabel *lp;
548 1.9 oster int wlabel;
549 1.1 oster
550 1.5 oster #if 0
551 1.9 oster db1_printf(("Strategy: 0x%x 0x%x\n", bp, bp->b_data));
552 1.9 oster db1_printf(("Strategy(2): bp->b_bufsize%d\n", (int) bp->b_bufsize));
553 1.9 oster db1_printf(("bp->b_count=%d\n", (int) bp->b_bcount));
554 1.9 oster db1_printf(("bp->b_resid=%d\n", (int) bp->b_resid));
555 1.9 oster db1_printf(("bp->b_blkno=%d\n", (int) bp->b_blkno));
556 1.5 oster
557 1.9 oster if (bp->b_flags & B_READ)
558 1.1 oster db1_printf(("READ\n"));
559 1.1 oster else
560 1.1 oster db1_printf(("WRITE\n"));
561 1.1 oster #endif
562 1.1 oster if (rf_kbooted != RFK_BOOT_GOOD)
563 1.1 oster return;
564 1.1 oster if (raidID >= numraid || !raidPtrs[raidID]) {
565 1.1 oster bp->b_error = ENODEV;
566 1.1 oster bp->b_flags |= B_ERROR;
567 1.1 oster bp->b_resid = bp->b_bcount;
568 1.1 oster biodone(bp);
569 1.1 oster return;
570 1.1 oster }
571 1.1 oster raidPtr = raidPtrs[raidID];
572 1.1 oster if (!raidPtr->valid) {
573 1.1 oster bp->b_error = ENODEV;
574 1.1 oster bp->b_flags |= B_ERROR;
575 1.1 oster bp->b_resid = bp->b_bcount;
576 1.1 oster biodone(bp);
577 1.1 oster return;
578 1.1 oster }
579 1.1 oster if (bp->b_bcount == 0) {
580 1.1 oster db1_printf(("b_bcount is zero..\n"));
581 1.1 oster biodone(bp);
582 1.1 oster return;
583 1.1 oster }
584 1.1 oster lp = rs->sc_dkdev.dk_label;
585 1.1 oster
586 1.1 oster /*
587 1.1 oster * Do bounds checking and adjust transfer. If there's an
588 1.1 oster * error, the bounds check will flag that for us.
589 1.1 oster */
590 1.1 oster
591 1.9 oster wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
592 1.1 oster if (DISKPART(bp->b_dev) != RAW_PART)
593 1.1 oster if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
594 1.1 oster db1_printf(("Bounds check failed!!:%d %d\n",
595 1.9 oster (int) bp->b_blkno, (int) wlabel));
596 1.1 oster biodone(bp);
597 1.1 oster return;
598 1.1 oster }
599 1.9 oster s = splbio(); /* XXX Needed? */
600 1.9 oster db1_printf(("Beginning strategy...\n"));
601 1.1 oster
602 1.1 oster bp->b_resid = 0;
603 1.9 oster bp->b_error = rf_DoAccessKernel(raidPtrs[raidID], bp,
604 1.9 oster NULL, NULL, NULL);
605 1.1 oster if (bp->b_error) {
606 1.1 oster bp->b_flags |= B_ERROR;
607 1.1 oster db1_printf(("bp->b_flags HAS B_ERROR SET!!!: %d\n",
608 1.9 oster bp->b_error));
609 1.1 oster }
610 1.1 oster splx(s);
611 1.5 oster #if 0
612 1.1 oster db1_printf(("Strategy exiting: 0x%x 0x%x %d %d\n",
613 1.9 oster bp, bp->b_data,
614 1.9 oster (int) bp->b_bcount, (int) bp->b_resid));
615 1.5 oster #endif
616 1.1 oster }
617 1.1 oster /* ARGSUSED */
618 1.1 oster int
619 1.1 oster raidread(dev, uio, flags)
620 1.9 oster dev_t dev;
621 1.1 oster struct uio *uio;
622 1.9 oster int flags;
623 1.1 oster {
624 1.9 oster int unit = raidunit(dev);
625 1.1 oster struct raid_softc *rs;
626 1.9 oster int part;
627 1.1 oster
628 1.1 oster if (unit >= numraid)
629 1.1 oster return (ENXIO);
630 1.1 oster rs = &raid_softc[unit];
631 1.1 oster
632 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
633 1.1 oster return (ENXIO);
634 1.1 oster part = DISKPART(dev);
635 1.1 oster
636 1.9 oster db1_printf(("raidread: unit: %d partition: %d\n", unit, part));
637 1.1 oster
638 1.1 oster return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
639 1.1 oster
640 1.1 oster }
641 1.1 oster /* ARGSUSED */
642 1.1 oster int
643 1.1 oster raidwrite(dev, uio, flags)
644 1.9 oster dev_t dev;
645 1.1 oster struct uio *uio;
646 1.9 oster int flags;
647 1.1 oster {
648 1.9 oster int unit = raidunit(dev);
649 1.1 oster struct raid_softc *rs;
650 1.1 oster
651 1.1 oster if (unit >= numraid)
652 1.1 oster return (ENXIO);
653 1.1 oster rs = &raid_softc[unit];
654 1.1 oster
655 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
656 1.1 oster return (ENXIO);
657 1.1 oster db1_printf(("raidwrite\n"));
658 1.1 oster return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
659 1.1 oster
660 1.1 oster }
661 1.1 oster
662 1.1 oster int
663 1.1 oster raidioctl(dev, cmd, data, flag, p)
664 1.9 oster dev_t dev;
665 1.9 oster u_long cmd;
666 1.1 oster caddr_t data;
667 1.9 oster int flag;
668 1.1 oster struct proc *p;
669 1.1 oster {
670 1.9 oster int unit = raidunit(dev);
671 1.9 oster int error = 0;
672 1.9 oster int part, pmask;
673 1.1 oster struct raid_softc *rs;
674 1.1 oster #if 0
675 1.9 oster int r, c;
676 1.1 oster #endif
677 1.9 oster /* struct raid_ioctl *ccio = (struct ccd_ioctl *)data; */
678 1.1 oster
679 1.9 oster /* struct ccdbuf *cbp; */
680 1.9 oster /* struct raidbuf *raidbp; */
681 1.1 oster RF_Config_t *k_cfg, *u_cfg;
682 1.1 oster u_char *specific_buf;
683 1.11 oster int retcode = 0;
684 1.11 oster int row;
685 1.11 oster int column;
686 1.1 oster struct rf_recon_req *rrcopy, *rr;
687 1.11 oster RF_ComponentLabel_t *component_label;
688 1.11 oster RF_ComponentLabel_t ci_label;
689 1.11 oster RF_ComponentLabel_t **c_label_ptr;
690 1.12 oster RF_SingleComponent_t *sparePtr,*componentPtr;
691 1.12 oster RF_SingleComponent_t hot_spare;
692 1.12 oster RF_SingleComponent_t component;
693 1.1 oster
694 1.1 oster if (unit >= numraid)
695 1.1 oster return (ENXIO);
696 1.1 oster rs = &raid_softc[unit];
697 1.1 oster
698 1.9 oster db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
699 1.9 oster (int) DISKPART(dev), (int) unit, (int) cmd));
700 1.1 oster
701 1.1 oster /* Must be open for writes for these commands... */
702 1.1 oster switch (cmd) {
703 1.1 oster case DIOCSDINFO:
704 1.1 oster case DIOCWDINFO:
705 1.1 oster case DIOCWLABEL:
706 1.1 oster if ((flag & FWRITE) == 0)
707 1.1 oster return (EBADF);
708 1.1 oster }
709 1.1 oster
710 1.1 oster /* Must be initialized for these... */
711 1.1 oster switch (cmd) {
712 1.1 oster case DIOCGDINFO:
713 1.1 oster case DIOCSDINFO:
714 1.1 oster case DIOCWDINFO:
715 1.1 oster case DIOCGPART:
716 1.1 oster case DIOCWLABEL:
717 1.1 oster case DIOCGDEFLABEL:
718 1.1 oster case RAIDFRAME_SHUTDOWN:
719 1.1 oster case RAIDFRAME_REWRITEPARITY:
720 1.1 oster case RAIDFRAME_GET_INFO:
721 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
722 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
723 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
724 1.1 oster case RAIDFRAME_GET_SIZE:
725 1.1 oster case RAIDFRAME_FAIL_DISK:
726 1.1 oster case RAIDFRAME_COPYBACK:
727 1.1 oster case RAIDFRAME_CHECKRECON:
728 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
729 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
730 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
731 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
732 1.11 oster case RAIDFRAME_INIT_LABELS:
733 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
734 1.1 oster if ((rs->sc_flags & RAIDF_INITED) == 0)
735 1.1 oster return (ENXIO);
736 1.1 oster }
737 1.9 oster
738 1.1 oster switch (cmd) {
739 1.1 oster
740 1.1 oster
741 1.1 oster /* configure the system */
742 1.1 oster case RAIDFRAME_CONFIGURE:
743 1.1 oster
744 1.1 oster db3_printf(("rf_ioctl: RAIDFRAME_CONFIGURE\n"));
745 1.1 oster /* copy-in the configuration information */
746 1.1 oster /* data points to a pointer to the configuration structure */
747 1.9 oster u_cfg = *((RF_Config_t **) data);
748 1.9 oster RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
749 1.1 oster if (k_cfg == NULL) {
750 1.1 oster db3_printf(("rf_ioctl: ENOMEM for config. Code is %d\n", retcode));
751 1.9 oster return (ENOMEM);
752 1.1 oster }
753 1.9 oster retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
754 1.9 oster sizeof(RF_Config_t));
755 1.1 oster if (retcode) {
756 1.9 oster db3_printf(("rf_ioctl: retcode=%d copyin.1\n",
757 1.9 oster retcode));
758 1.9 oster return (retcode);
759 1.1 oster }
760 1.9 oster /* allocate a buffer for the layout-specific data, and copy it
761 1.9 oster * in */
762 1.1 oster if (k_cfg->layoutSpecificSize) {
763 1.9 oster if (k_cfg->layoutSpecificSize > 10000) {
764 1.1 oster /* sanity check */
765 1.1 oster db3_printf(("rf_ioctl: EINVAL %d\n", retcode));
766 1.9 oster return (EINVAL);
767 1.1 oster }
768 1.9 oster RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
769 1.9 oster (u_char *));
770 1.1 oster if (specific_buf == NULL) {
771 1.9 oster RF_Free(k_cfg, sizeof(RF_Config_t));
772 1.1 oster db3_printf(("rf_ioctl: ENOMEM %d\n", retcode));
773 1.9 oster return (ENOMEM);
774 1.1 oster }
775 1.9 oster retcode = copyin(k_cfg->layoutSpecific,
776 1.9 oster (caddr_t) specific_buf,
777 1.9 oster k_cfg->layoutSpecificSize);
778 1.1 oster if (retcode) {
779 1.1 oster db3_printf(("rf_ioctl: retcode=%d copyin.2\n",
780 1.9 oster retcode));
781 1.9 oster return (retcode);
782 1.1 oster }
783 1.9 oster } else
784 1.9 oster specific_buf = NULL;
785 1.1 oster k_cfg->layoutSpecific = specific_buf;
786 1.9 oster
787 1.9 oster /* should do some kind of sanity check on the configuration.
788 1.9 oster * Store the sum of all the bytes in the last byte? */
789 1.1 oster
790 1.5 oster #if 0
791 1.1 oster db1_printf(("Considering configuring the system.:%d 0x%x\n",
792 1.9 oster unit, p));
793 1.5 oster #endif
794 1.1 oster
795 1.9 oster /* We need the pointer to this a little deeper, so stash it
796 1.9 oster * here... */
797 1.1 oster
798 1.1 oster raidPtrs[unit]->proc = p;
799 1.1 oster
800 1.1 oster /* configure the system */
801 1.1 oster
802 1.1 oster raidPtrs[unit]->raidid = unit;
803 1.20 oster
804 1.1 oster retcode = rf_Configure(raidPtrs[unit], k_cfg);
805 1.1 oster
806 1.20 oster /* allow this many simultaneous IO's to this RAID device */
807 1.20 oster raidPtrs[unit]->openings = RAIDOUTSTANDING;
808 1.9 oster
809 1.1 oster if (retcode == 0) {
810 1.9 oster retcode = raidinit(dev, raidPtrs[unit], unit);
811 1.12 oster rf_markalldirty( raidPtrs[unit] );
812 1.9 oster }
813 1.1 oster /* free the buffers. No return code here. */
814 1.1 oster if (k_cfg->layoutSpecificSize) {
815 1.9 oster RF_Free(specific_buf, k_cfg->layoutSpecificSize);
816 1.1 oster }
817 1.9 oster RF_Free(k_cfg, sizeof(RF_Config_t));
818 1.9 oster
819 1.9 oster db3_printf(("rf_ioctl: retcode=%d RAIDFRAME_CONFIGURE\n",
820 1.9 oster retcode));
821 1.11 oster
822 1.9 oster return (retcode);
823 1.9 oster
824 1.9 oster /* shutdown the system */
825 1.1 oster case RAIDFRAME_SHUTDOWN:
826 1.9 oster
827 1.9 oster if ((error = raidlock(rs)) != 0)
828 1.9 oster return (error);
829 1.1 oster
830 1.1 oster /*
831 1.1 oster * If somebody has a partition mounted, we shouldn't
832 1.1 oster * shutdown.
833 1.1 oster */
834 1.1 oster
835 1.1 oster part = DISKPART(dev);
836 1.1 oster pmask = (1 << part);
837 1.9 oster if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
838 1.9 oster ((rs->sc_dkdev.dk_bopenmask & pmask) &&
839 1.9 oster (rs->sc_dkdev.dk_copenmask & pmask))) {
840 1.9 oster raidunlock(rs);
841 1.9 oster return (EBUSY);
842 1.9 oster }
843 1.11 oster
844 1.1 oster if (rf_debugKernelAccess) {
845 1.1 oster printf("call shutdown\n");
846 1.1 oster }
847 1.9 oster raidPtrs[unit]->proc = p; /* XXX necessary evil */
848 1.11 oster
849 1.1 oster retcode = rf_Shutdown(raidPtrs[unit]);
850 1.1 oster
851 1.3 hubertf db1_printf(("Done main shutdown\n"));
852 1.1 oster
853 1.1 oster pool_destroy(&rs->sc_cbufpool);
854 1.3 hubertf db1_printf(("Done freeing component buffer freelist\n"));
855 1.1 oster
856 1.1 oster /* It's no longer initialized... */
857 1.1 oster rs->sc_flags &= ~RAIDF_INITED;
858 1.16 oster
859 1.9 oster /* Detach the disk. */
860 1.9 oster disk_detach(&rs->sc_dkdev);
861 1.1 oster
862 1.1 oster raidunlock(rs);
863 1.1 oster
864 1.9 oster return (retcode);
865 1.11 oster case RAIDFRAME_GET_COMPONENT_LABEL:
866 1.11 oster c_label_ptr = (RF_ComponentLabel_t **) data;
867 1.11 oster /* need to read the component label for the disk indicated
868 1.11 oster by row,column in component_label
869 1.11 oster XXX need to sanity check these values!!!
870 1.11 oster */
871 1.11 oster
872 1.11 oster /* For practice, let's get it directly from disk, rather
873 1.11 oster than from the in-core copy */
874 1.11 oster RF_Malloc( component_label, sizeof( RF_ComponentLabel_t ),
875 1.11 oster (RF_ComponentLabel_t *));
876 1.11 oster if (component_label == NULL)
877 1.11 oster return (ENOMEM);
878 1.11 oster
879 1.11 oster bzero((char *) component_label, sizeof(RF_ComponentLabel_t));
880 1.11 oster
881 1.11 oster retcode = copyin( *c_label_ptr, component_label,
882 1.11 oster sizeof(RF_ComponentLabel_t));
883 1.11 oster
884 1.11 oster if (retcode) {
885 1.11 oster return(retcode);
886 1.11 oster }
887 1.11 oster
888 1.11 oster row = component_label->row;
889 1.11 oster printf("Row: %d\n",row);
890 1.11 oster if (row >= raidPtrs[unit]->numRow) {
891 1.11 oster row = 0; /* XXX */
892 1.11 oster }
893 1.11 oster column = component_label->column;
894 1.11 oster printf("Column: %d\n",column);
895 1.11 oster if (column >= raidPtrs[unit]->numCol) {
896 1.11 oster column = 0; /* XXX */
897 1.11 oster }
898 1.11 oster
899 1.11 oster raidread_component_label(
900 1.11 oster raidPtrs[unit]->Disks[row][column].dev,
901 1.11 oster raidPtrs[unit]->raid_cinfo[row][column].ci_vp,
902 1.11 oster component_label );
903 1.11 oster
904 1.11 oster retcode = copyout((caddr_t) component_label,
905 1.11 oster (caddr_t) *c_label_ptr,
906 1.11 oster sizeof(RF_ComponentLabel_t));
907 1.11 oster RF_Free( component_label, sizeof(RF_ComponentLabel_t));
908 1.11 oster return (retcode);
909 1.11 oster
910 1.11 oster case RAIDFRAME_SET_COMPONENT_LABEL:
911 1.11 oster component_label = (RF_ComponentLabel_t *) data;
912 1.11 oster
913 1.11 oster /* XXX check the label for valid stuff... */
914 1.11 oster /* Note that some things *should not* get modified --
915 1.11 oster the user should be re-initing the labels instead of
916 1.11 oster trying to patch things.
917 1.11 oster */
918 1.11 oster
919 1.11 oster printf("Got component label:\n");
920 1.11 oster printf("Version: %d\n",component_label->version);
921 1.11 oster printf("Serial Number: %d\n",component_label->serial_number);
922 1.11 oster printf("Mod counter: %d\n",component_label->mod_counter);
923 1.11 oster printf("Row: %d\n", component_label->row);
924 1.11 oster printf("Column: %d\n", component_label->column);
925 1.11 oster printf("Num Rows: %d\n", component_label->num_rows);
926 1.11 oster printf("Num Columns: %d\n", component_label->num_columns);
927 1.11 oster printf("Clean: %d\n", component_label->clean);
928 1.11 oster printf("Status: %d\n", component_label->status);
929 1.11 oster
930 1.11 oster row = component_label->row;
931 1.11 oster column = component_label->column;
932 1.12 oster
933 1.12 oster if ((row < 0) || (row >= raidPtrs[unit]->numRow) ||
934 1.12 oster (column < 0) || (column >= raidPtrs[unit]->numCol)) {
935 1.12 oster return(EINVAL);
936 1.11 oster }
937 1.12 oster
938 1.12 oster /* XXX this isn't allowed to do anything for now :-) */
939 1.12 oster #if 0
940 1.11 oster raidwrite_component_label(
941 1.11 oster raidPtrs[unit]->Disks[row][column].dev,
942 1.11 oster raidPtrs[unit]->raid_cinfo[row][column].ci_vp,
943 1.11 oster component_label );
944 1.12 oster #endif
945 1.12 oster return (0);
946 1.11 oster
947 1.11 oster case RAIDFRAME_INIT_LABELS:
948 1.11 oster component_label = (RF_ComponentLabel_t *) data;
949 1.11 oster /*
950 1.11 oster we only want the serial number from
951 1.11 oster the above. We get all the rest of the information
952 1.11 oster from the config that was used to create this RAID
953 1.11 oster set.
954 1.11 oster */
955 1.12 oster
956 1.12 oster raidPtrs[unit]->serial_number = component_label->serial_number;
957 1.12 oster /* current version number */
958 1.12 oster ci_label.version = RF_COMPONENT_LABEL_VERSION;
959 1.11 oster ci_label.serial_number = component_label->serial_number;
960 1.12 oster ci_label.mod_counter = raidPtrs[unit]->mod_counter;
961 1.11 oster ci_label.num_rows = raidPtrs[unit]->numRow;
962 1.11 oster ci_label.num_columns = raidPtrs[unit]->numCol;
963 1.11 oster ci_label.clean = RF_RAID_DIRTY; /* not clean */
964 1.11 oster ci_label.status = rf_ds_optimal; /* "It's good!" */
965 1.11 oster
966 1.11 oster for(row=0;row<raidPtrs[unit]->numRow;row++) {
967 1.11 oster ci_label.row = row;
968 1.11 oster for(column=0;column<raidPtrs[unit]->numCol;column++) {
969 1.11 oster ci_label.column = column;
970 1.11 oster raidwrite_component_label(
971 1.11 oster raidPtrs[unit]->Disks[row][column].dev,
972 1.11 oster raidPtrs[unit]->raid_cinfo[row][column].ci_vp,
973 1.11 oster &ci_label );
974 1.11 oster }
975 1.11 oster }
976 1.11 oster
977 1.11 oster return (retcode);
978 1.9 oster
979 1.1 oster /* initialize all parity */
980 1.1 oster case RAIDFRAME_REWRITEPARITY:
981 1.1 oster
982 1.17 oster if (raidPtrs[unit]->Layout.map->faultsTolerated == 0) {
983 1.17 oster /* Parity for RAID 0 is trivially correct */
984 1.17 oster raidPtrs[unit]->parity_good = RF_RAID_CLEAN;
985 1.17 oster return(0);
986 1.17 oster }
987 1.17 oster
988 1.1 oster /* borrow the thread of the requesting process */
989 1.9 oster raidPtrs[unit]->proc = p; /* Blah... :-p GO */
990 1.1 oster retcode = rf_RewriteParity(raidPtrs[unit]);
991 1.9 oster /* return I/O Error if the parity rewrite fails */
992 1.1 oster
993 1.11 oster if (retcode) {
994 1.9 oster retcode = EIO;
995 1.11 oster } else {
996 1.12 oster /* set the clean bit! If we shutdown correctly,
997 1.12 oster the clean bit on each component label will get
998 1.12 oster set */
999 1.12 oster raidPtrs[unit]->parity_good = RF_RAID_CLEAN;
1000 1.11 oster }
1001 1.9 oster return (retcode);
1002 1.9 oster
1003 1.11 oster
1004 1.11 oster case RAIDFRAME_ADD_HOT_SPARE:
1005 1.12 oster sparePtr = (RF_SingleComponent_t *) data;
1006 1.12 oster memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1007 1.12 oster printf("Adding spare\n");
1008 1.19 oster raidPtrs[unit]->proc = p; /* Blah... :-p GO */
1009 1.12 oster retcode = rf_add_hot_spare(raidPtrs[unit], &hot_spare);
1010 1.11 oster return(retcode);
1011 1.11 oster
1012 1.11 oster case RAIDFRAME_REMOVE_HOT_SPARE:
1013 1.11 oster return(retcode);
1014 1.11 oster
1015 1.12 oster case RAIDFRAME_REBUILD_IN_PLACE:
1016 1.12 oster componentPtr = (RF_SingleComponent_t *) data;
1017 1.12 oster memcpy( &component, componentPtr,
1018 1.12 oster sizeof(RF_SingleComponent_t));
1019 1.12 oster row = component.row;
1020 1.12 oster column = component.column;
1021 1.12 oster printf("Rebuild: %d %d\n",row, column);
1022 1.12 oster if ((row < 0) || (row >= raidPtrs[unit]->numRow) ||
1023 1.12 oster (column < 0) || (column >= raidPtrs[unit]->numCol)) {
1024 1.12 oster return(EINVAL);
1025 1.12 oster }
1026 1.12 oster printf("Attempting a rebuild in place\n");
1027 1.12 oster raidPtrs[unit]->proc = p; /* Blah... :-p GO */
1028 1.12 oster retcode = rf_ReconstructInPlace(raidPtrs[unit], row, column);
1029 1.12 oster return(retcode);
1030 1.12 oster
1031 1.9 oster /* issue a test-unit-ready through raidframe to the indicated
1032 1.9 oster * device */
1033 1.9 oster #if 0 /* XXX not supported yet (ever?) */
1034 1.1 oster case RAIDFRAME_TUR:
1035 1.1 oster /* debug only */
1036 1.9 oster retcode = rf_SCSI_DoTUR(0, 0, 0, 0, *(dev_t *) data);
1037 1.9 oster return (retcode);
1038 1.1 oster #endif
1039 1.1 oster case RAIDFRAME_GET_INFO:
1040 1.1 oster {
1041 1.1 oster RF_Raid_t *raid = raidPtrs[unit];
1042 1.1 oster RF_DeviceConfig_t *cfg, **ucfgp;
1043 1.9 oster int i, j, d;
1044 1.9 oster
1045 1.1 oster if (!raid->valid)
1046 1.9 oster return (ENODEV);
1047 1.9 oster ucfgp = (RF_DeviceConfig_t **) data;
1048 1.9 oster RF_Malloc(cfg, sizeof(RF_DeviceConfig_t),
1049 1.11 oster (RF_DeviceConfig_t *));
1050 1.1 oster if (cfg == NULL)
1051 1.9 oster return (ENOMEM);
1052 1.9 oster bzero((char *) cfg, sizeof(RF_DeviceConfig_t));
1053 1.1 oster cfg->rows = raid->numRow;
1054 1.1 oster cfg->cols = raid->numCol;
1055 1.1 oster cfg->ndevs = raid->numRow * raid->numCol;
1056 1.1 oster if (cfg->ndevs >= RF_MAX_DISKS) {
1057 1.1 oster cfg->ndevs = 0;
1058 1.9 oster return (ENOMEM);
1059 1.1 oster }
1060 1.1 oster cfg->nspares = raid->numSpare;
1061 1.1 oster if (cfg->nspares >= RF_MAX_DISKS) {
1062 1.1 oster cfg->nspares = 0;
1063 1.9 oster return (ENOMEM);
1064 1.1 oster }
1065 1.1 oster cfg->maxqdepth = raid->maxQueueDepth;
1066 1.1 oster d = 0;
1067 1.9 oster for (i = 0; i < cfg->rows; i++) {
1068 1.9 oster for (j = 0; j < cfg->cols; j++) {
1069 1.1 oster cfg->devs[d] = raid->Disks[i][j];
1070 1.1 oster d++;
1071 1.1 oster }
1072 1.1 oster }
1073 1.9 oster for (j = cfg->cols, i = 0; i < cfg->nspares; i++, j++) {
1074 1.1 oster cfg->spares[i] = raid->Disks[0][j];
1075 1.1 oster }
1076 1.9 oster retcode = copyout((caddr_t) cfg, (caddr_t) * ucfgp,
1077 1.11 oster sizeof(RF_DeviceConfig_t));
1078 1.9 oster RF_Free(cfg, sizeof(RF_DeviceConfig_t));
1079 1.9 oster
1080 1.9 oster return (retcode);
1081 1.1 oster }
1082 1.9 oster break;
1083 1.9 oster
1084 1.1 oster case RAIDFRAME_RESET_ACCTOTALS:
1085 1.1 oster {
1086 1.1 oster RF_Raid_t *raid = raidPtrs[unit];
1087 1.9 oster
1088 1.1 oster bzero(&raid->acc_totals, sizeof(raid->acc_totals));
1089 1.9 oster return (0);
1090 1.1 oster }
1091 1.9 oster break;
1092 1.9 oster
1093 1.1 oster case RAIDFRAME_GET_ACCTOTALS:
1094 1.1 oster {
1095 1.9 oster RF_AccTotals_t *totals = (RF_AccTotals_t *) data;
1096 1.1 oster RF_Raid_t *raid = raidPtrs[unit];
1097 1.9 oster
1098 1.1 oster *totals = raid->acc_totals;
1099 1.9 oster return (0);
1100 1.1 oster }
1101 1.9 oster break;
1102 1.9 oster
1103 1.1 oster case RAIDFRAME_KEEP_ACCTOTALS:
1104 1.1 oster {
1105 1.1 oster RF_Raid_t *raid = raidPtrs[unit];
1106 1.9 oster int *keep = (int *) data;
1107 1.9 oster
1108 1.1 oster raid->keep_acc_totals = *keep;
1109 1.9 oster return (0);
1110 1.1 oster }
1111 1.9 oster break;
1112 1.9 oster
1113 1.1 oster case RAIDFRAME_GET_SIZE:
1114 1.1 oster *(int *) data = raidPtrs[unit]->totalSectors;
1115 1.9 oster return (0);
1116 1.1 oster
1117 1.1 oster #define RAIDFRAME_RECON 1
1118 1.1 oster /* XXX The above should probably be set somewhere else!! GO */
1119 1.1 oster #if RAIDFRAME_RECON > 0
1120 1.1 oster
1121 1.1 oster /* fail a disk & optionally start reconstruction */
1122 1.1 oster case RAIDFRAME_FAIL_DISK:
1123 1.1 oster rr = (struct rf_recon_req *) data;
1124 1.9 oster
1125 1.9 oster if (rr->row < 0 || rr->row >= raidPtrs[unit]->numRow
1126 1.1 oster || rr->col < 0 || rr->col >= raidPtrs[unit]->numCol)
1127 1.9 oster return (EINVAL);
1128 1.1 oster
1129 1.12 oster printf("raid%d: Failing the disk: row: %d col: %d\n",
1130 1.12 oster unit, rr->row, rr->col);
1131 1.9 oster
1132 1.9 oster /* make a copy of the recon request so that we don't rely on
1133 1.9 oster * the user's buffer */
1134 1.1 oster RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1135 1.1 oster bcopy(rr, rrcopy, sizeof(*rr));
1136 1.1 oster rrcopy->raidPtr = (void *) raidPtrs[unit];
1137 1.1 oster
1138 1.1 oster LOCK_RECON_Q_MUTEX();
1139 1.1 oster rrcopy->next = recon_queue;
1140 1.1 oster recon_queue = rrcopy;
1141 1.1 oster wakeup(&recon_queue);
1142 1.1 oster UNLOCK_RECON_Q_MUTEX();
1143 1.9 oster
1144 1.9 oster return (0);
1145 1.9 oster
1146 1.9 oster /* invoke a copyback operation after recon on whatever disk
1147 1.9 oster * needs it, if any */
1148 1.9 oster case RAIDFRAME_COPYBACK:
1149 1.1 oster /* borrow the current thread to get this done */
1150 1.9 oster raidPtrs[unit]->proc = p; /* ICK.. but needed :-p GO */
1151 1.1 oster rf_CopybackReconstructedData(raidPtrs[unit]);
1152 1.9 oster return (0);
1153 1.9 oster
1154 1.1 oster /* return the percentage completion of reconstruction */
1155 1.1 oster case RAIDFRAME_CHECKRECON:
1156 1.1 oster row = *(int *) data;
1157 1.1 oster if (row < 0 || row >= raidPtrs[unit]->numRow)
1158 1.9 oster return (EINVAL);
1159 1.9 oster if (raidPtrs[unit]->status[row] != rf_rs_reconstructing)
1160 1.1 oster *(int *) data = 100;
1161 1.9 oster else
1162 1.1 oster *(int *) data = raidPtrs[unit]->reconControl[row]->percentComplete;
1163 1.9 oster return (0);
1164 1.9 oster
1165 1.9 oster /* the sparetable daemon calls this to wait for the kernel to
1166 1.9 oster * need a spare table. this ioctl does not return until a
1167 1.9 oster * spare table is needed. XXX -- calling mpsleep here in the
1168 1.9 oster * ioctl code is almost certainly wrong and evil. -- XXX XXX
1169 1.9 oster * -- I should either compute the spare table in the kernel,
1170 1.9 oster * or have a different -- XXX XXX -- interface (a different
1171 1.9 oster * character device) for delivering the table -- XXX */
1172 1.1 oster #if 0
1173 1.1 oster case RAIDFRAME_SPARET_WAIT:
1174 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1175 1.9 oster while (!rf_sparet_wait_queue)
1176 1.9 oster mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1177 1.1 oster waitreq = rf_sparet_wait_queue;
1178 1.1 oster rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1179 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1180 1.9 oster
1181 1.9 oster *((RF_SparetWait_t *) data) = *waitreq; /* structure assignment */
1182 1.9 oster
1183 1.1 oster RF_Free(waitreq, sizeof(*waitreq));
1184 1.9 oster return (0);
1185 1.9 oster
1186 1.9 oster
1187 1.9 oster /* wakes up a process waiting on SPARET_WAIT and puts an error
1188 1.9 oster * code in it that will cause the daemon to exit */
1189 1.1 oster case RAIDFRAME_ABORT_SPARET_WAIT:
1190 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1191 1.1 oster waitreq->fcol = -1;
1192 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1193 1.1 oster waitreq->next = rf_sparet_wait_queue;
1194 1.1 oster rf_sparet_wait_queue = waitreq;
1195 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1196 1.1 oster wakeup(&rf_sparet_wait_queue);
1197 1.9 oster return (0);
1198 1.1 oster
1199 1.9 oster /* used by the spare table daemon to deliver a spare table
1200 1.9 oster * into the kernel */
1201 1.1 oster case RAIDFRAME_SEND_SPARET:
1202 1.9 oster
1203 1.1 oster /* install the spare table */
1204 1.9 oster retcode = rf_SetSpareTable(raidPtrs[unit], *(void **) data);
1205 1.9 oster
1206 1.9 oster /* respond to the requestor. the return status of the spare
1207 1.9 oster * table installation is passed in the "fcol" field */
1208 1.1 oster RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1209 1.1 oster waitreq->fcol = retcode;
1210 1.1 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1211 1.1 oster waitreq->next = rf_sparet_resp_queue;
1212 1.1 oster rf_sparet_resp_queue = waitreq;
1213 1.1 oster wakeup(&rf_sparet_resp_queue);
1214 1.1 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1215 1.9 oster
1216 1.9 oster return (retcode);
1217 1.1 oster #endif
1218 1.1 oster
1219 1.1 oster
1220 1.9 oster #endif /* RAIDFRAME_RECON > 0 */
1221 1.9 oster
1222 1.9 oster default:
1223 1.9 oster break; /* fall through to the os-specific code below */
1224 1.1 oster
1225 1.1 oster }
1226 1.9 oster
1227 1.1 oster if (!raidPtrs[unit]->valid)
1228 1.9 oster return (EINVAL);
1229 1.9 oster
1230 1.1 oster /*
1231 1.1 oster * Add support for "regular" device ioctls here.
1232 1.1 oster */
1233 1.9 oster
1234 1.1 oster switch (cmd) {
1235 1.1 oster case DIOCGDINFO:
1236 1.9 oster db1_printf(("DIOCGDINFO %d %d\n", (int) dev, (int) DISKPART(dev)));
1237 1.9 oster *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1238 1.1 oster break;
1239 1.1 oster
1240 1.1 oster case DIOCGPART:
1241 1.9 oster db1_printf(("DIOCGPART: %d %d\n", (int) dev, (int) DISKPART(dev)));
1242 1.9 oster ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1243 1.9 oster ((struct partinfo *) data)->part =
1244 1.1 oster &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1245 1.1 oster break;
1246 1.1 oster
1247 1.1 oster case DIOCWDINFO:
1248 1.1 oster db1_printf(("DIOCWDINFO\n"));
1249 1.1 oster case DIOCSDINFO:
1250 1.1 oster db1_printf(("DIOCSDINFO\n"));
1251 1.1 oster if ((error = raidlock(rs)) != 0)
1252 1.1 oster return (error);
1253 1.1 oster
1254 1.1 oster rs->sc_flags |= RAIDF_LABELLING;
1255 1.1 oster
1256 1.1 oster error = setdisklabel(rs->sc_dkdev.dk_label,
1257 1.9 oster (struct disklabel *) data, 0, rs->sc_dkdev.dk_cpulabel);
1258 1.1 oster if (error == 0) {
1259 1.1 oster if (cmd == DIOCWDINFO)
1260 1.1 oster error = writedisklabel(RAIDLABELDEV(dev),
1261 1.1 oster raidstrategy, rs->sc_dkdev.dk_label,
1262 1.1 oster rs->sc_dkdev.dk_cpulabel);
1263 1.1 oster }
1264 1.1 oster rs->sc_flags &= ~RAIDF_LABELLING;
1265 1.1 oster
1266 1.1 oster raidunlock(rs);
1267 1.1 oster
1268 1.1 oster if (error)
1269 1.1 oster return (error);
1270 1.1 oster break;
1271 1.1 oster
1272 1.1 oster case DIOCWLABEL:
1273 1.1 oster db1_printf(("DIOCWLABEL\n"));
1274 1.9 oster if (*(int *) data != 0)
1275 1.1 oster rs->sc_flags |= RAIDF_WLABEL;
1276 1.1 oster else
1277 1.1 oster rs->sc_flags &= ~RAIDF_WLABEL;
1278 1.1 oster break;
1279 1.1 oster
1280 1.1 oster case DIOCGDEFLABEL:
1281 1.1 oster db1_printf(("DIOCGDEFLABEL\n"));
1282 1.1 oster raidgetdefaultlabel(raidPtrs[unit], rs,
1283 1.9 oster (struct disklabel *) data);
1284 1.1 oster break;
1285 1.1 oster
1286 1.1 oster default:
1287 1.9 oster retcode = ENOTTY; /* XXXX ?? OR EINVAL ? */
1288 1.1 oster }
1289 1.9 oster return (retcode);
1290 1.1 oster
1291 1.1 oster }
1292 1.1 oster
1293 1.1 oster
1294 1.9 oster /* raidinit -- complete the rest of the initialization for the
1295 1.1 oster RAIDframe device. */
1296 1.1 oster
1297 1.1 oster
1298 1.1 oster static int
1299 1.9 oster raidinit(dev, raidPtr, unit)
1300 1.9 oster dev_t dev;
1301 1.1 oster RF_Raid_t *raidPtr;
1302 1.9 oster int unit;
1303 1.1 oster {
1304 1.9 oster int retcode;
1305 1.9 oster /* int ix; */
1306 1.9 oster /* struct raidbuf *raidbp; */
1307 1.1 oster struct raid_softc *rs;
1308 1.1 oster
1309 1.1 oster retcode = 0;
1310 1.1 oster
1311 1.1 oster rs = &raid_softc[unit];
1312 1.1 oster pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
1313 1.11 oster 0, 0, "raidpl", 0, NULL, NULL, M_RAIDFRAME);
1314 1.9 oster
1315 1.1 oster
1316 1.1 oster /* XXX should check return code first... */
1317 1.1 oster rs->sc_flags |= RAIDF_INITED;
1318 1.1 oster
1319 1.9 oster sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds. */
1320 1.1 oster
1321 1.9 oster rs->sc_dkdev.dk_name = rs->sc_xname;
1322 1.11 oster
1323 1.1 oster /* disk_attach actually creates space for the CPU disklabel, among
1324 1.9 oster * other things, so it's critical to call this *BEFORE* we try putzing
1325 1.9 oster * with disklabels. */
1326 1.11 oster
1327 1.1 oster disk_attach(&rs->sc_dkdev);
1328 1.1 oster
1329 1.1 oster /* XXX There may be a weird interaction here between this, and
1330 1.9 oster * protectedSectors, as used in RAIDframe. */
1331 1.11 oster
1332 1.9 oster rs->sc_size = raidPtr->totalSectors;
1333 1.1 oster rs->sc_dev = dev;
1334 1.11 oster
1335 1.9 oster return (retcode);
1336 1.1 oster }
1337 1.1 oster
1338 1.1 oster /*
1339 1.1 oster * This kernel thread never exits. It is created once, and persists
1340 1.1 oster * until the system reboots.
1341 1.1 oster */
1342 1.11 oster
1343 1.9 oster void
1344 1.9 oster rf_ReconKernelThread()
1345 1.1 oster {
1346 1.9 oster struct rf_recon_req *req;
1347 1.9 oster int s;
1348 1.1 oster
1349 1.9 oster /* XXX not sure what spl() level we should be at here... probably
1350 1.9 oster * splbio() */
1351 1.9 oster s = splbio();
1352 1.1 oster
1353 1.9 oster while (1) {
1354 1.9 oster /* grab the next reconstruction request from the queue */
1355 1.9 oster LOCK_RECON_Q_MUTEX();
1356 1.9 oster while (!recon_queue) {
1357 1.9 oster UNLOCK_RECON_Q_MUTEX();
1358 1.15 oster tsleep(&recon_queue, PRIBIO,
1359 1.11 oster "raidframe recon", 0);
1360 1.9 oster LOCK_RECON_Q_MUTEX();
1361 1.9 oster }
1362 1.9 oster req = recon_queue;
1363 1.9 oster recon_queue = recon_queue->next;
1364 1.9 oster UNLOCK_RECON_Q_MUTEX();
1365 1.9 oster
1366 1.9 oster /*
1367 1.9 oster * If flags specifies that we should start recon, this call
1368 1.11 oster * will not return until reconstruction completes, fails,
1369 1.11 oster * or is aborted.
1370 1.9 oster */
1371 1.9 oster rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
1372 1.9 oster ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
1373 1.1 oster
1374 1.9 oster RF_Free(req, sizeof(*req));
1375 1.9 oster }
1376 1.1 oster }
1377 1.1 oster /* wake up the daemon & tell it to get us a spare table
1378 1.1 oster * XXX
1379 1.9 oster * the entries in the queues should be tagged with the raidPtr
1380 1.11 oster * so that in the extremely rare case that two recons happen at once,
1381 1.11 oster * we know for which device we're requesting a spare table
1382 1.1 oster * XXX
1383 1.1 oster */
1384 1.9 oster int
1385 1.9 oster rf_GetSpareTableFromDaemon(req)
1386 1.9 oster RF_SparetWait_t *req;
1387 1.9 oster {
1388 1.9 oster int retcode;
1389 1.9 oster
1390 1.9 oster RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1391 1.9 oster req->next = rf_sparet_wait_queue;
1392 1.9 oster rf_sparet_wait_queue = req;
1393 1.9 oster wakeup(&rf_sparet_wait_queue);
1394 1.9 oster
1395 1.9 oster /* mpsleep unlocks the mutex */
1396 1.9 oster while (!rf_sparet_resp_queue) {
1397 1.15 oster tsleep(&rf_sparet_resp_queue, PRIBIO,
1398 1.9 oster "raidframe getsparetable", 0);
1399 1.1 oster #if 0
1400 1.11 oster mpsleep(&rf_sparet_resp_queue, PZERO, "sparet resp", 0,
1401 1.11 oster (void *) simple_lock_addr(rf_sparet_wait_mutex),
1402 1.11 oster MS_LOCK_SIMPLE);
1403 1.1 oster #endif
1404 1.9 oster }
1405 1.9 oster req = rf_sparet_resp_queue;
1406 1.9 oster rf_sparet_resp_queue = req->next;
1407 1.9 oster RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1408 1.9 oster
1409 1.9 oster retcode = req->fcol;
1410 1.9 oster RF_Free(req, sizeof(*req)); /* this is not the same req as we
1411 1.9 oster * alloc'd */
1412 1.9 oster return (retcode);
1413 1.1 oster }
1414 1.11 oster /* a wrapper around rf_DoAccess that extracts appropriate info from the
1415 1.11 oster  * bp & passes it down.
1416 1.1 oster  * Any calls originating in the kernel must use non-blocking I/O.
1417 1.1 oster  * Do some extra sanity checking to return "appropriate" error values for
1418 1.1 oster  * certain conditions (to make some standard utilities work).
1419 1.1 oster  */
1420 1.9 oster int
1421 1.9 oster rf_DoAccessKernel(raidPtr, bp, flags, cbFunc, cbArg)
1422 1.9 oster RF_Raid_t *raidPtr;
1423 1.9 oster struct buf *bp;
1424 1.9 oster RF_RaidAccessFlags_t flags;
1425 1.9 oster void (*cbFunc) (struct buf *);
1426 1.9 oster void *cbArg;
1427 1.1 oster {
1428 1.1 oster RF_SectorCount_t num_blocks, pb, sum;
1429 1.1 oster RF_RaidAddr_t raid_addr;
1430 1.9 oster int retcode;
1431 1.1 oster struct partition *pp;
1432 1.9 oster daddr_t blocknum;
1433 1.9 oster int unit;
1434 1.1 oster struct raid_softc *rs;
1435 1.9 oster int do_async;
1436 1.1 oster
1437 1.1 oster /* XXX The dev_t used here should be for /dev/[r]raid* !!! */
1438 1.1 oster
1439 1.1 oster unit = raidPtr->raidid;
1440 1.1 oster rs = &raid_softc[unit];
1441 1.1 oster
1442 1.1 oster 	/* Ok, for the bp we have here, bp->b_blkno is relative to the
1443 1.9 oster 	 * partition.  We need to make it absolute to the underlying device. */
1444 1.1 oster
1445 1.1 oster blocknum = bp->b_blkno;
1446 1.1 oster if (DISKPART(bp->b_dev) != RAW_PART) {
1447 1.1 oster pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1448 1.1 oster blocknum += pp->p_offset;
1449 1.9 oster db1_printf(("updated: %d %d\n", DISKPART(bp->b_dev),
1450 1.9 oster pp->p_offset));
1451 1.1 oster } else {
1452 1.1 oster db1_printf(("Is raw..\n"));
1453 1.1 oster }
1454 1.1 oster db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno, (int) blocknum));
1455 1.1 oster
1456 1.9 oster db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1457 1.9 oster db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1458 1.1 oster
1459 1.9 oster /* *THIS* is where we adjust what block we're going to... but DO NOT
1460 1.9 oster * TOUCH bp->b_blkno!!! */
1461 1.1 oster raid_addr = blocknum;
1462 1.9 oster
1463 1.1 oster num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1464 1.9 oster pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1465 1.1 oster sum = raid_addr + num_blocks + pb;
1466 1.1 oster if (1 || rf_debugKernelAccess) {
1467 1.9 oster db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1468 1.9 oster (int) raid_addr, (int) sum, (int) num_blocks,
1469 1.9 oster (int) pb, (int) bp->b_resid));
1470 1.1 oster }
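	/*
	 * Bounds check: num_blocks is the whole-sector count and pb flags a
	 * trailing partial sector, so sum is the first sector past the end
	 * of this request.  The (sum < raid_addr) etc. comparisons are there
	 * to catch arithmetic wrap-around as well as requests that simply
	 * run past totalSectors.
	 */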
1471 1.1 oster if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1472 1.9 oster || (sum < num_blocks) || (sum < pb)) {
1473 1.1 oster bp->b_error = ENOSPC;
1474 1.1 oster bp->b_flags |= B_ERROR;
1475 1.1 oster bp->b_resid = bp->b_bcount;
1476 1.1 oster biodone(bp);
1477 1.9 oster return (bp->b_error);
1478 1.1 oster }
1479 1.1 oster /*
1480 1.1 oster * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1481 1.1 oster */
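	/*
	 * Reject transfers that aren't a whole number of sectors;
	 * sectorMask is presumably (bytesPerSector - 1), so any low bits
	 * set in b_bcount indicate a partial sector.
	 */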
1482 1.1 oster
1483 1.1 oster if (bp->b_bcount & raidPtr->sectorMask) {
1484 1.1 oster bp->b_error = EINVAL;
1485 1.1 oster bp->b_flags |= B_ERROR;
1486 1.1 oster bp->b_resid = bp->b_bcount;
1487 1.1 oster biodone(bp);
1488 1.9 oster return (bp->b_error);
1489 1.1 oster }
1490 1.1 oster db1_printf(("Calling DoAccess..\n"));
1491 1.1 oster
1492 1.20 oster
1493 1.20 oster 	/* Put a throttle on the number of requests we handle simultaneously */
1494 1.20 oster
1495 1.20 oster RF_LOCK_MUTEX(raidPtr->mutex);
1496 1.20 oster
1497 1.20 oster while(raidPtr->openings <= 0) {
1498 1.20 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
1499 1.20 oster (void)tsleep(&raidPtr->openings, PRIBIO, "rfdwait", 0);
1500 1.20 oster RF_LOCK_MUTEX(raidPtr->mutex);
1501 1.20 oster }
1502 1.20 oster raidPtr->openings--;
1503 1.20 oster
1504 1.20 oster RF_UNLOCK_MUTEX(raidPtr->mutex);
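	/*
	 * openings is decremented here; the matching increment and
	 * wakeup(&raidPtr->openings) are presumably done elsewhere in the
	 * driver when an access completes, which is what lets the sleepers
	 * above make progress.
	 */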
1505 1.20 oster
1506 1.7 explorer /*
1507 1.20 oster * Everything is async.
1508 1.7 explorer */
1509 1.7 explorer do_async = 1;
1510 1.7 explorer
1511 1.9 oster /* don't ever condition on bp->b_flags & B_WRITE. always condition on
1512 1.9 oster * B_READ instead */
1513 1.9 oster retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1514 1.9 oster RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1515 1.9 oster do_async, raid_addr, num_blocks,
1516 1.9 oster bp->b_un.b_addr,
1517 1.9 oster bp, NULL, NULL, RF_DAG_NONBLOCKING_IO | flags,
1518 1.9 oster NULL, cbFunc, cbArg);
1519 1.5 oster #if 0
1520 1.9 oster db1_printf(("After call to DoAccess: 0x%x 0x%x %d\n", bp,
1521 1.9 oster bp->b_data, (int) bp->b_resid));
1522 1.5 oster #endif
1523 1.7 explorer
1524 1.9 oster return (retcode);
1525 1.1 oster }
1526 1.1 oster /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1527 1.1 oster
1528 1.9 oster int
1529 1.9 oster rf_DispatchKernelIO(queue, req)
1530 1.9 oster RF_DiskQueue_t *queue;
1531 1.9 oster RF_DiskQueueData_t *req;
1532 1.1 oster {
1533 1.9 oster int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1534 1.1 oster struct buf *bp;
1535 1.9 oster struct raidbuf *raidbp = NULL;
1536 1.1 oster struct raid_softc *rs;
1537 1.9 oster int unit;
1538 1.9 oster
1539 1.1 oster /* XXX along with the vnode, we also need the softc associated with
1540 1.9 oster * this device.. */
1541 1.9 oster
1542 1.1 oster req->queue = queue;
1543 1.9 oster
1544 1.1 oster unit = queue->raidPtr->raidid;
1545 1.1 oster
1546 1.9 oster db1_printf(("DispatchKernelIO unit: %d\n", unit));
1547 1.1 oster
1548 1.9 oster if (unit >= numraid) {
1549 1.9 oster printf("Invalid unit number: %d %d\n", unit, numraid);
1550 1.1 oster panic("Invalid Unit number in rf_DispatchKernelIO\n");
1551 1.1 oster }
1552 1.1 oster rs = &raid_softc[unit];
1553 1.1 oster
1554 1.1 oster /* XXX is this the right place? */
1555 1.9 oster disk_busy(&rs->sc_dkdev);
1556 1.1 oster
1557 1.1 oster bp = req->bp;
1558 1.16 oster #if 1
1559 1.9 oster /* XXX when there is a physical disk failure, someone is passing us a
1560 1.9 oster * buffer that contains old stuff!! Attempt to deal with this problem
1561 1.9 oster * without taking a performance hit... (not sure where the real bug
1562 1.9 oster * is. It's buried in RAIDframe somewhere) :-( GO ) */
1563 1.4 oster
1564 1.4 oster if (bp->b_flags & B_ERROR) {
1565 1.4 oster bp->b_flags &= ~B_ERROR;
1566 1.4 oster }
1567 1.9 oster if (bp->b_error != 0) {
1568 1.4 oster bp->b_error = 0;
1569 1.4 oster }
1570 1.16 oster #endif
1571 1.1 oster raidbp = RAIDGETBUF(rs);
1572 1.1 oster
1573 1.9 oster raidbp->rf_flags = 0; /* XXX not really used anywhere... */
1574 1.1 oster
1575 1.1 oster /*
1576 1.1 oster * context for raidiodone
1577 1.1 oster */
1578 1.1 oster raidbp->rf_obp = bp;
1579 1.1 oster raidbp->req = req;
1580 1.1 oster
1581 1.1 oster switch (req->type) {
1582 1.9 oster case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1583 1.9 oster /* Dprintf2("rf_DispatchKernelIO: NOP to r %d c %d\n",
1584 1.9 oster * queue->row, queue->col); */
1585 1.1 oster /* XXX need to do something extra here.. */
1586 1.9 oster /* I'm leaving this in, as I've never actually seen it used,
1587 1.9 oster * and I'd like folks to report it... GO */
1588 1.1 oster 		printf("WAKEUP CALLED\n");
1589 1.1 oster queue->numOutstanding++;
1590 1.1 oster
1591 1.1 oster /* XXX need to glue the original buffer into this?? */
1592 1.1 oster
1593 1.1 oster KernelWakeupFunc(&raidbp->rf_buf);
1594 1.1 oster break;
1595 1.9 oster
1596 1.1 oster case RF_IO_TYPE_READ:
1597 1.1 oster case RF_IO_TYPE_WRITE:
1598 1.9 oster
1599 1.1 oster if (req->tracerec) {
1600 1.1 oster RF_ETIMER_START(req->tracerec->timer);
1601 1.1 oster }
1602 1.9 oster InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1603 1.9 oster op | bp->b_flags, queue->rf_cinfo->ci_dev,
1604 1.9 oster req->sectorOffset, req->numSector,
1605 1.9 oster req->buf, KernelWakeupFunc, (void *) req,
1606 1.9 oster queue->raidPtr->logBytesPerSector, req->b_proc);
1607 1.1 oster
1608 1.1 oster if (rf_debugKernelAccess) {
1609 1.9 oster db1_printf(("dispatch: bp->b_blkno = %ld\n",
1610 1.9 oster (long) bp->b_blkno));
1611 1.1 oster }
1612 1.1 oster queue->numOutstanding++;
1613 1.1 oster queue->last_deq_sector = req->sectorOffset;
1614 1.9 oster 		/* the access wouldn't have been let in if there were any
1615 1.9 oster 		 * pending requests at any other priority */
1616 1.1 oster queue->curPriority = req->priority;
1617 1.9 oster /* Dprintf3("rf_DispatchKernelIO: %c to row %d col %d\n",
1618 1.9 oster * req->type, queue->row, queue->col); */
1619 1.1 oster
1620 1.1 oster db1_printf(("Going for %c to unit %d row %d col %d\n",
1621 1.9 oster req->type, unit, queue->row, queue->col));
1622 1.1 oster db1_printf(("sector %d count %d (%d bytes) %d\n",
1623 1.9 oster (int) req->sectorOffset, (int) req->numSector,
1624 1.9 oster (int) (req->numSector <<
1625 1.9 oster queue->raidPtr->logBytesPerSector),
1626 1.9 oster (int) queue->raidPtr->logBytesPerSector));
1627 1.1 oster if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1628 1.1 oster raidbp->rf_buf.b_vp->v_numoutput++;
1629 1.1 oster }
1630 1.9 oster VOP_STRATEGY(&raidbp->rf_buf);
1631 1.1 oster
1632 1.1 oster break;
1633 1.9 oster
1634 1.1 oster default:
1635 1.1 oster panic("bad req->type in rf_DispatchKernelIO");
1636 1.1 oster }
1637 1.1 oster db1_printf(("Exiting from DispatchKernelIO\n"));
1638 1.9 oster return (0);
1639 1.1 oster }
1640 1.9 oster /* this is the callback function associated with an I/O invoked from
1641 1.1 oster  * kernel code.
1642 1.1 oster  */
1643 1.9 oster static void
1644 1.9 oster KernelWakeupFunc(vbp)
1645 1.9 oster struct buf *vbp;
1646 1.9 oster {
1647 1.9 oster RF_DiskQueueData_t *req = NULL;
1648 1.9 oster RF_DiskQueue_t *queue;
1649 1.9 oster struct raidbuf *raidbp = (struct raidbuf *) vbp;
1650 1.9 oster struct buf *bp;
1651 1.9 oster struct raid_softc *rs;
1652 1.9 oster int unit;
1653 1.9 oster register int s;
1654 1.9 oster
1655 1.9 oster s = splbio(); /* XXX */
1656 1.9 oster db1_printf(("recovering the request queue:\n"));
1657 1.9 oster req = raidbp->req;
1658 1.1 oster
1659 1.9 oster bp = raidbp->rf_obp;
1660 1.5 oster #if 0
1661 1.9 oster db1_printf(("bp=0x%x\n", bp));
1662 1.5 oster #endif
1663 1.1 oster
1664 1.9 oster queue = (RF_DiskQueue_t *) req->queue;
1665 1.1 oster
1666 1.9 oster if (raidbp->rf_buf.b_flags & B_ERROR) {
1667 1.1 oster #if 0
1668 1.9 oster printf("Setting bp->b_flags!!! %d\n", raidbp->rf_buf.b_error);
1669 1.1 oster #endif
1670 1.9 oster bp->b_flags |= B_ERROR;
1671 1.9 oster bp->b_error = raidbp->rf_buf.b_error ?
1672 1.9 oster raidbp->rf_buf.b_error : EIO;
1673 1.9 oster }
1674 1.5 oster #if 0
1675 1.9 oster db1_printf(("raidbp->rf_buf.b_bcount=%d\n", (int) raidbp->rf_buf.b_bcount));
1676 1.9 oster db1_printf(("raidbp->rf_buf.b_bufsize=%d\n", (int) raidbp->rf_buf.b_bufsize));
1677 1.9 oster db1_printf(("raidbp->rf_buf.b_resid=%d\n", (int) raidbp->rf_buf.b_resid));
1678 1.9 oster db1_printf(("raidbp->rf_buf.b_data=0x%x\n", raidbp->rf_buf.b_data));
1679 1.5 oster #endif
1680 1.1 oster
1681 1.9 oster /* XXX methinks this could be wrong... */
1682 1.1 oster #if 1
1683 1.9 oster bp->b_resid = raidbp->rf_buf.b_resid;
1684 1.1 oster #endif
1685 1.1 oster
1686 1.9 oster if (req->tracerec) {
1687 1.9 oster RF_ETIMER_STOP(req->tracerec->timer);
1688 1.9 oster RF_ETIMER_EVAL(req->tracerec->timer);
1689 1.9 oster RF_LOCK_MUTEX(rf_tracing_mutex);
1690 1.9 oster req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1691 1.9 oster req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1692 1.9 oster req->tracerec->num_phys_ios++;
1693 1.9 oster RF_UNLOCK_MUTEX(rf_tracing_mutex);
1694 1.9 oster }
1695 1.9 oster bp->b_bcount = raidbp->rf_buf.b_bcount; /* XXXX ?? */
1696 1.1 oster
1697 1.9 oster unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
1698 1.1 oster
1699 1.1 oster
1700 1.9 oster /* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
1701 1.9 oster * ballistic, and mark the component as hosed... */
1702 1.9 oster #if 1
1703 1.9 oster if (bp->b_flags & B_ERROR) {
1704 1.9 oster /* Mark the disk as dead */
1705 1.9 oster /* but only mark it once... */
1706 1.9 oster if (queue->raidPtr->Disks[queue->row][queue->col].status ==
1707 1.9 oster rf_ds_optimal) {
1708 1.9 oster printf("raid%d: IO Error. Marking %s as failed.\n",
1709 1.9 oster unit, queue->raidPtr->Disks[queue->row][queue->col].devname);
1710 1.9 oster queue->raidPtr->Disks[queue->row][queue->col].status =
1711 1.9 oster rf_ds_failed;
1712 1.9 oster queue->raidPtr->status[queue->row] = rf_rs_degraded;
1713 1.9 oster queue->raidPtr->numFailures++;
1714 1.11 oster /* XXX here we should bump the version number for each component, and write that data out */
1715 1.9 oster } else { /* Disk is already dead... */
1716 1.9 oster /* printf("Disk already marked as dead!\n"); */
1717 1.9 oster }
1718 1.4 oster
1719 1.9 oster }
1720 1.4 oster #endif
1721 1.4 oster
1722 1.9 oster rs = &raid_softc[unit];
1723 1.9 oster RAIDPUTBUF(rs, raidbp);
1724 1.9 oster
1725 1.4 oster
1726 1.9 oster if (bp->b_resid == 0) {
1727 1.9 oster db1_printf(("Disk is no longer busy for this buffer... %d %ld %ld\n",
1728 1.9 oster unit, bp->b_resid, bp->b_bcount));
1729 1.9 oster /* XXX is this the right place for a disk_unbusy()??!??!?!? */
1730 1.9 oster disk_unbusy(&rs->sc_dkdev, (bp->b_bcount - bp->b_resid));
1731 1.9 oster } else {
1732 1.9 oster db1_printf(("b_resid is still %ld\n", bp->b_resid));
1733 1.9 oster }
1734 1.1 oster
1735 1.9 oster rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
1736 1.9 oster (req->CompleteFunc) (req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
1737 1.9 oster /* printf("Exiting KernelWakeupFunc\n"); */
1738 1.1 oster
1739 1.9 oster splx(s); /* XXX */
1740 1.1 oster }
1741 1.1 oster
1742 1.1 oster
1743 1.1 oster
1744 1.1 oster /*
1745 1.1 oster * initialize a buf structure for doing an I/O in the kernel.
1746 1.1 oster */
1747 1.9 oster static void
1748 1.9 oster InitBP(
1749 1.9 oster struct buf * bp,
1750 1.9 oster struct vnode * b_vp,
1751 1.9 oster unsigned rw_flag,
1752 1.9 oster dev_t dev,
1753 1.9 oster RF_SectorNum_t startSect,
1754 1.9 oster RF_SectorCount_t numSect,
1755 1.9 oster caddr_t buf,
1756 1.9 oster void (*cbFunc) (struct buf *),
1757 1.9 oster void *cbArg,
1758 1.9 oster int logBytesPerSector,
1759 1.9 oster struct proc * b_proc)
1760 1.9 oster {
1761 1.9 oster /* bp->b_flags = B_PHYS | rw_flag; */
1762 1.9 oster bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1763 1.9 oster bp->b_bcount = numSect << logBytesPerSector;
1764 1.9 oster bp->b_bufsize = bp->b_bcount;
1765 1.9 oster bp->b_error = 0;
1766 1.9 oster bp->b_dev = dev;
1767 1.1 oster db1_printf(("bp->b_dev is %d\n", dev));
1768 1.9 oster bp->b_un.b_addr = buf;
1769 1.5 oster #if 0
1770 1.9 oster db1_printf(("bp->b_data=0x%x\n", bp->b_data));
1771 1.5 oster #endif
1772 1.1 oster
1773 1.9 oster bp->b_blkno = startSect;
1774 1.9 oster bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1775 1.9 oster db1_printf(("b_bcount is: %d\n", (int) bp->b_bcount));
1776 1.1 oster if (bp->b_bcount == 0) {
1777 1.1 oster panic("bp->b_bcount is zero in InitBP!!\n");
1778 1.1 oster }
1779 1.9 oster bp->b_proc = b_proc;
1780 1.9 oster bp->b_iodone = cbFunc;
1781 1.9 oster bp->b_vp = b_vp;
1782 1.9 oster
1783 1.1 oster }
1784 1.1 oster /* Extras... */
1785 1.1 oster
1786 1.9 oster unsigned int
1787 1.9 oster rpcc()
1788 1.1 oster {
1789 1.9 oster /* XXX no clue what this is supposed to do.. my guess is that it's
1790 1.9 oster * supposed to read the CPU cycle counter... */
1791 1.9 oster /* db1_printf("this is supposed to do something useful too!??\n"); */
1792 1.9 oster return (0);
1793 1.1 oster }
1794 1.1 oster #if 0
1795 1.9 oster int
1796 1.9 oster rf_GetSpareTableFromDaemon(req)
1797 1.9 oster RF_SparetWait_t *req;
1798 1.1 oster {
1799 1.9 oster int retcode = 1;
1800 1.9 oster printf("This is supposed to do something useful!!\n"); /* XXX */
1801 1.9 oster
1802 1.9 oster return (retcode);
1803 1.1 oster
1804 1.1 oster }
1805 1.1 oster #endif
1806 1.1 oster
1807 1.1 oster static void
1808 1.1 oster raidgetdefaultlabel(raidPtr, rs, lp)
1809 1.1 oster RF_Raid_t *raidPtr;
1810 1.1 oster struct raid_softc *rs;
1811 1.1 oster struct disklabel *lp;
1812 1.1 oster {
1813 1.1 oster db1_printf(("Building a default label...\n"));
1814 1.1 oster bzero(lp, sizeof(*lp));
1815 1.1 oster
1816 1.1 oster /* fabricate a label... */
1817 1.1 oster lp->d_secperunit = raidPtr->totalSectors;
1818 1.1 oster lp->d_secsize = raidPtr->bytesPerSector;
1819 1.1 oster lp->d_nsectors = 1024 * (1024 / raidPtr->bytesPerSector);
1820 1.1 oster lp->d_ntracks = 1;
1821 1.1 oster lp->d_ncylinders = raidPtr->totalSectors / lp->d_nsectors;
1822 1.1 oster lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
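	/*
	 * Note on the fabricated geometry above: each "track" is one
	 * megabyte worth of sectors (1024 * 1024 bytes / bytesPerSector)
	 * and there is one track per cylinder, so d_ncylinders comes out
	 * to roughly the array size in megabytes.
	 */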
1823 1.1 oster
1824 1.1 oster strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
1825 1.9 oster lp->d_type = DTYPE_RAID;
1826 1.1 oster strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1827 1.1 oster lp->d_rpm = 3600;
1828 1.1 oster lp->d_interleave = 1;
1829 1.1 oster lp->d_flags = 0;
1830 1.1 oster
1831 1.1 oster lp->d_partitions[RAW_PART].p_offset = 0;
1832 1.1 oster lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
1833 1.1 oster lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1834 1.1 oster lp->d_npartitions = RAW_PART + 1;
1835 1.1 oster
1836 1.1 oster lp->d_magic = DISKMAGIC;
1837 1.1 oster lp->d_magic2 = DISKMAGIC;
1838 1.1 oster lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
1839 1.1 oster
1840 1.1 oster }
1841 1.1 oster /*
1842 1.1 oster * Read the disklabel from the raid device. If one is not present, fake one
1843 1.1 oster * up.
1844 1.1 oster */
1845 1.1 oster static void
1846 1.1 oster raidgetdisklabel(dev)
1847 1.9 oster dev_t dev;
1848 1.1 oster {
1849 1.9 oster int unit = raidunit(dev);
1850 1.1 oster struct raid_softc *rs = &raid_softc[unit];
1851 1.9 oster char *errstring;
1852 1.1 oster struct disklabel *lp = rs->sc_dkdev.dk_label;
1853 1.1 oster struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
1854 1.1 oster RF_Raid_t *raidPtr;
1855 1.1 oster
1856 1.1 oster db1_printf(("Getting the disklabel...\n"));
1857 1.1 oster
1858 1.1 oster bzero(clp, sizeof(*clp));
1859 1.1 oster
1860 1.1 oster raidPtr = raidPtrs[unit];
1861 1.1 oster
1862 1.1 oster raidgetdefaultlabel(raidPtr, rs, lp);
1863 1.1 oster
1864 1.1 oster /*
1865 1.1 oster * Call the generic disklabel extraction routine.
1866 1.1 oster */
1867 1.1 oster errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
1868 1.1 oster rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
1869 1.9 oster if (errstring)
1870 1.1 oster raidmakedisklabel(rs);
1871 1.1 oster else {
1872 1.9 oster int i;
1873 1.1 oster struct partition *pp;
1874 1.1 oster
1875 1.1 oster /*
1876 1.1 oster * Sanity check whether the found disklabel is valid.
1877 1.1 oster *
1878 1.1 oster 		 * This is necessary since the total size of the raid device
1879 1.1 oster 		 * may vary when the interleave is changed even though exactly
1880 1.1 oster 		 * the same components are used, and an old disklabel may be
1881 1.1 oster 		 * used if one is found.
1882 1.1 oster */
1883 1.1 oster if (lp->d_secperunit != rs->sc_size)
1884 1.1 oster printf("WARNING: %s: "
1885 1.1 oster "total sector size in disklabel (%d) != "
1886 1.18 oster "the size of raid (%ld)\n", rs->sc_xname,
1887 1.18 oster lp->d_secperunit, (long) rs->sc_size);
1888 1.1 oster for (i = 0; i < lp->d_npartitions; i++) {
1889 1.1 oster pp = &lp->d_partitions[i];
1890 1.1 oster if (pp->p_offset + pp->p_size > rs->sc_size)
1891 1.1 oster printf("WARNING: %s: end of partition `%c' "
1892 1.18 oster "exceeds the size of raid (%ld)\n",
1893 1.18 oster rs->sc_xname, 'a' + i, (long) rs->sc_size);
1894 1.1 oster }
1895 1.1 oster }
1896 1.1 oster
1897 1.1 oster }
1898 1.1 oster /*
1899 1.1 oster * Take care of things one might want to take care of in the event
1900 1.1 oster * that a disklabel isn't present.
1901 1.1 oster */
1902 1.1 oster static void
1903 1.1 oster raidmakedisklabel(rs)
1904 1.1 oster struct raid_softc *rs;
1905 1.1 oster {
1906 1.1 oster struct disklabel *lp = rs->sc_dkdev.dk_label;
1907 1.1 oster db1_printf(("Making a label..\n"));
1908 1.1 oster
1909 1.1 oster /*
1910 1.1 oster * For historical reasons, if there's no disklabel present
1911 1.1 oster * the raw partition must be marked FS_BSDFFS.
1912 1.1 oster */
1913 1.1 oster
1914 1.1 oster lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
1915 1.1 oster
1916 1.1 oster strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
1917 1.1 oster
1918 1.1 oster lp->d_checksum = dkcksum(lp);
1919 1.1 oster }
1920 1.1 oster /*
1921 1.1 oster * Lookup the provided name in the filesystem. If the file exists,
1922 1.1 oster * is a valid block device, and isn't being used by anyone else,
1923 1.1 oster * set *vpp to the file's vnode.
1924 1.9 oster * You'll find the original of this in ccd.c
1925 1.1 oster */
1926 1.1 oster int
1927 1.1 oster raidlookup(path, p, vpp)
1928 1.9 oster char *path;
1929 1.1 oster struct proc *p;
1930 1.1 oster struct vnode **vpp; /* result */
1931 1.1 oster {
1932 1.1 oster struct nameidata nd;
1933 1.1 oster struct vnode *vp;
1934 1.1 oster struct vattr va;
1935 1.9 oster int error;
1936 1.1 oster
1937 1.1 oster NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
1938 1.9 oster if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
1939 1.1 oster #ifdef DEBUG
1940 1.9 oster printf("RAIDframe: vn_open returned %d\n", error);
1941 1.1 oster #endif
1942 1.1 oster return (error);
1943 1.1 oster }
1944 1.1 oster vp = nd.ni_vp;
1945 1.1 oster if (vp->v_usecount > 1) {
1946 1.1 oster VOP_UNLOCK(vp, 0);
1947 1.9 oster (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
1948 1.1 oster return (EBUSY);
1949 1.1 oster }
1950 1.1 oster if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
1951 1.1 oster VOP_UNLOCK(vp, 0);
1952 1.9 oster (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
1953 1.1 oster return (error);
1954 1.1 oster }
1955 1.1 oster /* XXX: eventually we should handle VREG, too. */
1956 1.1 oster if (va.va_type != VBLK) {
1957 1.1 oster VOP_UNLOCK(vp, 0);
1958 1.9 oster (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
1959 1.1 oster return (ENOTBLK);
1960 1.1 oster }
1961 1.1 oster VOP_UNLOCK(vp, 0);
1962 1.1 oster *vpp = vp;
1963 1.1 oster return (0);
1964 1.1 oster }
1965 1.1 oster /*
1966 1.1 oster * Wait interruptibly for an exclusive lock.
1967 1.1 oster *
1968 1.1 oster * XXX
1969 1.1 oster * Several drivers do this; it should be abstracted and made MP-safe.
1970 1.1 oster * (Hmm... where have we seen this warning before :-> GO )
1971 1.1 oster */
1972 1.1 oster static int
1973 1.1 oster raidlock(rs)
1974 1.1 oster struct raid_softc *rs;
1975 1.1 oster {
1976 1.9 oster int error;
1977 1.1 oster
1978 1.1 oster while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
1979 1.1 oster rs->sc_flags |= RAIDF_WANTED;
1980 1.9 oster if ((error =
1981 1.9 oster tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
1982 1.1 oster return (error);
1983 1.1 oster }
1984 1.1 oster rs->sc_flags |= RAIDF_LOCKED;
1985 1.1 oster return (0);
1986 1.1 oster }
1987 1.1 oster /*
1988 1.1 oster * Unlock and wake up any waiters.
1989 1.1 oster */
1990 1.1 oster static void
1991 1.1 oster raidunlock(rs)
1992 1.1 oster struct raid_softc *rs;
1993 1.1 oster {
1994 1.1 oster
1995 1.1 oster rs->sc_flags &= ~RAIDF_LOCKED;
1996 1.1 oster if ((rs->sc_flags & RAIDF_WANTED) != 0) {
1997 1.1 oster rs->sc_flags &= ~RAIDF_WANTED;
1998 1.1 oster wakeup(rs);
1999 1.1 oster }
2000 1.11 oster }
2001 1.11 oster
2002 1.11 oster
2003 1.11 oster #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2004 1.11 oster #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
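/*
 * The component label lives RF_COMPONENT_INFO_OFFSET bytes into each
 * component (presumably within the region RAIDframe reserves at the
 * front of every component via protectedSectors) and occupies a single
 * RF_COMPONENT_INFO_SIZE byte chunk; raidread_component_label() and
 * raidwrite_component_label() below do raw strategy I/O on that chunk.
 */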
2005 1.11 oster
2006 1.11 oster int
2007 1.12 oster raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2008 1.12 oster {
2009 1.12 oster RF_ComponentLabel_t component_label;
2010 1.12 oster raidread_component_label(dev, b_vp, &component_label);
2011 1.12 oster component_label.mod_counter = mod_counter;
2012 1.12 oster component_label.clean = RF_RAID_CLEAN;
2013 1.12 oster raidwrite_component_label(dev, b_vp, &component_label);
2014 1.12 oster return(0);
2015 1.12 oster }
2016 1.12 oster
2017 1.12 oster
2018 1.12 oster int
2019 1.12 oster raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2020 1.11 oster {
2021 1.12 oster RF_ComponentLabel_t component_label;
2022 1.12 oster raidread_component_label(dev, b_vp, &component_label);
2023 1.12 oster component_label.mod_counter = mod_counter;
2024 1.12 oster component_label.clean = RF_RAID_DIRTY;
2025 1.12 oster raidwrite_component_label(dev, b_vp, &component_label);
2026 1.11 oster return(0);
2027 1.11 oster }
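/*
 * raidmarkclean() and raidmarkdirty() above are simple read-modify-write
 * helpers: they pull in the current component label, update the clean
 * flag and mod_counter, and push the label back out.  The mod_counter
 * presumably lets the driver decide which copy of the label is most
 * recent when components disagree.
 */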
2028 1.11 oster
2029 1.11 oster /* ARGSUSED */
2030 1.11 oster int
2031 1.11 oster raidread_component_label(dev, b_vp, component_label)
2032 1.11 oster dev_t dev;
2033 1.11 oster struct vnode *b_vp;
2034 1.11 oster RF_ComponentLabel_t *component_label;
2035 1.11 oster {
2036 1.11 oster struct buf *bp;
2037 1.11 oster int error;
2038 1.11 oster
2039 1.11 oster /* XXX should probably ensure that we don't try to do this if
2040 1.11 oster someone has changed rf_protected_sectors. */
2041 1.11 oster
2042 1.11 oster /* get a block of the appropriate size... */
2043 1.11 oster bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2044 1.11 oster bp->b_dev = dev;
2045 1.11 oster
2046 1.11 oster /* get our ducks in a row for the read */
2047 1.11 oster bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2048 1.11 oster bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2049 1.11 oster bp->b_flags = B_BUSY | B_READ;
2050 1.11 oster bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2051 1.11 oster
2052 1.11 oster (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2053 1.11 oster
2054 1.11 oster error = biowait(bp);
2055 1.11 oster
2056 1.11 oster if (!error) {
2057 1.11 oster memcpy(component_label, bp->b_un.b_addr,
2058 1.11 oster sizeof(RF_ComponentLabel_t));
2059 1.12 oster #if 0
2060 1.11 oster printf("raidread_component_label: got component label:\n");
2061 1.11 oster printf("Version: %d\n",component_label->version);
2062 1.11 oster printf("Serial Number: %d\n",component_label->serial_number);
2063 1.11 oster printf("Mod counter: %d\n",component_label->mod_counter);
2064 1.11 oster printf("Row: %d\n", component_label->row);
2065 1.11 oster printf("Column: %d\n", component_label->column);
2066 1.11 oster printf("Num Rows: %d\n", component_label->num_rows);
2067 1.11 oster printf("Num Columns: %d\n", component_label->num_columns);
2068 1.11 oster printf("Clean: %d\n", component_label->clean);
2069 1.11 oster printf("Status: %d\n", component_label->status);
2070 1.11 oster #endif
2071 1.11 oster } else {
2072 1.11 oster printf("Failed to read RAID component label!\n");
2073 1.11 oster }
2074 1.11 oster
2075 1.11 oster bp->b_flags = B_INVAL | B_AGE;
2076 1.11 oster brelse(bp);
2077 1.11 oster return(error);
2078 1.11 oster }
2079 1.11 oster /* ARGSUSED */
2080 1.11 oster int
2081 1.11 oster raidwrite_component_label(dev, b_vp, component_label)
2082 1.11 oster dev_t dev;
2083 1.11 oster struct vnode *b_vp;
2084 1.11 oster RF_ComponentLabel_t *component_label;
2085 1.11 oster {
2086 1.11 oster struct buf *bp;
2087 1.11 oster int error;
2088 1.11 oster
2089 1.11 oster /* get a block of the appropriate size... */
2090 1.11 oster bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2091 1.11 oster bp->b_dev = dev;
2092 1.11 oster
2093 1.11 oster /* get our ducks in a row for the write */
2094 1.11 oster bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2095 1.11 oster bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2096 1.11 oster bp->b_flags = B_BUSY | B_WRITE;
2097 1.11 oster bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2098 1.11 oster
2099 1.11 oster memset( bp->b_un.b_addr, 0, RF_COMPONENT_INFO_SIZE );
2100 1.11 oster
2101 1.11 oster memcpy( bp->b_un.b_addr, component_label, sizeof(RF_ComponentLabel_t));
2102 1.11 oster
2103 1.11 oster (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2104 1.11 oster error = biowait(bp);
2105 1.11 oster bp->b_flags = B_INVAL | B_AGE;
2106 1.11 oster brelse(bp);
2107 1.11 oster if (error) {
2108 1.11 oster printf("Failed to write RAID component info!\n");
2109 1.11 oster }
2110 1.11 oster
2111 1.11 oster return(error);
2112 1.1 oster }
2113 1.12 oster
2114 1.12 oster void
2115 1.12 oster rf_markalldirty( raidPtr )
2116 1.12 oster RF_Raid_t *raidPtr;
2117 1.12 oster {
2118 1.12 oster RF_ComponentLabel_t c_label;
2119 1.12 oster int r,c;
2120 1.12 oster
2121 1.12 oster raidPtr->mod_counter++;
2122 1.12 oster for (r = 0; r < raidPtr->numRow; r++) {
2123 1.12 oster for (c = 0; c < raidPtr->numCol; c++) {
2124 1.12 oster if (raidPtr->Disks[r][c].status != rf_ds_failed) {
2125 1.12 oster raidread_component_label(
2126 1.12 oster raidPtr->Disks[r][c].dev,
2127 1.12 oster raidPtr->raid_cinfo[r][c].ci_vp,
2128 1.12 oster &c_label);
2129 1.12 oster if (c_label.status == rf_ds_spared) {
2130 1.12 oster /* XXX do something special...
2131 1.12 oster but whatever you do, don't
2132 1.12 oster try to access it!! */
2133 1.12 oster } else {
2134 1.12 oster #if 0
2135 1.12 oster c_label.status =
2136 1.12 oster raidPtr->Disks[r][c].status;
2137 1.12 oster raidwrite_component_label(
2138 1.12 oster raidPtr->Disks[r][c].dev,
2139 1.12 oster raidPtr->raid_cinfo[r][c].ci_vp,
2140 1.12 oster &c_label);
2141 1.12 oster #endif
2142 1.12 oster raidmarkdirty(
2143 1.12 oster raidPtr->Disks[r][c].dev,
2144 1.12 oster raidPtr->raid_cinfo[r][c].ci_vp,
2145 1.12 oster raidPtr->mod_counter);
2146 1.12 oster }
2147 1.12 oster }
2148 1.12 oster }
2149 1.12 oster }
2150 1.13 oster /* printf("Component labels marked dirty.\n"); */
2151 1.12 oster #if 0
2152 1.12 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2153 1.12 oster sparecol = raidPtr->numCol + c;
2154 1.12 oster if (raidPtr->Disks[r][sparecol].status == rf_ds_used_spare) {
2155 1.12 oster /*
2156 1.12 oster
2157 1.12 oster XXX this is where we get fancy and map this spare
2158 1.12 oster 			into its correct spot in the array.
2159 1.12 oster
2160 1.12 oster */
2161 1.12 oster /*
2162 1.12 oster
2163 1.12 oster we claim this disk is "optimal" if it's
2164 1.12 oster rf_ds_used_spare, as that means it should be
2165 1.12 oster directly substitutable for the disk it replaced.
2166 1.12 oster We note that too...
2167 1.12 oster
2168 1.12 oster */
2169 1.12 oster
2170 1.12 oster for(i=0;i<raidPtr->numRow;i++) {
2171 1.12 oster for(j=0;j<raidPtr->numCol;j++) {
2172 1.12 oster if ((raidPtr->Disks[i][j].spareRow ==
2173 1.12 oster r) &&
2174 1.12 oster (raidPtr->Disks[i][j].spareCol ==
2175 1.12 oster sparecol)) {
2176 1.12 oster srow = r;
2177 1.12 oster scol = sparecol;
2178 1.12 oster break;
2179 1.12 oster }
2180 1.12 oster }
2181 1.12 oster }
2182 1.12 oster
2183 1.12 oster raidread_component_label(
2184 1.12 oster raidPtr->Disks[r][sparecol].dev,
2185 1.12 oster raidPtr->raid_cinfo[r][sparecol].ci_vp,
2186 1.12 oster &c_label);
2187 1.12 oster /* make sure status is noted */
2188 1.12 oster c_label.version = RF_COMPONENT_LABEL_VERSION;
2189 1.12 oster c_label.mod_counter = raidPtr->mod_counter;
2190 1.12 oster c_label.serial_number = raidPtr->serial_number;
2191 1.12 oster c_label.row = srow;
2192 1.12 oster c_label.column = scol;
2193 1.12 oster c_label.num_rows = raidPtr->numRow;
2194 1.12 oster c_label.num_columns = raidPtr->numCol;
2195 1.12 oster c_label.clean = RF_RAID_DIRTY; /* changed in a bit*/
2196 1.12 oster c_label.status = rf_ds_optimal;
2197 1.12 oster raidwrite_component_label(
2198 1.12 oster raidPtr->Disks[r][sparecol].dev,
2199 1.12 oster raidPtr->raid_cinfo[r][sparecol].ci_vp,
2200 1.12 oster &c_label);
2201 1.12 oster 			raidmarkclean( raidPtr->Disks[r][sparecol].dev,
2202 1.12 oster 				raidPtr->raid_cinfo[r][sparecol].ci_vp, raidPtr->mod_counter);
2203 1.12 oster }
2204 1.12 oster }
2205 1.12 oster
2206 1.12 oster #endif
2207 1.12 oster }
2208 1.12 oster
2209 1.13 oster
2210 1.13 oster void
2211 1.13 oster rf_update_component_labels( raidPtr )
2212 1.13 oster RF_Raid_t *raidPtr;
2213 1.13 oster {
2214 1.13 oster RF_ComponentLabel_t c_label;
2215 1.13 oster int sparecol;
2216 1.13 oster int r,c;
2217 1.13 oster int i,j;
2218 1.13 oster int srow, scol;
2219 1.13 oster
2220 1.13 oster srow = -1;
2221 1.13 oster scol = -1;
2222 1.13 oster
2223 1.13 oster /* XXX should do extra checks to make sure things really are clean,
2224 1.13 oster rather than blindly setting the clean bit... */
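	/*
	 * What follows: every rf_ds_optimal component has its label
	 * re-written with status rf_ds_optimal and is marked clean only if
	 * parity_good says the parity really is clean; used spares get a
	 * freshly built label mapping them back to the row/column they are
	 * standing in for.
	 */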
2225 1.13 oster
2226 1.13 oster raidPtr->mod_counter++;
2227 1.13 oster
2228 1.13 oster for (r = 0; r < raidPtr->numRow; r++) {
2229 1.13 oster for (c = 0; c < raidPtr->numCol; c++) {
2230 1.13 oster if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
2231 1.13 oster raidread_component_label(
2232 1.13 oster raidPtr->Disks[r][c].dev,
2233 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2234 1.13 oster &c_label);
2235 1.13 oster /* make sure status is noted */
2236 1.13 oster c_label.status = rf_ds_optimal;
2237 1.13 oster raidwrite_component_label(
2238 1.13 oster raidPtr->Disks[r][c].dev,
2239 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2240 1.13 oster &c_label);
2241 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2242 1.13 oster raidmarkclean(
2243 1.13 oster raidPtr->Disks[r][c].dev,
2244 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2245 1.13 oster raidPtr->mod_counter);
2246 1.13 oster }
2247 1.13 oster }
2248 1.13 oster /* else we don't touch it.. */
2249 1.13 oster #if 0
2250 1.13 oster else if (raidPtr->Disks[r][c].status !=
2251 1.13 oster rf_ds_failed) {
2252 1.13 oster raidread_component_label(
2253 1.13 oster raidPtr->Disks[r][c].dev,
2254 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2255 1.13 oster &c_label);
2256 1.13 oster /* make sure status is noted */
2257 1.13 oster c_label.status =
2258 1.13 oster raidPtr->Disks[r][c].status;
2259 1.13 oster raidwrite_component_label(
2260 1.13 oster raidPtr->Disks[r][c].dev,
2261 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2262 1.13 oster &c_label);
2263 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2264 1.13 oster raidmarkclean(
2265 1.13 oster raidPtr->Disks[r][c].dev,
2266 1.13 oster raidPtr->raid_cinfo[r][c].ci_vp,
2267 1.13 oster raidPtr->mod_counter);
2268 1.13 oster }
2269 1.13 oster }
2270 1.13 oster #endif
2271 1.13 oster }
2272 1.13 oster }
2273 1.13 oster
2274 1.13 oster for( c = 0; c < raidPtr->numSpare ; c++) {
2275 1.13 oster sparecol = raidPtr->numCol + c;
2276 1.13 oster if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
2277 1.13 oster /*
2278 1.13 oster
2279 1.13 oster we claim this disk is "optimal" if it's
2280 1.13 oster rf_ds_used_spare, as that means it should be
2281 1.13 oster directly substitutable for the disk it replaced.
2282 1.13 oster We note that too...
2283 1.13 oster
2284 1.13 oster */
2285 1.13 oster
2286 1.13 oster for(i=0;i<raidPtr->numRow;i++) {
2287 1.13 oster for(j=0;j<raidPtr->numCol;j++) {
2288 1.13 oster if ((raidPtr->Disks[i][j].spareRow ==
2289 1.13 oster 0) &&
2290 1.13 oster (raidPtr->Disks[i][j].spareCol ==
2291 1.13 oster sparecol)) {
2292 1.13 oster srow = i;
2293 1.13 oster scol = j;
2294 1.13 oster break;
2295 1.13 oster }
2296 1.13 oster }
2297 1.13 oster }
2298 1.13 oster
2299 1.13 oster raidread_component_label(
2300 1.13 oster raidPtr->Disks[0][sparecol].dev,
2301 1.13 oster raidPtr->raid_cinfo[0][sparecol].ci_vp,
2302 1.13 oster &c_label);
2303 1.13 oster /* make sure status is noted */
2304 1.13 oster c_label.version = RF_COMPONENT_LABEL_VERSION;
2305 1.13 oster c_label.mod_counter = raidPtr->mod_counter;
2306 1.13 oster c_label.serial_number = raidPtr->serial_number;
2307 1.13 oster c_label.row = srow;
2308 1.13 oster c_label.column = scol;
2309 1.13 oster c_label.num_rows = raidPtr->numRow;
2310 1.13 oster c_label.num_columns = raidPtr->numCol;
2311 1.13 oster c_label.clean = RF_RAID_DIRTY; /* changed in a bit*/
2312 1.13 oster c_label.status = rf_ds_optimal;
2313 1.13 oster raidwrite_component_label(
2314 1.13 oster raidPtr->Disks[0][sparecol].dev,
2315 1.13 oster raidPtr->raid_cinfo[0][sparecol].ci_vp,
2316 1.13 oster &c_label);
2317 1.13 oster if (raidPtr->parity_good == RF_RAID_CLEAN) {
2318 1.13 oster raidmarkclean( raidPtr->Disks[0][sparecol].dev,
2319 1.13 oster raidPtr->raid_cinfo[0][sparecol].ci_vp,
2320 1.13 oster raidPtr->mod_counter);
2321 1.13 oster }
2322 1.13 oster }
2323 1.13 oster }
2324 1.13 oster /* printf("Component labels updated\n"); */
2325 1.13 oster }
2326