1 /* $NetBSD: rf_netbsdkintf.c,v 1.109.2.2 2001/09/18 19:13:50 fvdl Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1988 University of Utah.
40 * Copyright (c) 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 * notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 * notice, this list of conditions and the following disclaimer in the
54 * documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 * must display the following acknowledgement:
57 * This product includes software developed by the University of
58 * California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 *
77 * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 */
79
80 /*
81 * Copyright (c) 1995 Carnegie-Mellon University.
82 * All rights reserved.
83 *
84 * Authors: Mark Holland, Jim Zelenka
85 *
86 * Permission to use, copy, modify and distribute this software and
87 * its documentation is hereby granted, provided that both the copyright
88 * notice and this permission notice appear in all copies of the
89 * software, derivative works or modified versions, and any portions
90 * thereof, and that both notices appear in supporting documentation.
91 *
92 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
93 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
94 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
95 *
96 * Carnegie Mellon requests users of this software to return to
97 *
98 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
99 * School of Computer Science
100 * Carnegie Mellon University
101 * Pittsburgh PA 15213-3890
102 *
103 * any improvements or extensions that they make and grant Carnegie the
104 * rights to redistribute these changes.
105 */
106
107 /***********************************************************
108 *
109 * rf_kintf.c -- the kernel interface routines for RAIDframe
110 *
111 ***********************************************************/
112
113 #include <sys/errno.h>
114 #include <sys/param.h>
115 #include <sys/pool.h>
116 #include <sys/queue.h>
117 #include <sys/disk.h>
118 #include <sys/device.h>
119 #include <sys/stat.h>
120 #include <sys/ioctl.h>
121 #include <sys/fcntl.h>
122 #include <sys/systm.h>
123 #include <sys/namei.h>
124 #include <sys/vnode.h>
125 #include <sys/param.h>
126 #include <sys/types.h>
127 #include <machine/types.h>
128 #include <sys/disklabel.h>
129 #include <sys/conf.h>
130 #include <sys/lock.h>
131 #include <sys/buf.h>
132 #include <sys/user.h>
133 #include <sys/reboot.h>
134
135 #include <miscfs/specfs/specdev.h>
136
137 #include "raid.h"
138 #include "opt_raid_autoconfig.h"
139 #include "rf_raid.h"
140 #include "rf_raidframe.h"
141 #include "rf_copyback.h"
142 #include "rf_dag.h"
143 #include "rf_dagflags.h"
144 #include "rf_desc.h"
145 #include "rf_diskqueue.h"
146 #include "rf_acctrace.h"
147 #include "rf_etimer.h"
148 #include "rf_general.h"
149 #include "rf_debugMem.h"
150 #include "rf_kintf.h"
151 #include "rf_options.h"
152 #include "rf_driver.h"
153 #include "rf_parityscan.h"
154 #include "rf_debugprint.h"
155 #include "rf_threadstuff.h"
156 #include "rf_configure.h"
157
158 int rf_kdebug_level = 0;
159
160 #ifdef DEBUG
161 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
162 #else /* DEBUG */
163 #define db1_printf(a) { }
164 #endif /* DEBUG */
165
166 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
167
168 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
169
170 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
171 * spare table */
172 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
173 * installation process */
174
175 /* prototypes */
176 static void KernelWakeupFunc(struct buf * bp);
177 static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
178 RF_SectorNum_t startSect, RF_SectorCount_t numSect,
179 caddr_t buf, void (*cbFunc) (struct buf *), void *cbArg,
180 int logBytesPerSector, struct proc * b_proc);
181 static void raidinit(RF_Raid_t *);
182
183 void raidattach(int);
184
185 /*
186 * Pilfered from ccd.c
187 */
188
189 struct raidbuf {
190 struct buf rf_buf; /* new I/O buf. MUST BE FIRST!!! */
191 struct buf *rf_obp; /* ptr. to original I/O buf */
192 int rf_flags; /* misc. flags */
193 RF_DiskQueueData_t *req;/* the request that this was part of.. */
194 };
195
196
197 #define RAIDGETBUF(rs) pool_get(&(rs)->sc_cbufpool, PR_NOWAIT)
198 #define RAIDPUTBUF(rs, cbp) pool_put(&(rs)->sc_cbufpool, cbp)
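
/*
 * Illustrative sketch (not new functionality) of the raidbuf lifecycle as
 * implemented further down in rf_DispatchKernelIO() and KernelWakeupFunc():
 *
 *	raidbp = RAIDGETBUF(rs);	   get a raidbuf from sc_cbufpool
 *	raidbp->rf_obp = bp;		   remember the original I/O buf
 *	raidbp->req = req;		   ... and the RAIDframe request
 *	InitBP(&raidbp->rf_buf, ...);	   set up the component I/O
 *	VOP_STRATEGY(&raidbp->rf_buf);	   issue it to the component
 *	    ... on completion, KernelWakeupFunc() propagates the error and
 *	    residual back to the original buf and then does:
 *	RAIDPUTBUF(rs, raidbp);		   return the raidbuf to the pool
 */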
199
200 /* XXX Not sure if the following should be replacing the raidPtrs above,
201 or if it should be used in conjunction with that...
202 */
203
204 struct raid_softc {
205 int sc_flags; /* flags */
206 int sc_cflags; /* configuration flags */
207 size_t sc_size; /* size of the raid device */
208 char sc_xname[20]; /* XXX external name */
209 struct disk sc_dkdev; /* generic disk device info */
210 struct pool sc_cbufpool; /* component buffer pool */
211 struct buf_queue buf_queue; /* used for the device queue */
212 };
213 /* sc_flags */
214 #define RAIDF_INITED 0x01 /* unit has been initialized */
215 #define RAIDF_WLABEL 0x02 /* label area is writable */
216 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
217 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
218 #define RAIDF_LOCKED 0x80 /* unit is locked */
219
220 #define raidunit(x) DISKUNIT(x)
221 int numraid = 0;
222
223 /*
224 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
225 * Be aware that large numbers can allow the driver to consume a lot of
226 * kernel memory, especially on writes, and in degraded mode reads.
227 *
228 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
229 * a single 64K write will typically require 64K for the old data,
230 * 64K for the old parity, and 64K for the new parity, for a total
231 * of 192K (if the parity buffer is not re-used immediately).
232 * Even if it is used immediately, that's still 128K, which when multiplied
233 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
234 *
235 * Now in degraded mode, for example, a 64K read on the above setup may
236 * require data reconstruction, which will require *all* of the 4 remaining
237 * disks to participate -- 4 * 32K/disk == 128K again.
238 */
239
240 #ifndef RAIDOUTSTANDING
241 #define RAIDOUTSTANDING 6
242 #endif
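
/*
 * Put as a rough formula (same assumptions as the example above): a small
 * write can pin up to three buffers the size of the I/O itself (old data,
 * old parity, new parity), so the extra buffer memory is on the order of
 *
 *	RAIDOUTSTANDING * 3 * (typical write size)
 *
 * over and above the incoming data -- somewhat less if the parity buffer is
 * re-used, as in the 1280K figure quoted above for 64K writes and 10
 * outstanding requests.
 */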
243
244 #define RAIDLABELDEV(dev) \
245 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
246
247 /* declared here, and made public, for the benefit of KVM stuff.. */
248 struct raid_softc *raid_softc;
249
250 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
251 struct disklabel *);
252 static void raidgetdisklabel(struct vnode *);
253 static void raidmakedisklabel(struct raid_softc *);
254
255 static int raidlock(struct raid_softc *);
256 static void raidunlock(struct raid_softc *);
257
258 static void rf_markalldirty(RF_Raid_t *);
259 void rf_mountroot_hook(struct device *);
260
261 struct device *raidrootdev;
262
263 void rf_ReconThread(struct rf_recon_req *);
264 /* XXX what I want is: */
265 /*void rf_ReconThread(RF_Raid_t *raidPtr); */
266 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
267 void rf_CopybackThread(RF_Raid_t *raidPtr);
268 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
269 void rf_buildroothack(void *);
270
271 RF_AutoConfig_t *rf_find_raid_components(void);
272 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
273 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
274 static int rf_reasonable_label(RF_ComponentLabel_t *);
275 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
276 int rf_set_autoconfig(RF_Raid_t *, int);
277 int rf_set_rootpartition(RF_Raid_t *, int);
278 void rf_release_all_vps(RF_ConfigSet_t *);
279 void rf_cleanup_config_set(RF_ConfigSet_t *);
280 int rf_have_enough_components(RF_ConfigSet_t *);
281 int rf_auto_config_set(RF_ConfigSet_t *, int *);
282
283 static int raidautoconfig = 0; /* Debugging, mostly. Set to 0 to not
284 allow autoconfig to take place.
285 Note that this is overridden by having
286 RAID_AUTOCONFIG as an option in the
287 kernel config file. */
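
/*
 * For reference: RAID_AUTOCONFIG is an ordinary kernel option, so a line in
 * the kernel configuration file along the lines of
 *
 *	options 	RAID_AUTOCONFIG
 *
 * causes the generated opt_raid_autoconfig.h (included above) to define it,
 * which flips raidautoconfig on here at attach time.
 */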
288
289 void
290 raidattach(num)
291 int num;
292 {
293 int raidID;
294 int i, rc;
295 RF_AutoConfig_t *ac_list; /* autoconfig list */
296 RF_ConfigSet_t *config_sets;
297
298 #ifdef DEBUG
299 printf("raidattach: Asked for %d units\n", num);
300 #endif
301
302 if (num <= 0) {
303 #ifdef DIAGNOSTIC
304 panic("raidattach: count <= 0");
305 #endif
306 return;
307 }
308 /* This is where all the initialization stuff gets done. */
309
310 numraid = num;
311
312 /* Make some space for requested number of units... */
313
314 RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
315 if (raidPtrs == NULL) {
316 panic("raidPtrs is NULL!!\n");
317 }
318
319 rc = rf_mutex_init(&rf_sparet_wait_mutex);
320 if (rc) {
321 RF_PANIC();
322 }
323
324 rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
325
326 for (i = 0; i < num; i++)
327 raidPtrs[i] = NULL;
328 rc = rf_BootRaidframe();
329 if (rc == 0)
330 printf("Kernelized RAIDframe activated\n");
331 else
332 panic("Serious error booting RAID!!\n");
333
334 /* put together some datastructures like the CCD device does.. This
335 * lets us lock the device and what-not when it gets opened. */
336
337 raid_softc = (struct raid_softc *)
338 malloc(num * sizeof(struct raid_softc),
339 M_RAIDFRAME, M_NOWAIT);
340 if (raid_softc == NULL) {
341 printf("WARNING: no memory for RAIDframe driver\n");
342 return;
343 }
344
345 memset(raid_softc, 0, num * sizeof(struct raid_softc));
346
347 raidrootdev = (struct device *)malloc(num * sizeof(struct device),
348 M_RAIDFRAME, M_NOWAIT);
349 if (raidrootdev == NULL) {
350 panic("No memory for RAIDframe driver!!?!?!\n");
351 }
352
353 for (raidID = 0; raidID < num; raidID++) {
354 BUFQ_INIT(&raid_softc[raidID].buf_queue);
355
356 raidrootdev[raidID].dv_class = DV_DISK;
357 raidrootdev[raidID].dv_cfdata = NULL;
358 raidrootdev[raidID].dv_unit = raidID;
359 raidrootdev[raidID].dv_parent = NULL;
360 raidrootdev[raidID].dv_flags = 0;
361 sprintf(raidrootdev[raidID].dv_xname,"raid%d",raidID);
362
363 RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
364 (RF_Raid_t *));
365 if (raidPtrs[raidID] == NULL) {
366 printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
367 numraid = raidID;
368 return;
369 }
370 }
371
372 #if RAID_AUTOCONFIG
373 raidautoconfig = 1;
374 #endif
375
376 if (raidautoconfig) {
377 /* 1. locate all RAID components on the system */
378
379 #if DEBUG
380 printf("Searching for raid components...\n");
381 #endif
382 ac_list = rf_find_raid_components();
383
384 /* 2. sort them into their respective sets */
385
386 config_sets = rf_create_auto_sets(ac_list);
387
388 /* 3. evaluate each set and configure the valid ones
389 This gets done in rf_buildroothack() */
390
391 /* schedule the creation of the thread to do the
392 "/ on RAID" stuff */
393
394 kthread_create(rf_buildroothack,config_sets);
395
396 #if 0
397 mountroothook_establish(rf_mountroot_hook, &raidrootdev[0]);
398 #endif
399 }
400
401 }
402
403 void
404 rf_buildroothack(arg)
405 void *arg;
406 {
407 RF_ConfigSet_t *config_sets = arg;
408 RF_ConfigSet_t *cset;
409 RF_ConfigSet_t *next_cset;
410 int retcode;
411 int raidID;
412 int rootID;
413 int num_root;
414
415 rootID = 0;
416 num_root = 0;
417 cset = config_sets;
418 while(cset != NULL ) {
419 next_cset = cset->next;
420 if (rf_have_enough_components(cset) &&
421 cset->ac->clabel->autoconfigure==1) {
422 retcode = rf_auto_config_set(cset,&raidID);
423 if (!retcode) {
424 if (cset->rootable) {
425 rootID = raidID;
426 num_root++;
427 }
428 } else {
429 /* The autoconfig didn't work :( */
430 #if DEBUG
431 printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
432 #endif
433 rf_release_all_vps(cset);
434 }
435 } else {
436 /* we're not autoconfiguring this set...
437 release the associated resources */
438 rf_release_all_vps(cset);
439 }
440 /* cleanup */
441 rf_cleanup_config_set(cset);
442 cset = next_cset;
443 }
444 if (boothowto & RB_ASKNAME) {
445 /* We don't auto-config... */
446 } else {
447 /* They didn't ask, and we found something bootable... */
448
449 if (num_root == 1) {
450 booted_device = &raidrootdev[rootID];
451 } else if (num_root > 1) {
452 /* we can't guess.. require the user to answer... */
453 boothowto |= RB_ASKNAME;
454 }
455 }
456 }
457
458
459 int
460 raidsize(dev)
461 dev_t dev;
462 {
463 #if 1 /* XXXthorpej */
464 return (-1);
465 #else
466 struct raid_softc *rs;
467 struct disklabel *lp;
468 int part, unit, omask, size;
469
470 unit = raidunit(dev);
471 if (unit >= numraid)
472 return (-1);
473 rs = &raid_softc[unit];
474
475 if ((rs->sc_flags & RAIDF_INITED) == 0)
476 return (-1);
477
478 part = DISKPART(dev);
479 omask = rs->sc_dkdev.dk_openmask & (1 << part);
480 lp = rs->sc_dkdev.dk_label;
481
482 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
483 return (-1);
484
485 if (lp->d_partitions[part].p_fstype != FS_SWAP)
486 size = -1;
487 else
488 size = lp->d_partitions[part].p_size *
489 (lp->d_secsize / DEV_BSIZE);
490
491 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
492 return (-1);
493
494 return (size);
495 #endif
496 }
497
498 int
499 raiddump(dev, blkno, va, size)
500 dev_t dev;
501 daddr_t blkno;
502 caddr_t va;
503 size_t size;
504 {
505 /* Not implemented. */
506 return ENXIO;
507 }
508
509 /* ARGSUSED */
510 int
511 raidopen(devvp, flags, fmt, p)
512 struct vnode *devvp;
513 int flags, fmt;
514 struct proc *p;
515 {
516 int unit = raidunit(devvp->v_rdev);
517 struct raid_softc *rs;
518 struct disklabel *lp;
519 int part, pmask;
520 int error = 0;
521
522 if (unit >= numraid)
523 return (ENXIO);
524 rs = &raid_softc[unit];
525 devvp->v_devcookie = rs;
526
527 if ((error = raidlock(rs)) != 0)
528 return (error);
529 lp = rs->sc_dkdev.dk_label;
530
531 part = DISKPART(devvp->v_rdev);
532 pmask = (1 << part);
533
534 db1_printf(("Opening raid device number: %d partition: %d\n",
535 unit, part));
536
537
538 if ((rs->sc_flags & RAIDF_INITED) &&
539 (rs->sc_dkdev.dk_openmask == 0))
540 raidgetdisklabel(devvp);
541
542 /* make sure that this partition exists */
543
544 if (part != RAW_PART) {
545 db1_printf(("Not a raw partition..\n"));
546 if (((rs->sc_flags & RAIDF_INITED) == 0) ||
547 ((part >= lp->d_npartitions) ||
548 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
549 error = ENXIO;
550 raidunlock(rs);
551 db1_printf(("Bailing out...\n"));
552 return (error);
553 }
554 }
555 /* Prevent this unit from being unconfigured while open. */
556 switch (fmt) {
557 case S_IFCHR:
558 rs->sc_dkdev.dk_copenmask |= pmask;
559 break;
560
561 case S_IFBLK:
562 rs->sc_dkdev.dk_bopenmask |= pmask;
563 break;
564 }
565
566 if ((rs->sc_dkdev.dk_openmask == 0) &&
567 ((rs->sc_flags & RAIDF_INITED) != 0)) {
568 /* First one... mark things as dirty... Note that we *MUST*
569 have done a configure before this. I DO NOT WANT TO BE
570 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
571 THAT THEY BELONG TOGETHER!!!!! */
572 /* XXX should check to see if we're only open for reading
573 here... If so, we needn't do this, but then need some
574 other way of keeping track of what's happened.. */
575
576 rf_markalldirty( raidPtrs[unit] );
577 }
578
579
580 rs->sc_dkdev.dk_openmask =
581 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
582
583 raidunlock(rs);
584
585 return (error);
586
587
588 }
589
590 /* ARGSUSED */
591 int
592 raidclose(devvp, flags, fmt, p)
593 struct vnode *devvp;
594 int flags, fmt;
595 struct proc *p;
596 {
597 struct raid_softc *rs = devvp->v_devcookie;
598 int error = 0;
599 int part;
600
601 if ((error = raidlock(rs)) != 0)
602 return (error);
603
604 part = DISKPART(devvp->v_rdev);
605
606 /* ...that much closer to allowing unconfiguration... */
607 switch (fmt) {
608 case S_IFCHR:
609 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
610 break;
611
612 case S_IFBLK:
613 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
614 break;
615 }
616 rs->sc_dkdev.dk_openmask =
617 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
618
619 if ((rs->sc_dkdev.dk_openmask == 0) &&
620 ((rs->sc_flags & RAIDF_INITED) != 0)) {
621 /* Last one... device is not unconfigured yet.
622 Device shutdown has taken care of setting the
623 clean bits if RAIDF_INITED is not set;
624 mark things as clean... */
625 #if 0
626 printf("Last one on raid%d. Updating status.\n",
627 DISKUNIT(devvp->v_rdev));
628 #endif
629 rf_update_component_labels(raidPtrs[DISKUNIT(devvp->v_rdev)],
630 RF_FINAL_COMPONENT_UPDATE);
631 if (doing_shutdown) {
632 /* last one, and we're going down, so
633 lights out for this RAID set too. */
634 error = rf_Shutdown(raidPtrs[DISKUNIT(devvp->v_rdev)]);
635 pool_destroy(&rs->sc_cbufpool);
636
637 /* It's no longer initialized... */
638 rs->sc_flags &= ~RAIDF_INITED;
639
640 /* Detach the disk. */
641 disk_detach(&rs->sc_dkdev);
642 }
643 }
644
645 raidunlock(rs);
646 return (0);
647 }
648
649 void
650 raidstrategy(bp)
651 struct buf *bp;
652 {
653 int s;
654
655 unsigned int raidID = DISKUNIT(bp->b_devvp->v_rdev);
656 struct raid_softc *rs = bp->b_devvp->v_devcookie;
657 RF_Raid_t *raidPtr;
658 struct disklabel *lp;
659 int wlabel;
660
661 if ((rs->sc_flags & RAIDF_INITED) ==0) {
662 bp->b_error = ENXIO;
663 bp->b_flags |= B_ERROR;
664 bp->b_resid = bp->b_bcount;
665 biodone(bp);
666 return;
667 }
668 raidPtr = raidPtrs[raidID];
669 if (raidPtr == NULL || raidPtr->valid == 0) {
670 bp->b_error = ENODEV;
671 bp->b_flags |= B_ERROR;
672 bp->b_resid = bp->b_bcount;
673 biodone(bp);
674 return;
675 }
676 if (bp->b_bcount == 0) {
677 db1_printf(("b_bcount is zero..\n"));
678 biodone(bp);
679 return;
680 }
681 lp = rs->sc_dkdev.dk_label;
682
683 /*
684 * Do bounds checking and adjust transfer. If there's an
685 * error, the bounds check will flag that for us.
686 */
687
688 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
689 if (DISKPART(bp->b_devvp->v_rdev) != RAW_PART &&
690 (bp->b_flags & B_DKLABEL) == 0) {
691 if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
692 db1_printf(("Bounds check failed!!:%d %d\n",
693 (int) bp->b_blkno, (int) wlabel));
694 biodone(bp);
695 return;
696 }
697 }
698 s = splbio();
699
700 bp->b_resid = 0;
701
702 /* stuff it onto our queue */
703 BUFQ_INSERT_TAIL(&rs->buf_queue, bp);
704
705 raidstart(raidPtrs[raidID]);
706
707 splx(s);
708 }
709
710 /* ARGSUSED */
711 int
712 raidread(devvp, uio, flags)
713 struct vnode *devvp;
714 struct uio *uio;
715 int flags;
716 {
717 struct raid_softc *rs = devvp->v_devcookie;
718
719 if ((rs->sc_flags & RAIDF_INITED) == 0)
720 return (ENXIO);
721
722 db1_printf(("raidread: unit: %d partition: %d\n",
723 DISKUNIT(devvp->v_rdev), DISKPART(devvp->v_rdev)));
724
725 return (physio(raidstrategy, NULL, devvp, B_READ, minphys, uio));
726 }
727
728 /* ARGSUSED */
729 int
730 raidwrite(devvp, uio, flags)
731 struct vnode *devvp;
732 struct uio *uio;
733 int flags;
734 {
735 struct raid_softc *rs = devvp->v_devcookie;
736
737 if ((rs->sc_flags & RAIDF_INITED) == 0)
738 return (ENXIO);
739
740 db1_printf(("raidwrite\n"));
741
742 return (physio(raidstrategy, NULL, devvp, B_WRITE, minphys, uio));
743 }
744
745 int
746 raidioctl(devvp, cmd, data, flag, p)
747 struct vnode *devvp;
748 u_long cmd;
749 caddr_t data;
750 int flag;
751 struct proc *p;
752 {
753 struct raid_softc *rs = devvp->v_devcookie;
754 int error = 0;
755 int part, pmask;
756 RF_Config_t *k_cfg, *u_cfg;
757 RF_Raid_t *raidPtr;
758 RF_RaidDisk_t *diskPtr;
759 RF_AccTotals_t *totals;
760 RF_DeviceConfig_t *d_cfg, **ucfgp;
761 u_char *specific_buf;
762 int retcode = 0;
763 int row;
764 int column;
765 struct rf_recon_req *rrcopy, *rr;
766 RF_ComponentLabel_t *clabel;
767 RF_ComponentLabel_t ci_label;
768 RF_ComponentLabel_t **clabel_ptr;
769 RF_SingleComponent_t *sparePtr,*componentPtr;
770 RF_SingleComponent_t hot_spare;
771 RF_SingleComponent_t component;
772 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
773 int i, j, d;
774 #ifdef __HAVE_OLD_DISKLABEL
775 struct disklabel newlabel;
776 #endif
777
778 raidPtr = raidPtrs[DISKUNIT(devvp->v_rdev)];
779
780 db1_printf(("raidioctl: 0x%x %d %d %ld\n", devvp->v_rdev,
781 DISKPART(devvp->v_rdev), DISKUNIT(devvp->v_rdev), cmd));
782
783 /* Must be open for writes for these commands... */
784 switch (cmd) {
785 case DIOCSDINFO:
786 case DIOCWDINFO:
787 #ifdef __HAVE_OLD_DISKLABEL
788 case ODIOCWDINFO:
789 case ODIOCSDINFO:
790 #endif
791 case DIOCWLABEL:
792 if ((flag & FWRITE) == 0)
793 return (EBADF);
794 }
795
796 /* Must be initialized for these... */
797 switch (cmd) {
798 case DIOCGDINFO:
799 case DIOCSDINFO:
800 case DIOCWDINFO:
801 #ifdef __HAVE_OLD_DISKLABEL
802 case ODIOCGDINFO:
803 case ODIOCWDINFO:
804 case ODIOCSDINFO:
805 case ODIOCGDEFLABEL:
806 #endif
807 case DIOCGPART:
808 case DIOCWLABEL:
809 case DIOCGDEFLABEL:
810 case RAIDFRAME_SHUTDOWN:
811 case RAIDFRAME_REWRITEPARITY:
812 case RAIDFRAME_GET_INFO:
813 case RAIDFRAME_RESET_ACCTOTALS:
814 case RAIDFRAME_GET_ACCTOTALS:
815 case RAIDFRAME_KEEP_ACCTOTALS:
816 case RAIDFRAME_GET_SIZE:
817 case RAIDFRAME_FAIL_DISK:
818 case RAIDFRAME_COPYBACK:
819 case RAIDFRAME_CHECK_RECON_STATUS:
820 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
821 case RAIDFRAME_GET_COMPONENT_LABEL:
822 case RAIDFRAME_SET_COMPONENT_LABEL:
823 case RAIDFRAME_ADD_HOT_SPARE:
824 case RAIDFRAME_REMOVE_HOT_SPARE:
825 case RAIDFRAME_INIT_LABELS:
826 case RAIDFRAME_REBUILD_IN_PLACE:
827 case RAIDFRAME_CHECK_PARITY:
828 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
829 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
830 case RAIDFRAME_CHECK_COPYBACK_STATUS:
831 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
832 case RAIDFRAME_SET_AUTOCONFIG:
833 case RAIDFRAME_SET_ROOT:
834 case RAIDFRAME_DELETE_COMPONENT:
835 case RAIDFRAME_INCORPORATE_HOT_SPARE:
836 if ((rs->sc_flags & RAIDF_INITED) == 0)
837 return (ENXIO);
838 }
839
840 switch (cmd) {
841
842 /* configure the system */
843 case RAIDFRAME_CONFIGURE:
844
845 if (raidPtr->valid) {
846 /* There is a valid RAID set running on this unit! */
847 printf("raid%d: Device already configured!\n",
848 DISKUNIT(devvp->v_rdev));
849 return(EINVAL);
850 }
851
852 /* copy-in the configuration information */
853 /* data points to a pointer to the configuration structure */
854
855 u_cfg = *((RF_Config_t **) data);
856 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
857 if (k_cfg == NULL) {
858 return (ENOMEM);
859 }
860 retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
861 sizeof(RF_Config_t));
862 if (retcode) {
863 RF_Free(k_cfg, sizeof(RF_Config_t));
864 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
865 retcode));
866 return (retcode);
867 }
868 /* allocate a buffer for the layout-specific data, and copy it
869 * in */
870 if (k_cfg->layoutSpecificSize) {
871 if (k_cfg->layoutSpecificSize > 10000) {
872 /* sanity check */
873 RF_Free(k_cfg, sizeof(RF_Config_t));
874 return (EINVAL);
875 }
876 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
877 (u_char *));
878 if (specific_buf == NULL) {
879 RF_Free(k_cfg, sizeof(RF_Config_t));
880 return (ENOMEM);
881 }
882 retcode = copyin(k_cfg->layoutSpecific,
883 (caddr_t) specific_buf,
884 k_cfg->layoutSpecificSize);
885 if (retcode) {
886 RF_Free(k_cfg, sizeof(RF_Config_t));
887 RF_Free(specific_buf,
888 k_cfg->layoutSpecificSize);
889 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
890 retcode));
891 return (retcode);
892 }
893 } else
894 specific_buf = NULL;
895 k_cfg->layoutSpecific = specific_buf;
896
897 /* should do some kind of sanity check on the configuration.
898 * Store the sum of all the bytes in the last byte? */
899
900 /* configure the system */
901
902 /*
903 * Clear the entire RAID descriptor, just to make sure
904 * there is no stale data left in the case of a
905 * reconfiguration
906 */
907 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
908 raidPtr->raidid = DISKUNIT(devvp->v_rdev);
909
910 retcode = rf_Configure(raidPtr, k_cfg, NULL);
911
912 if (retcode == 0) {
913
914 /* allow this many simultaneous IO's to
915 this RAID device */
916 raidPtr->openings = RAIDOUTSTANDING;
917
918 raidinit(raidPtr);
919 rf_markalldirty(raidPtr);
920 }
921 /* free the buffers. No return code here. */
922 if (k_cfg->layoutSpecificSize) {
923 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
924 }
925 RF_Free(k_cfg, sizeof(RF_Config_t));
926
927 return (retcode);
928
929 /* shutdown the system */
930 case RAIDFRAME_SHUTDOWN:
931
932 if ((error = raidlock(rs)) != 0)
933 return (error);
934
935 /*
936 * If somebody has a partition mounted, we shouldn't
937 * shutdown.
938 */
939
940 part = DISKPART(devvp->v_rdev);
941 pmask = (1 << part);
942 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
943 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
944 (rs->sc_dkdev.dk_copenmask & pmask))) {
945 raidunlock(rs);
946 return (EBUSY);
947 }
948
949 retcode = rf_Shutdown(raidPtr);
950
951 pool_destroy(&rs->sc_cbufpool);
952
953 /* It's no longer initialized... */
954 rs->sc_flags &= ~RAIDF_INITED;
955
956 /* Detach the disk. */
957 disk_detach(&rs->sc_dkdev);
958
959 raidunlock(rs);
960
961 return (retcode);
962 case RAIDFRAME_GET_COMPONENT_LABEL:
963 clabel_ptr = (RF_ComponentLabel_t **) data;
964 /* need to read the component label for the disk indicated
965 by row,column in clabel */
966
967 /* For practice, let's get it directly from disk, rather
968 than from the in-core copy */
969 RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
970 (RF_ComponentLabel_t *));
971 if (clabel == NULL)
972 return (ENOMEM);
973
974 memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
975
976 retcode = copyin( *clabel_ptr, clabel,
977 sizeof(RF_ComponentLabel_t));
978
979 if (retcode) {
980 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
981 return(retcode);
982 }
983
984 row = clabel->row;
985 column = clabel->column;
986
987 if ((row < 0) || (row >= raidPtr->numRow) ||
988 (column < 0) || (column >= raidPtr->numCol +
989 raidPtr->numSpare)) {
990 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
991 return(EINVAL);
992 }
993
994 raidread_component_label(raidPtr->raid_cinfo[row][column].ci_vp,
995 clabel);
996
997 retcode = copyout((caddr_t) clabel,
998 (caddr_t) *clabel_ptr,
999 sizeof(RF_ComponentLabel_t));
1000 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1001 return (retcode);
1002
1003 case RAIDFRAME_SET_COMPONENT_LABEL:
1004 clabel = (RF_ComponentLabel_t *) data;
1005
1006 /* XXX check the label for valid stuff... */
1007 /* Note that some things *should not* get modified --
1008 the user should be re-initing the labels instead of
1009 trying to patch things.
1010 */
1011
1012 printf("Got component label:\n");
1013 printf("Version: %d\n",clabel->version);
1014 printf("Serial Number: %d\n",clabel->serial_number);
1015 printf("Mod counter: %d\n",clabel->mod_counter);
1016 printf("Row: %d\n", clabel->row);
1017 printf("Column: %d\n", clabel->column);
1018 printf("Num Rows: %d\n", clabel->num_rows);
1019 printf("Num Columns: %d\n", clabel->num_columns);
1020 printf("Clean: %d\n", clabel->clean);
1021 printf("Status: %d\n", clabel->status);
1022
1023 row = clabel->row;
1024 column = clabel->column;
1025
1026 if ((row < 0) || (row >= raidPtr->numRow) ||
1027 (column < 0) || (column >= raidPtr->numCol)) {
1028 return(EINVAL);
1029 }
1030
1031 /* XXX this isn't allowed to do anything for now :-) */
1032
1033 /* XXX and before it is, we need to fill in the rest
1034 of the fields!?!?!?! */
1035 #if 0
1036 raidwrite_component_label(
1037 raidPtr->raid_cinfo[row][column].ci_vp, clabel);
1038 #endif
1039 return (0);
1040
1041 case RAIDFRAME_INIT_LABELS:
1042 clabel = (RF_ComponentLabel_t *) data;
1043 /*
1044 we only want the serial number from
1045 the above. We get all the rest of the information
1046 from the config that was used to create this RAID
1047 set.
1048 */
1049
1050 raidPtr->serial_number = clabel->serial_number;
1051
1052 raid_init_component_label(raidPtr, &ci_label);
1053 ci_label.serial_number = clabel->serial_number;
1054
1055 for(row=0;row<raidPtr->numRow;row++) {
1056 ci_label.row = row;
1057 for(column=0;column<raidPtr->numCol;column++) {
1058 diskPtr = &raidPtr->Disks[row][column];
1059 if (!RF_DEAD_DISK(diskPtr->status)) {
1060 ci_label.partitionSize = diskPtr->partitionSize;
1061 ci_label.column = column;
1062 raidwrite_component_label(
1063 raidPtr->raid_cinfo[row][column].ci_vp,
1064 &ci_label );
1065 }
1066 }
1067 }
1068
1069 return (retcode);
1070 case RAIDFRAME_SET_AUTOCONFIG:
1071 d = rf_set_autoconfig(raidPtr, *(int *) data);
1072 printf("New autoconfig value is: %d\n", d);
1073 *(int *) data = d;
1074 return (retcode);
1075
1076 case RAIDFRAME_SET_ROOT:
1077 d = rf_set_rootpartition(raidPtr, *(int *) data);
1078 printf("New rootpartition value is: %d\n", d);
1079 *(int *) data = d;
1080 return (retcode);
1081
1082 /* initialize all parity */
1083 case RAIDFRAME_REWRITEPARITY:
1084
1085 if (raidPtr->Layout.map->faultsTolerated == 0) {
1086 /* Parity for RAID 0 is trivially correct */
1087 raidPtr->parity_good = RF_RAID_CLEAN;
1088 return(0);
1089 }
1090
1091 if (raidPtr->parity_rewrite_in_progress == 1) {
1092 /* Re-write is already in progress! */
1093 return(EINVAL);
1094 }
1095
1096 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1097 rf_RewriteParityThread,
1098 raidPtr,"raid_parity");
1099 return (retcode);
1100
1101
1102 case RAIDFRAME_ADD_HOT_SPARE:
1103 sparePtr = (RF_SingleComponent_t *) data;
1104 memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1105 retcode = rf_add_hot_spare(raidPtr, &hot_spare);
1106 return(retcode);
1107
1108 case RAIDFRAME_REMOVE_HOT_SPARE:
1109 return(retcode);
1110
1111 case RAIDFRAME_DELETE_COMPONENT:
1112 componentPtr = (RF_SingleComponent_t *)data;
1113 memcpy( &component, componentPtr,
1114 sizeof(RF_SingleComponent_t));
1115 retcode = rf_delete_component(raidPtr, &component);
1116 return(retcode);
1117
1118 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1119 componentPtr = (RF_SingleComponent_t *)data;
1120 memcpy( &component, componentPtr,
1121 sizeof(RF_SingleComponent_t));
1122 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1123 return(retcode);
1124
1125 case RAIDFRAME_REBUILD_IN_PLACE:
1126
1127 if (raidPtr->Layout.map->faultsTolerated == 0) {
1128 /* Can't do this on a RAID 0!! */
1129 return(EINVAL);
1130 }
1131
1132 if (raidPtr->recon_in_progress == 1) {
1133 /* a reconstruct is already in progress! */
1134 return(EINVAL);
1135 }
1136
1137 componentPtr = (RF_SingleComponent_t *) data;
1138 memcpy( &component, componentPtr,
1139 sizeof(RF_SingleComponent_t));
1140 row = component.row;
1141 column = component.column;
1142 printf("Rebuild: %d %d\n",row, column);
1143 if ((row < 0) || (row >= raidPtr->numRow) ||
1144 (column < 0) || (column >= raidPtr->numCol)) {
1145 return(EINVAL);
1146 }
1147
1148 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1149 if (rrcopy == NULL)
1150 return(ENOMEM);
1151
1152 rrcopy->raidPtr = (void *) raidPtr;
1153 rrcopy->row = row;
1154 rrcopy->col = column;
1155
1156 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1157 rf_ReconstructInPlaceThread,
1158 rrcopy,"raid_reconip");
1159 return(retcode);
1160
1161 case RAIDFRAME_GET_INFO:
1162 if (!raidPtr->valid)
1163 return (ENODEV);
1164 ucfgp = (RF_DeviceConfig_t **) data;
1165 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1166 (RF_DeviceConfig_t *));
1167 if (d_cfg == NULL)
1168 return (ENOMEM);
1169 memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
1170 d_cfg->rows = raidPtr->numRow;
1171 d_cfg->cols = raidPtr->numCol;
1172 d_cfg->ndevs = raidPtr->numRow * raidPtr->numCol;
1173 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1174 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1175 return (ENOMEM);
1176 }
1177 d_cfg->nspares = raidPtr->numSpare;
1178 if (d_cfg->nspares >= RF_MAX_DISKS) {
1179 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1180 return (ENOMEM);
1181 }
1182 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1183 d = 0;
1184 for (i = 0; i < d_cfg->rows; i++) {
1185 for (j = 0; j < d_cfg->cols; j++) {
1186 d_cfg->devs[d] = raidPtr->Disks[i][j];
1187 d++;
1188 }
1189 }
1190 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1191 d_cfg->spares[i] = raidPtr->Disks[0][j];
1192 }
1193 retcode = copyout((caddr_t) d_cfg, (caddr_t) * ucfgp,
1194 sizeof(RF_DeviceConfig_t));
1195 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1196
1197 return (retcode);
1198
1199 case RAIDFRAME_CHECK_PARITY:
1200 *(int *) data = raidPtr->parity_good;
1201 return (0);
1202
1203 case RAIDFRAME_RESET_ACCTOTALS:
1204 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1205 return (0);
1206
1207 case RAIDFRAME_GET_ACCTOTALS:
1208 totals = (RF_AccTotals_t *) data;
1209 *totals = raidPtr->acc_totals;
1210 return (0);
1211
1212 case RAIDFRAME_KEEP_ACCTOTALS:
1213 raidPtr->keep_acc_totals = *(int *)data;
1214 return (0);
1215
1216 case RAIDFRAME_GET_SIZE:
1217 *(int *) data = raidPtr->totalSectors;
1218 return (0);
1219
1220 /* fail a disk & optionally start reconstruction */
1221 case RAIDFRAME_FAIL_DISK:
1222
1223 if (raidPtr->Layout.map->faultsTolerated == 0) {
1224 /* Can't do this on a RAID 0!! */
1225 return(EINVAL);
1226 }
1227
1228 rr = (struct rf_recon_req *) data;
1229
1230 if (rr->row < 0 || rr->row >= raidPtr->numRow
1231 || rr->col < 0 || rr->col >= raidPtr->numCol)
1232 return (EINVAL);
1233
1234 printf("raid%d: Failing the disk: row: %d col: %d\n",
1235 DISKUNIT(devvp->v_rdev), rr->row, rr->col);
1236
1237 /* make a copy of the recon request so that we don't rely on
1238 * the user's buffer */
1239 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1240 if (rrcopy == NULL)
1241 return(ENOMEM);
1242 bcopy(rr, rrcopy, sizeof(*rr));
1243 rrcopy->raidPtr = (void *) raidPtr;
1244
1245 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1246 rf_ReconThread,
1247 rrcopy,"raid_recon");
1248 return (0);
1249
1250 /* invoke a copyback operation after recon on whatever disk
1251 * needs it, if any */
1252 case RAIDFRAME_COPYBACK:
1253
1254 if (raidPtr->Layout.map->faultsTolerated == 0) {
1255 /* This makes no sense on a RAID 0!! */
1256 return(EINVAL);
1257 }
1258
1259 if (raidPtr->copyback_in_progress == 1) {
1260 /* Copyback is already in progress! */
1261 return(EINVAL);
1262 }
1263
1264 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1265 rf_CopybackThread,
1266 raidPtr,"raid_copyback");
1267 return (retcode);
1268
1269 /* return the percentage completion of reconstruction */
1270 case RAIDFRAME_CHECK_RECON_STATUS:
1271 if (raidPtr->Layout.map->faultsTolerated == 0) {
1272 /* This makes no sense on a RAID 0, so tell the
1273 user it's done. */
1274 *(int *) data = 100;
1275 return(0);
1276 }
1277 row = 0; /* XXX we only consider a single row... */
1278 if (raidPtr->status[row] != rf_rs_reconstructing)
1279 *(int *) data = 100;
1280 else
1281 *(int *) data = raidPtr->reconControl[row]->percentComplete;
1282 return (0);
1283 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1284 progressInfoPtr = (RF_ProgressInfo_t **) data;
1285 row = 0; /* XXX we only consider a single row... */
1286 if (raidPtr->status[row] != rf_rs_reconstructing) {
1287 progressInfo.remaining = 0;
1288 progressInfo.completed = 100;
1289 progressInfo.total = 100;
1290 } else {
1291 progressInfo.total =
1292 raidPtr->reconControl[row]->numRUsTotal;
1293 progressInfo.completed =
1294 raidPtr->reconControl[row]->numRUsComplete;
1295 progressInfo.remaining = progressInfo.total -
1296 progressInfo.completed;
1297 }
1298 retcode = copyout((caddr_t) &progressInfo,
1299 (caddr_t) *progressInfoPtr,
1300 sizeof(RF_ProgressInfo_t));
1301 return (retcode);
1302
1303 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1304 if (raidPtr->Layout.map->faultsTolerated == 0) {
1305 /* This makes no sense on a RAID 0, so tell the
1306 user it's done. */
1307 *(int *) data = 100;
1308 return(0);
1309 }
1310 if (raidPtr->parity_rewrite_in_progress == 1) {
1311 *(int *) data = 100 *
1312 raidPtr->parity_rewrite_stripes_done /
1313 raidPtr->Layout.numStripe;
1314 } else {
1315 *(int *) data = 100;
1316 }
1317 return (0);
1318
1319 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1320 progressInfoPtr = (RF_ProgressInfo_t **) data;
1321 if (raidPtr->parity_rewrite_in_progress == 1) {
1322 progressInfo.total = raidPtr->Layout.numStripe;
1323 progressInfo.completed =
1324 raidPtr->parity_rewrite_stripes_done;
1325 progressInfo.remaining = progressInfo.total -
1326 progressInfo.completed;
1327 } else {
1328 progressInfo.remaining = 0;
1329 progressInfo.completed = 100;
1330 progressInfo.total = 100;
1331 }
1332 retcode = copyout((caddr_t) &progressInfo,
1333 (caddr_t) *progressInfoPtr,
1334 sizeof(RF_ProgressInfo_t));
1335 return (retcode);
1336
1337 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1338 if (raidPtr->Layout.map->faultsTolerated == 0) {
1339 /* This makes no sense on a RAID 0 */
1340 *(int *) data = 100;
1341 return(0);
1342 }
1343 if (raidPtr->copyback_in_progress == 1) {
1344 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1345 raidPtr->Layout.numStripe;
1346 } else {
1347 *(int *) data = 100;
1348 }
1349 return (0);
1350
1351 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1352 progressInfoPtr = (RF_ProgressInfo_t **) data;
1353 if (raidPtr->copyback_in_progress == 1) {
1354 progressInfo.total = raidPtr->Layout.numStripe;
1355 progressInfo.completed =
1356 raidPtr->copyback_stripes_done;
1357 progressInfo.remaining = progressInfo.total -
1358 progressInfo.completed;
1359 } else {
1360 progressInfo.remaining = 0;
1361 progressInfo.completed = 100;
1362 progressInfo.total = 100;
1363 }
1364 retcode = copyout((caddr_t) &progressInfo,
1365 (caddr_t) *progressInfoPtr,
1366 sizeof(RF_ProgressInfo_t));
1367 return (retcode);
1368
1369 /* the sparetable daemon calls this to wait for the kernel to
1370 * need a spare table. this ioctl does not return until a
1371 * spare table is needed. XXX -- calling mpsleep here in the
1372 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1373 * -- I should either compute the spare table in the kernel,
1374 * or have a different -- XXX XXX -- interface (a different
1375 * character device) for delivering the table -- XXX */
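
/*
 * Illustration only: the intended userland protocol would have the spare
 * table daemon loop on the two ioctls handled by the disabled code below
 * (compute_spare_table() is a hypothetical helper, not part of RAIDframe):
 *
 *	RF_SparetWait_t req;
 *	void *table;
 *	while (ioctl(raidfd, RAIDFRAME_SPARET_WAIT, &req) == 0) {
 *		table = compute_spare_table(&req);
 *		ioctl(raidfd, RAIDFRAME_SEND_SPARET, &table);
 *	}
 */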
1376 #if 0
1377 case RAIDFRAME_SPARET_WAIT:
1378 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1379 while (!rf_sparet_wait_queue)
1380 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1381 waitreq = rf_sparet_wait_queue;
1382 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1383 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1384
1385 /* structure assignment */
1386 *((RF_SparetWait_t *) data) = *waitreq;
1387
1388 RF_Free(waitreq, sizeof(*waitreq));
1389 return (0);
1390
1391 /* wakes up a process waiting on SPARET_WAIT and puts an error
1392 * code in it that will cause the daemon to exit */
1393 case RAIDFRAME_ABORT_SPARET_WAIT:
1394 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1395 waitreq->fcol = -1;
1396 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1397 waitreq->next = rf_sparet_wait_queue;
1398 rf_sparet_wait_queue = waitreq;
1399 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1400 wakeup(&rf_sparet_wait_queue);
1401 return (0);
1402
1403 /* used by the spare table daemon to deliver a spare table
1404 * into the kernel */
1405 case RAIDFRAME_SEND_SPARET:
1406
1407 /* install the spare table */
1408 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1409
1410 /* respond to the requestor. the return status of the spare
1411 * table installation is passed in the "fcol" field */
1412 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1413 waitreq->fcol = retcode;
1414 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1415 waitreq->next = rf_sparet_resp_queue;
1416 rf_sparet_resp_queue = waitreq;
1417 wakeup(&rf_sparet_resp_queue);
1418 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1419
1420 return (retcode);
1421 #endif
1422
1423 default:
1424 break; /* fall through to the os-specific code below */
1425
1426 }
1427
1428 if (!raidPtr->valid)
1429 return (EINVAL);
1430
1431 /*
1432 * Add support for "regular" device ioctls here.
1433 */
1434
1435 switch (cmd) {
1436 case DIOCGDINFO:
1437 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1438 break;
1439 #ifdef __HAVE_OLD_DISKLABEL
1440 case ODIOCGDINFO:
1441 newlabel = *(rs->sc_dkdev.dk_label);
1442 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1443 return ENOTTY;
1444 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1445 break;
1446 #endif
1447
1448 case DIOCGPART:
1449 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1450 ((struct partinfo *) data)->part =
1451 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(devvp->v_rdev)];
1452 break;
1453
1454 case DIOCWDINFO:
1455 case DIOCSDINFO:
1456 #ifdef __HAVE_OLD_DISKLABEL
1457 case ODIOCWDINFO:
1458 case ODIOCSDINFO:
1459 #endif
1460 {
1461 struct disklabel *lp;
1462 #ifdef __HAVE_OLD_DISKLABEL
1463 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1464 memset(&newlabel, 0, sizeof newlabel);
1465 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1466 lp = &newlabel;
1467 } else
1468 #endif
1469 lp = (struct disklabel *)data;
1470
1471 if ((error = raidlock(rs)) != 0)
1472 return (error);
1473
1474 rs->sc_flags |= RAIDF_LABELLING;
1475
1476 error = setdisklabel(rs->sc_dkdev.dk_label,
1477 lp, 0, rs->sc_dkdev.dk_cpulabel);
1478 if (error == 0) {
1479 if (cmd == DIOCWDINFO
1480 #ifdef __HAVE_OLD_DISKLABEL
1481 || cmd == ODIOCWDINFO
1482 #endif
1483 )
1484 error = writedisklabel(devvp, raidstrategy,
1485 rs->sc_dkdev.dk_label,
1486 rs->sc_dkdev.dk_cpulabel);
1487 }
1488 rs->sc_flags &= ~RAIDF_LABELLING;
1489
1490 raidunlock(rs);
1491
1492 if (error)
1493 return (error);
1494 break;
1495 }
1496
1497 case DIOCWLABEL:
1498 if (*(int *) data != 0)
1499 rs->sc_flags |= RAIDF_WLABEL;
1500 else
1501 rs->sc_flags &= ~RAIDF_WLABEL;
1502 break;
1503
1504 case DIOCGDEFLABEL:
1505 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1506 break;
1507
1508 #ifdef __HAVE_OLD_DISKLABEL
1509 case ODIOCGDEFLABEL:
1510 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1511 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1512 return ENOTTY;
1513 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1514 break;
1515 #endif
1516
1517 default:
1518 retcode = ENOTTY;
1519 }
1520 return (retcode);
1521 }
1522
1523
1524 /* raidinit -- complete the rest of the initialization for the
1525 RAIDframe device. */
1526
1527
1528 static void
1529 raidinit(raidPtr)
1530 RF_Raid_t *raidPtr;
1531 {
1532 struct raid_softc *rs;
1533 int unit;
1534
1535 unit = raidPtr->raidid;
1536
1537 rs = &raid_softc[unit];
1538 pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
1539 0, 0, "raidpl", 0, NULL, NULL, M_RAIDFRAME);
1540
1541
1542 /* XXX should check return code first... */
1543 rs->sc_flags |= RAIDF_INITED;
1544
1545 sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds. */
1546
1547 rs->sc_dkdev.dk_name = rs->sc_xname;
1548
1549 /* disk_attach actually creates space for the CPU disklabel, among
1550 * other things, so it's critical to call this *BEFORE* we try putzing
1551 * with disklabels. */
1552
1553 disk_attach(&rs->sc_dkdev);
1554
1555 /* XXX There may be a weird interaction here between this, and
1556 * protectedSectors, as used in RAIDframe. */
1557
1558 rs->sc_size = raidPtr->totalSectors;
1559
1560 }
1561
1562 /* wake up the daemon & tell it to get us a spare table
1563 * XXX
1564 * the entries in the queues should be tagged with the raidPtr
1565 * so that in the extremely rare case that two recons happen at once,
1566 * we know for which device we're requesting a spare table
1567 * XXX
1568 *
1569 * XXX This code is not currently used. GO
1570 */
1571 int
1572 rf_GetSpareTableFromDaemon(req)
1573 RF_SparetWait_t *req;
1574 {
1575 int retcode;
1576
1577 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1578 req->next = rf_sparet_wait_queue;
1579 rf_sparet_wait_queue = req;
1580 wakeup(&rf_sparet_wait_queue);
1581
1582 /* mpsleep unlocks the mutex */
1583 while (!rf_sparet_resp_queue) {
1584 tsleep(&rf_sparet_resp_queue, PRIBIO,
1585 "raidframe getsparetable", 0);
1586 }
1587 req = rf_sparet_resp_queue;
1588 rf_sparet_resp_queue = req->next;
1589 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1590
1591 retcode = req->fcol;
1592 RF_Free(req, sizeof(*req)); /* this is not the same req as we
1593 * alloc'd */
1594 return (retcode);
1595 }
1596
1597 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1598 * bp & passes it down.
1599 * any calls originating in the kernel must use non-blocking I/O;
1600 * do some extra sanity checking to return "appropriate" error values for
1601 * certain conditions (to make some standard utilities work)
1602 *
1603 * Formerly known as: rf_DoAccessKernel
1604 */
1605 void
1606 raidstart(raidPtr)
1607 RF_Raid_t *raidPtr;
1608 {
1609 RF_SectorCount_t num_blocks, pb, sum;
1610 RF_RaidAddr_t raid_addr;
1611 int retcode;
1612 struct partition *pp;
1613 daddr_t blocknum;
1614 int unit;
1615 struct raid_softc *rs;
1616 int do_async;
1617 struct buf *bp;
1618
1619 unit = raidPtr->raidid;
1620 rs = &raid_softc[unit];
1621
1622 /* quick check to see if anything has died recently */
1623 RF_LOCK_MUTEX(raidPtr->mutex);
1624 if (raidPtr->numNewFailures > 0) {
1625 rf_update_component_labels(raidPtr,
1626 RF_NORMAL_COMPONENT_UPDATE);
1627 raidPtr->numNewFailures--;
1628 }
1629 RF_UNLOCK_MUTEX(raidPtr->mutex);
1630
1631 /* Check to see if we're at the limit... */
1632 RF_LOCK_MUTEX(raidPtr->mutex);
1633 while (raidPtr->openings > 0) {
1634 RF_UNLOCK_MUTEX(raidPtr->mutex);
1635
1636 /* get the next item, if any, from the queue */
1637 if ((bp = BUFQ_FIRST(&rs->buf_queue)) == NULL) {
1638 /* nothing more to do */
1639 return;
1640 }
1641 BUFQ_REMOVE(&rs->buf_queue, bp);
1642
1643 /* Ok, for the bp we have here, bp->b_blkno is relative to the
1644 * partition.. Need to make it absolute to the underlying
1645 * device.. */
1646
1647 blocknum = bp->b_blkno;
1648 if (DISKPART(bp->b_devvp->v_rdev) != RAW_PART &&
1649 (bp->b_flags & B_DKLABEL) == 0) {
1650 pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_devvp->v_rdev)];
1651 blocknum += pp->p_offset;
1652 }
1653
1654 db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1655 (int) blocknum));
1656
1657 db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1658 db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1659
1660 /* *THIS* is where we adjust what block we're going to...
1661 * but DO NOT TOUCH bp->b_blkno!!! */
1662 raid_addr = blocknum;
1663
1664 num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1665 pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1666 sum = raid_addr + num_blocks + pb;
1667 if (1 || rf_debugKernelAccess) {
1668 db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1669 (int) raid_addr, (int) sum, (int) num_blocks,
1670 (int) pb, (int) bp->b_resid));
1671 }
1672 if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1673 || (sum < num_blocks) || (sum < pb)) {
1674 bp->b_error = ENOSPC;
1675 bp->b_flags |= B_ERROR;
1676 bp->b_resid = bp->b_bcount;
1677 biodone(bp);
1678 RF_LOCK_MUTEX(raidPtr->mutex);
1679 continue;
1680 }
1681 /*
1682 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1683 */
1684
1685 if (bp->b_bcount & raidPtr->sectorMask) {
1686 bp->b_error = EINVAL;
1687 bp->b_flags |= B_ERROR;
1688 bp->b_resid = bp->b_bcount;
1689 biodone(bp);
1690 RF_LOCK_MUTEX(raidPtr->mutex);
1691 continue;
1692
1693 }
1694 db1_printf(("Calling DoAccess..\n"));
1695
1696
1697 RF_LOCK_MUTEX(raidPtr->mutex);
1698 raidPtr->openings--;
1699 RF_UNLOCK_MUTEX(raidPtr->mutex);
1700
1701 /*
1702 * Everything is async.
1703 */
1704 do_async = 1;
1705
1706 disk_busy(&rs->sc_dkdev);
1707
1708 /* XXX we're still at splbio() here... do we *really*
1709 need to be? */
1710
1711 /* don't ever condition on bp->b_flags & B_WRITE.
1712 * always condition on B_READ instead */
1713
1714 retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1715 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1716 do_async, raid_addr, num_blocks,
1717 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
1718
1719 RF_LOCK_MUTEX(raidPtr->mutex);
1720 }
1721 RF_UNLOCK_MUTEX(raidPtr->mutex);
1722 }
1723
1724 /*
1725 * invoke an I/O from kernel mode. Disk queue should be
1726 * locked upon entry
1727 */
1728 int
1729 rf_DispatchKernelIO(queue, req)
1730 RF_DiskQueue_t *queue;
1731 RF_DiskQueueData_t *req;
1732 {
1733 int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1734 struct buf *bp;
1735 struct raidbuf *raidbp = NULL;
1736 struct raid_softc *rs;
1737 int unit;
1738 int s;
1739
1740 s=0;
1741 /* s = splbio();*/ /* want to test this */
1742 /* XXX along with the vnode, we also need the softc associated with
1743 * this device.. */
1744
1745 req->queue = queue;
1746
1747 unit = queue->raidPtr->raidid;
1748
1749 db1_printf(("DispatchKernelIO unit: %d\n", unit));
1750
1751 if (unit >= numraid) {
1752 printf("Invalid unit number: %d %d\n", unit, numraid);
1753 panic("Invalid Unit number in rf_DispatchKernelIO\n");
1754 }
1755 rs = &raid_softc[unit];
1756
1757 bp = req->bp;
1758 #if 1
1759 /* XXX when there is a physical disk failure, someone is passing us a
1760 * buffer that contains old stuff!! Attempt to deal with this problem
1761 * without taking a performance hit... (not sure where the real bug
1762 * is. It's buried in RAIDframe somewhere) :-( GO ) */
1763
1764 if (bp->b_flags & B_ERROR) {
1765 bp->b_flags &= ~B_ERROR;
1766 }
1767 if (bp->b_error != 0) {
1768 bp->b_error = 0;
1769 }
1770 #endif
1771 raidbp = RAIDGETBUF(rs);
1772
1773 raidbp->rf_flags = 0; /* XXX not really used anywhere... */
1774
1775 /*
1776 * context for raidiodone
1777 */
1778 raidbp->rf_obp = bp;
1779 raidbp->req = req;
1780
1781 LIST_INIT(&raidbp->rf_buf.b_dep);
1782
1783 switch (req->type) {
1784 case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1785 /* XXX need to do something extra here.. */
1786 /* I'm leaving this in, as I've never actually seen it used,
1787 * and I'd like folks to report it... GO */
1788 printf(("WAKEUP CALLED\n"));
1789 queue->numOutstanding++;
1790
1791 /* XXX need to glue the original buffer into this?? */
1792
1793 KernelWakeupFunc(&raidbp->rf_buf);
1794 break;
1795
1796 case RF_IO_TYPE_READ:
1797 case RF_IO_TYPE_WRITE:
1798
1799 if (req->tracerec) {
1800 RF_ETIMER_START(req->tracerec->timer);
1801 }
1802 InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1803 op | bp->b_flags, req->sectorOffset, req->numSector,
1804 req->buf, KernelWakeupFunc, (void *) req,
1805 queue->raidPtr->logBytesPerSector, req->b_proc);
1806
1807 if (rf_debugKernelAccess) {
1808 db1_printf(("dispatch: bp->b_blkno = %ld\n",
1809 (long) bp->b_blkno));
1810 }
1811 queue->numOutstanding++;
1812 queue->last_deq_sector = req->sectorOffset;
1813 /* acc wouldn't have been let in if there were any pending
1814 * reqs at any other priority */
1815 queue->curPriority = req->priority;
1816
1817 db1_printf(("Going for %c to unit %d row %d col %d\n",
1818 req->type, unit, queue->row, queue->col));
1819 db1_printf(("sector %d count %d (%d bytes) %d\n",
1820 (int) req->sectorOffset, (int) req->numSector,
1821 (int) (req->numSector <<
1822 queue->raidPtr->logBytesPerSector),
1823 (int) queue->raidPtr->logBytesPerSector));
1824 if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1825 raidbp->rf_buf.b_vp->v_numoutput++;
1826 }
1827 VOP_STRATEGY(&raidbp->rf_buf);
1828
1829 break;
1830
1831 default:
1832 panic("bad req->type in rf_DispatchKernelIO");
1833 }
1834 db1_printf(("Exiting from DispatchKernelIO\n"));
1835 /* splx(s); */ /* want to test this */
1836 return (0);
1837 }
1838 /* this is the callback function associated with an I/O invoked from
1839 kernel code.
1840 */
1841 static void
1842 KernelWakeupFunc(vbp)
1843 struct buf *vbp;
1844 {
1845 RF_DiskQueueData_t *req = NULL;
1846 RF_DiskQueue_t *queue;
1847 struct raidbuf *raidbp = (struct raidbuf *) vbp;
1848 struct buf *bp;
1849 struct raid_softc *rs;
1850 int unit;
1851 int s;
1852
1853 s = splbio();
1854 db1_printf(("recovering the request queue:\n"));
1855 req = raidbp->req;
1856
1857 bp = raidbp->rf_obp;
1858
1859 queue = (RF_DiskQueue_t *) req->queue;
1860
1861 if (raidbp->rf_buf.b_flags & B_ERROR) {
1862 bp->b_flags |= B_ERROR;
1863 bp->b_error = raidbp->rf_buf.b_error ?
1864 raidbp->rf_buf.b_error : EIO;
1865 }
1866
1867 /* XXX methinks this could be wrong... */
1868 #if 1
1869 bp->b_resid = raidbp->rf_buf.b_resid;
1870 #endif
1871
1872 if (req->tracerec) {
1873 RF_ETIMER_STOP(req->tracerec->timer);
1874 RF_ETIMER_EVAL(req->tracerec->timer);
1875 RF_LOCK_MUTEX(rf_tracing_mutex);
1876 req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1877 req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1878 req->tracerec->num_phys_ios++;
1879 RF_UNLOCK_MUTEX(rf_tracing_mutex);
1880 }
1881 bp->b_bcount = raidbp->rf_buf.b_bcount; /* XXXX ?? */
1882
1883 unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
1884
1885
1886 /* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
1887 * ballistic, and mark the component as hosed... */
1888
1889 if (bp->b_flags & B_ERROR) {
1890 /* Mark the disk as dead */
1891 /* but only mark it once... */
1892 if (queue->raidPtr->Disks[queue->row][queue->col].status ==
1893 rf_ds_optimal) {
1894 printf("raid%d: IO Error. Marking %s as failed.\n",
1895 unit, queue->raidPtr->Disks[queue->row][queue->col].devname);
1896 queue->raidPtr->Disks[queue->row][queue->col].status =
1897 rf_ds_failed;
1898 queue->raidPtr->status[queue->row] = rf_rs_degraded;
1899 queue->raidPtr->numFailures++;
1900 queue->raidPtr->numNewFailures++;
1901 } else { /* Disk is already dead... */
1902 /* printf("Disk already marked as dead!\n"); */
1903 }
1904
1905 }
1906
1907 rs = &raid_softc[unit];
1908 RAIDPUTBUF(rs, raidbp);
1909
1910 rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
1911 (req->CompleteFunc) (req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
1912
1913 splx(s);
1914 }
1915
1916 /*
1917 * initialize a buf structure for doing an I/O in the kernel.
1918 */
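/*
 * Notes on the arguments (descriptive only): rw_flag carries B_READ or
 * B_WRITE plus any flags inherited from the original buffer; startSect
 * and numSect are in units of the array's sectors, so the byte count is
 * numSect << logBytesPerSector and startSect is stored directly in
 * b_blkno; cbFunc is installed as b_iodone together with B_CALL so that
 * biodone() will invoke it (KernelWakeupFunc in this driver).
 */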
1919 static void
1920 InitBP(bp, b_vp, rw_flag, startSect, numSect, buf, cbFunc, cbArg,
1921 logBytesPerSector, b_proc)
1922 struct buf *bp;
1923 struct vnode *b_vp;
1924 unsigned rw_flag;
1925 RF_SectorNum_t startSect;
1926 RF_SectorCount_t numSect;
1927 caddr_t buf;
1928 void (*cbFunc) (struct buf *);
1929 void *cbArg;
1930 int logBytesPerSector;
1931 struct proc *b_proc;
1932 {
1933 /* bp->b_flags = B_PHYS | rw_flag; */
1934 bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1935 bp->b_bcount = numSect << logBytesPerSector;
1936 bp->b_bufsize = bp->b_bcount;
1937 bp->b_error = 0;
1938 bp->b_devvp = b_vp;
1939 bp->b_data = buf;
1940 bp->b_blkno = startSect;
1941 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1942 if (bp->b_bcount == 0) {
1943 panic("bp->b_bcount is zero in InitBP!!\n");
1944 }
1945 bp->b_proc = b_proc;
1946 bp->b_iodone = cbFunc;
1947 bp->b_vp = b_vp;
1948
1949 }
1950
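/*
 * Fabricate a plausible disklabel from the RAID geometry when none can
 * be read from the device: one "track" per stripe of data sectors, an
 * arbitrary 4 * numCol tracks per cylinder, and a single RAW_PART
 * partition covering the whole array.
 */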
1951 static void
1952 raidgetdefaultlabel(raidPtr, rs, lp)
1953 RF_Raid_t *raidPtr;
1954 struct raid_softc *rs;
1955 struct disklabel *lp;
1956 {
1957 db1_printf(("Building a default label...\n"));
1958 memset(lp, 0, sizeof(*lp));
1959
1960 /* fabricate a label... */
1961 lp->d_secperunit = raidPtr->totalSectors;
1962 lp->d_secsize = raidPtr->bytesPerSector;
1963 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
1964 lp->d_ntracks = 4 * raidPtr->numCol;
1965 lp->d_ncylinders = raidPtr->totalSectors /
1966 (lp->d_nsectors * lp->d_ntracks);
1967 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
1968
1969 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
1970 lp->d_type = DTYPE_RAID;
1971 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
1972 lp->d_rpm = 3600;
1973 lp->d_interleave = 1;
1974 lp->d_flags = 0;
1975
1976 lp->d_partitions[RAW_PART].p_offset = 0;
1977 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
1978 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
1979 lp->d_npartitions = RAW_PART + 1;
1980
1981 lp->d_magic = DISKMAGIC;
1982 lp->d_magic2 = DISKMAGIC;
1983 lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
1984
1985 }
1986 /*
1987 * Read the disklabel from the raid device. If one is not present, fake one
1988 * up.
1989 */
1990 static void
1991 raidgetdisklabel(devvp)
1992 struct vnode *devvp;
1993 {
1994 struct raid_softc *rs = devvp->v_devcookie;
1995 char *errstring;
1996 struct disklabel *lp = rs->sc_dkdev.dk_label;
1997 struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
1998 RF_Raid_t *raidPtr;
1999
2000 db1_printf(("Getting the disklabel...\n"));
2001
2002 memset(clp, 0, sizeof(*clp));
2003
2004 raidPtr = raidPtrs[DISKUNIT(devvp->v_rdev)];
2005
2006 raidgetdefaultlabel(raidPtr, rs, lp);
2007
2008 /*
2009 * Call the generic disklabel extraction routine.
2010 */
2011 errstring = readdisklabel(devvp, raidstrategy,
2012 rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2013 if (errstring)
2014 raidmakedisklabel(rs);
2015 else {
2016 int i;
2017 struct partition *pp;
2018
2019 /*
2020 * Sanity check whether the found disklabel is valid.
2021 *
2022 * This is necessary since the total size of the raid device
2023 * may vary when the interleave is changed even though exactly
2024 * the same components are used, and an old disklabel may be
2025 * used if one is found.
2026 */
2027 if (lp->d_secperunit != rs->sc_size)
2028 printf("WARNING: %s: "
2029 "total sector size in disklabel (%d) != "
2030 "the size of raid (%ld)\n", rs->sc_xname,
2031 lp->d_secperunit, (long) rs->sc_size);
2032 for (i = 0; i < lp->d_npartitions; i++) {
2033 pp = &lp->d_partitions[i];
2034 if (pp->p_offset + pp->p_size > rs->sc_size)
2035 printf("WARNING: %s: end of partition `%c' "
2036 "exceeds the size of raid (%ld)\n",
2037 rs->sc_xname, 'a' + i, (long) rs->sc_size);
2038 }
2039 }
2040 }
2041 /*
2042 * Take care of things one might want to take care of in the event
2043 * that a disklabel isn't present.
2044 */
2045 static void
2046 raidmakedisklabel(rs)
2047 struct raid_softc *rs;
2048 {
2049 struct disklabel *lp = rs->sc_dkdev.dk_label;
2050 db1_printf(("Making a label..\n"));
2051
2052 /*
2053 * For historical reasons, if there's no disklabel present
2054 * the raw partition must be marked FS_BSDFFS.
2055 */
2056
2057 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2058
2059 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2060
2061 lp->d_checksum = dkcksum(lp);
2062 }
2063 /*
2064 * Lookup the provided name in the filesystem. If the file exists,
2065 * is a valid block device, and isn't being used by anyone else,
2066 * set *vpp to the file's vnode.
2067 * You'll find the original of this in ccd.c
2068 */
2069 int
2070 raidlookup(path, p, vpp)
2071 char *path;
2072 struct proc *p;
2073 struct vnode **vpp; /* result */
2074 {
2075 struct nameidata nd;
2076 struct vnode *vp;
2077 struct vattr va;
2078 int error;
2079
2080 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
2081 if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
2082 #ifdef DEBUG
2083 printf("RAIDframe: vn_open returned %d\n", error);
2084 #endif
2085 return (error);
2086 }
2087 vp = nd.ni_vp;
2088 if (vp->v_usecount > 1) {
2089 VOP_UNLOCK(vp, 0);
2090 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2091 return (EBUSY);
2092 }
2093 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
2094 VOP_UNLOCK(vp, 0);
2095 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2096 return (error);
2097 }
2098 /* XXX: eventually we should handle VREG, too. */
2099 if (va.va_type != VBLK) {
2100 VOP_UNLOCK(vp, 0);
2101 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2102 return (ENOTBLK);
2103 }
2104 VOP_UNLOCK(vp, 0);
2105 *vpp = vp;
2106 return (0);
2107 }
2108 /*
2109 * Wait interruptibly for an exclusive lock.
2110 *
2111 * XXX
2112 * Several drivers do this; it should be abstracted and made MP-safe.
2113 * (Hmm... where have we seen this warning before :-> GO )
2114 */
2115 static int
2116 raidlock(rs)
2117 struct raid_softc *rs;
2118 {
2119 int error;
2120
2121 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2122 rs->sc_flags |= RAIDF_WANTED;
2123 if ((error =
2124 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2125 return (error);
2126 }
2127 rs->sc_flags |= RAIDF_LOCKED;
2128 return (0);
2129 }
2130 /*
2131 * Unlock and wake up any waiters.
2132 */
2133 static void
2134 raidunlock(rs)
2135 struct raid_softc *rs;
2136 {
2137
2138 rs->sc_flags &= ~RAIDF_LOCKED;
2139 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2140 rs->sc_flags &= ~RAIDF_WANTED;
2141 wakeup(rs);
2142 }
2143 }
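/*
 * A minimal usage sketch (the open/close and configuration paths in
 * this driver follow this pattern):
 *
 *	if ((error = raidlock(rs)) != 0)
 *		return (error);
 *	... manipulate the softc ...
 *	raidunlock(rs);
 *
 * Note that the tsleep() above uses PCATCH, so raidlock() can fail
 * with EINTR/ERESTART and callers must check its return value.
 */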
2144
2145
2146 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2147 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
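/*
 * The component label is kept RF_COMPONENT_INFO_OFFSET bytes into the
 * reserved region at the start of each component and occupies (at most)
 * one RF_COMPONENT_INFO_SIZE byte block; the read/write routines below
 * convert the byte offset to a block number with DEV_BSIZE (i.e. block
 * 32 for the usual 512-byte DEV_BSIZE).
 */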
2148
2149 int
2150 raidmarkclean(struct vnode *b_vp, int mod_counter)
2151 {
2152 RF_ComponentLabel_t clabel;
2153 raidread_component_label(b_vp, &clabel);
2154 clabel.mod_counter = mod_counter;
2155 clabel.clean = RF_RAID_CLEAN;
2156 raidwrite_component_label(b_vp, &clabel);
2157 return(0);
2158 }
2159
2160
2161 int
2162 raidmarkdirty(struct vnode *b_vp, int mod_counter)
2163 {
2164 RF_ComponentLabel_t clabel;
2165 raidread_component_label(b_vp, &clabel);
2166 clabel.mod_counter = mod_counter;
2167 clabel.clean = RF_RAID_DIRTY;
2168 raidwrite_component_label(b_vp, &clabel);
2169 return(0);
2170 }
2171
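/*
 * Read the component label from the given component (synchronously,
 * via the block device's strategy routine and biowait()).  The caller
 * provides the RF_ComponentLabel_t storage; a scratch buffer from
 * geteblk() is used for the actual transfer and released afterwards.
 */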
2172 /* ARGSUSED */
2173 int
2174 raidread_component_label(b_vp, clabel)
2175 struct vnode *b_vp;
2176 RF_ComponentLabel_t *clabel;
2177 {
2178 struct buf *bp;
2179 int error;
2180
2181 /* XXX should probably ensure that we don't try to do this if
2182 someone has changed rf_protected_sectors. */
2183
2184 if (b_vp == NULL) {
2185 /* For whatever reason, this component is not valid.
2186 Don't try to read a component label from it. */
2187 return(EINVAL);
2188 }
2189
2190 /* get a block of the appropriate size... */
2191 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2192 bp->b_devvp = b_vp;
2193
2194 /* get our ducks in a row for the read */
2195 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2196 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2197 bp->b_flags |= B_READ;
2198 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2199
2200 (*bdevsw[major(bp->b_devvp->v_rdev)].d_strategy)(bp);
2201
2202 error = biowait(bp);
2203
2204 if (!error) {
2205 memcpy(clabel, bp->b_data,
2206 sizeof(RF_ComponentLabel_t));
2207 #if 0
2208 rf_print_component_label( clabel );
2209 #endif
2210 } else {
2211 #if 0
2212 printf("Failed to read RAID component label!\n");
2213 #endif
2214 }
2215
2216 brelse(bp);
2217 return(error);
2218 }
2219
2220 /* ARGSUSED */
2221 int
2222 raidwrite_component_label(b_vp, clabel)
2223 struct vnode *b_vp;
2224 RF_ComponentLabel_t *clabel;
2225 {
2226 struct buf *bp;
2227 int error;
2228
2229 /* get a block of the appropriate size... */
2230 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2231 bgetdevvp(b_vp, bp);
2232
2233 /* get our ducks in a row for the write */
2234 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2235 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2236 bp->b_flags |= B_WRITE;
2237 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2238
2239 memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2240
2241 memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2242
2243 (*bdevsw[major(bp->b_devvp->v_rdev)].d_strategy)(bp);
2244 error = biowait(bp);
2245 bp->b_flags |= B_INVAL;
2246 brelse(bp);
2247 if (error) {
2248 #if 1
2249 printf("Failed to write RAID component info!\n");
2250 #endif
2251 }
2252
2253 return(error);
2254 }
2255
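/*
 * Bump the array's modification counter and mark the component label on
 * every non-failed component as dirty.  This is done when the array is
 * brought into service so that an unclean shutdown can be detected; the
 * labels are only marked clean again at an orderly shutdown or after a
 * successful parity rewrite.
 */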
2256 void
2257 rf_markalldirty(raidPtr)
2258 RF_Raid_t *raidPtr;
2259 {
2260 RF_ComponentLabel_t clabel;
2261 int r,c;
2262
2263 raidPtr->mod_counter++;
2264 for (r = 0; r < raidPtr->numRow; r++) {
2265 for (c = 0; c < raidPtr->numCol; c++) {
2266 /* we don't want to touch (at all) a disk that has
2267 failed */
2268 if (!RF_DEAD_DISK(raidPtr->Disks[r][c].status)) {
2269 raidread_component_label(
2270 raidPtr->raid_cinfo[r][c].ci_vp,
2271 &clabel);
2272 if (clabel.status == rf_ds_spared) {
2273 /* XXX do something special...
2274 but whatever you do, don't
2275 try to access it!! */
2276 } else {
2277 #if 0
2278 clabel.status =
2279 raidPtr->Disks[r][c].status;
2280 raidwrite_component_label(
2281 raidPtr->raid_cinfo[r][c].ci_vp,
2282 &clabel);
2283 #endif
2284 raidmarkdirty(
2285 raidPtr->raid_cinfo[r][c].ci_vp,
2286 raidPtr->mod_counter);
2287 }
2288 }
2289 }
2290 }
2291 /* printf("Component labels marked dirty.\n"); */
2292 #if 0
2293 for( c = 0; c < raidPtr->numSpare ; c++) {
2294 sparecol = raidPtr->numCol + c;
2295 if (raidPtr->Disks[r][sparecol].status == rf_ds_used_spare) {
2296 /*
2297
2298 XXX this is where we get fancy and map this spare
2299 into its correct spot in the array.
2300
2301 */
2302 /*
2303
2304 we claim this disk is "optimal" if it's
2305 rf_ds_used_spare, as that means it should be
2306 directly substitutable for the disk it replaced.
2307 We note that too...
2308
2309 */
2310
2311 for(i=0;i<raidPtr->numRow;i++) {
2312 for(j=0;j<raidPtr->numCol;j++) {
2313 if ((raidPtr->Disks[i][j].spareRow ==
2314 r) &&
2315 (raidPtr->Disks[i][j].spareCol ==
2316 sparecol)) {
2317 srow = r;
2318 scol = sparecol;
2319 break;
2320 }
2321 }
2322 }
2323
2324 raidread_component_label(
2325 raidPtr->raid_cinfo[r][sparecol].ci_vp,
2326 &clabel);
2327 /* make sure status is noted */
2328 clabel.version = RF_COMPONENT_LABEL_VERSION;
2329 clabel.mod_counter = raidPtr->mod_counter;
2330 clabel.serial_number = raidPtr->serial_number;
2331 clabel.row = srow;
2332 clabel.column = scol;
2333 clabel.num_rows = raidPtr->numRow;
2334 clabel.num_columns = raidPtr->numCol;
2335 clabel.clean = RF_RAID_DIRTY; /* changed in a bit*/
2336 clabel.status = rf_ds_optimal;
2337 raidwrite_component_label(
2338 raidPtr->raid_cinfo[r][sparecol].ci_vp,
2339 &clabel);
2340 raidmarkclean(raidPtr->raid_cinfo[r][sparecol].ci_vp, raidPtr->mod_counter);
2341 }
2342 }
2343 #endif
2344 }
2345
2346
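/*
 * Rewrite the component labels on all optimal components and on any
 * in-use spares, bumping mod_counter as we go.  When called with
 * final == RF_FINAL_COMPONENT_UPDATE and the parity is known good, the
 * components are additionally marked clean.
 */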
2347 void
2348 rf_update_component_labels(raidPtr, final)
2349 RF_Raid_t *raidPtr;
2350 int final;
2351 {
2352 RF_ComponentLabel_t clabel;
2353 int sparecol;
2354 int r,c;
2355 int i,j;
2356 int srow, scol;
2357
2358 srow = -1;
2359 scol = -1;
2360
2361 /* XXX should do extra checks to make sure things really are clean,
2362 rather than blindly setting the clean bit... */
2363
2364 raidPtr->mod_counter++;
2365
2366 for (r = 0; r < raidPtr->numRow; r++) {
2367 for (c = 0; c < raidPtr->numCol; c++) {
2368 if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
2369 raidread_component_label(
2370 raidPtr->raid_cinfo[r][c].ci_vp,
2371 &clabel);
2372 /* make sure status is noted */
2373 clabel.status = rf_ds_optimal;
2374 /* bump the counter */
2375 clabel.mod_counter = raidPtr->mod_counter;
2376
2377 raidwrite_component_label(
2378 raidPtr->raid_cinfo[r][c].ci_vp,
2379 &clabel);
2380 if (final == RF_FINAL_COMPONENT_UPDATE) {
2381 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2382 raidmarkclean(
2383 raidPtr->raid_cinfo[r][c].ci_vp,
2384 raidPtr->mod_counter);
2385 }
2386 }
2387 }
2388 /* else we don't touch it.. */
2389 }
2390 }
2391
2392 for( c = 0; c < raidPtr->numSpare ; c++) {
2393 sparecol = raidPtr->numCol + c;
2394 if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
2395 /*
2396
2397 we claim this disk is "optimal" if it's
2398 rf_ds_used_spare, as that means it should be
2399 directly substitutable for the disk it replaced.
2400 We note that too...
2401
2402 */
2403
2404 for(i=0;i<raidPtr->numRow;i++) {
2405 for(j=0;j<raidPtr->numCol;j++) {
2406 if ((raidPtr->Disks[i][j].spareRow ==
2407 0) &&
2408 (raidPtr->Disks[i][j].spareCol ==
2409 sparecol)) {
2410 srow = i;
2411 scol = j;
2412 break;
2413 }
2414 }
2415 }
2416
2417 /* XXX shouldn't *really* need this... */
2418 raidread_component_label(
2419 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2420 &clabel);
2421 /* make sure status is noted */
2422
2423 raid_init_component_label(raidPtr, &clabel);
2424
2425 clabel.mod_counter = raidPtr->mod_counter;
2426 clabel.row = srow;
2427 clabel.column = scol;
2428 clabel.status = rf_ds_optimal;
2429
2430 raidwrite_component_label(
2431 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2432 &clabel);
2433 if (final == RF_FINAL_COMPONENT_UPDATE) {
2434 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2435 raidmarkclean(
2436 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2437 raidPtr->mod_counter);
2438 }
2439 }
2440 }
2441 }
2442 /* printf("Component labels updated\n"); */
2443 }
2444
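/*
 * Close a component's vnode.  Auto-configured components were opened
 * with VOP_OPEN()/NOCRED during autoconfiguration, so they are closed
 * with VOP_CLOSE() and vput(); everything else came through raidlookup()
 * and vn_open(), and is released with vn_close() using the engine
 * thread's credentials.
 */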
2445 void
2446 rf_close_component(raidPtr, vp, auto_configured)
2447 RF_Raid_t *raidPtr;
2448 struct vnode *vp;
2449 int auto_configured;
2450 {
2451 struct proc *p;
2452
2453 p = raidPtr->engine_thread;
2454
2455 if (vp != NULL) {
2456 if (auto_configured == 1) {
2457 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2458 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2459 vput(vp);
2460
2461 } else {
2462 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2463 }
2464 } else {
2465 printf("vnode was NULL\n");
2466 }
2467 }
2468
2469
2470 void
2471 rf_UnconfigureVnodes(raidPtr)
2472 RF_Raid_t *raidPtr;
2473 {
2474 int r,c;
2475 struct proc *p;
2476 struct vnode *vp;
2477 int acd;
2478
2479
2480 /* We take this opportunity to close the vnodes like we should.. */
2481
2482 p = raidPtr->engine_thread;
2483
2484 for (r = 0; r < raidPtr->numRow; r++) {
2485 for (c = 0; c < raidPtr->numCol; c++) {
2486 printf("Closing vnode for row: %d col: %d\n", r, c);
2487 vp = raidPtr->raid_cinfo[r][c].ci_vp;
2488 acd = raidPtr->Disks[r][c].auto_configured;
2489 rf_close_component(raidPtr, vp, acd);
2490 raidPtr->raid_cinfo[r][c].ci_vp = NULL;
2491 raidPtr->Disks[r][c].auto_configured = 0;
2492 }
2493 }
2494 for (r = 0; r < raidPtr->numSpare; r++) {
2495 printf("Closing vnode for spare: %d\n", r);
2496 vp = raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp;
2497 acd = raidPtr->Disks[0][raidPtr->numCol + r].auto_configured;
2498 rf_close_component(raidPtr, vp, acd);
2499 raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp = NULL;
2500 raidPtr->Disks[0][raidPtr->numCol + r].auto_configured = 0;
2501 }
2502 }
2503
2504
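/*
 * The following functions are the bodies of the kernel threads spawned
 * for long-running operations (reconstruction, parity rewrite, copyback,
 * in-place reconstruction).  Each one sets the corresponding
 * *_in_progress flag, does its work at splbio, clears the flag, and
 * terminates with kthread_exit().
 */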
2505 void
2506 rf_ReconThread(req)
2507 struct rf_recon_req *req;
2508 {
2509 int s;
2510 RF_Raid_t *raidPtr;
2511
2512 s = splbio();
2513 raidPtr = (RF_Raid_t *) req->raidPtr;
2514 raidPtr->recon_in_progress = 1;
2515
2516 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
2517 ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2518
2519 /* XXX get rid of this! we don't need it at all.. */
2520 RF_Free(req, sizeof(*req));
2521
2522 raidPtr->recon_in_progress = 0;
2523 splx(s);
2524
2525 /* That's all... */
2526 kthread_exit(0); /* does not return */
2527 }
2528
2529 void
2530 rf_RewriteParityThread(raidPtr)
2531 RF_Raid_t *raidPtr;
2532 {
2533 int retcode;
2534 int s;
2535
2536 raidPtr->parity_rewrite_in_progress = 1;
2537 s = splbio();
2538 retcode = rf_RewriteParity(raidPtr);
2539 splx(s);
2540 if (retcode) {
2541 printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2542 } else {
2543 /* set the clean bit! If we shutdown correctly,
2544 the clean bit on each component label will get
2545 set */
2546 raidPtr->parity_good = RF_RAID_CLEAN;
2547 }
2548 raidPtr->parity_rewrite_in_progress = 0;
2549
2550 /* Anyone waiting for us to stop? If so, inform them... */
2551 if (raidPtr->waitShutdown) {
2552 wakeup(&raidPtr->parity_rewrite_in_progress);
2553 }
2554
2555 /* That's all... */
2556 kthread_exit(0); /* does not return */
2557 }
2558
2559
2560 void
2561 rf_CopybackThread(raidPtr)
2562 RF_Raid_t *raidPtr;
2563 {
2564 int s;
2565
2566 raidPtr->copyback_in_progress = 1;
2567 s = splbio();
2568 rf_CopybackReconstructedData(raidPtr);
2569 splx(s);
2570 raidPtr->copyback_in_progress = 0;
2571
2572 /* That's all... */
2573 kthread_exit(0); /* does not return */
2574 }
2575
2576
2577 void
2578 rf_ReconstructInPlaceThread(req)
2579 struct rf_recon_req *req;
2580 {
2581 int retcode;
2582 int s;
2583 RF_Raid_t *raidPtr;
2584
2585 s = splbio();
2586 raidPtr = req->raidPtr;
2587 raidPtr->recon_in_progress = 1;
2588 retcode = rf_ReconstructInPlace(raidPtr, req->row, req->col);
2589 RF_Free(req, sizeof(*req));
2590 raidPtr->recon_in_progress = 0;
2591 splx(s);
2592
2593 /* That's all... */
2594 kthread_exit(0); /* does not return */
2595 }
2596
2597 void
2598 rf_mountroot_hook(dev)
2599 struct device *dev;
2600 {
2601
2602 }
2603
2604
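/*
 * Walk every disk-class device in the system, read its disklabel, and
 * for each partition of type FS_RAID try to read a component label.
 * Components whose labels look reasonable (rf_reasonable_label()) and
 * fit within their partition are collected, with their vnodes left
 * open, into an RF_AutoConfig_t list for the caller to sort into sets.
 */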
2605 RF_AutoConfig_t *
2606 rf_find_raid_components()
2607 {
2608 struct devnametobdevmaj *dtobdm;
2609 struct vnode *vp;
2610 struct disklabel label;
2611 struct device *dv;
2612 char *cd_name;
2613 dev_t dev;
2614 int error;
2615 int i;
2616 int good_one;
2617 RF_ComponentLabel_t *clabel;
2618 RF_AutoConfig_t *ac_list;
2619 RF_AutoConfig_t *ac;
2620
2621
2622 /* initialize the AutoConfig list */
2623 ac_list = NULL;
2624
2625 /* we begin by trolling through *all* the devices on the system */
2626
2627 for (dv = alldevs.tqh_first; dv != NULL;
2628 dv = dv->dv_list.tqe_next) {
2629
2630 /* we are only interested in disks... */
2631 if (dv->dv_class != DV_DISK)
2632 continue;
2633
2634 /* we don't care about floppies... */
2635 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"fd")) {
2636 continue;
2637 }
2638
2639 /* need to find the device_name_to_block_device_major stuff */
2640 cd_name = dv->dv_cfdata->cf_driver->cd_name;
2641 dtobdm = dev_name2blk;
2642 while (dtobdm->d_name && strcmp(dtobdm->d_name, cd_name)) {
2643 dtobdm++;
2644 }
2645
2646 /* get a vnode for the raw partition of this disk */
2647
2648 dev = MAKEDISKDEV(dtobdm->d_maj, dv->dv_unit, RAW_PART);
2649 if (bdevvp(dev, &vp))
2650 panic("RAID can't alloc vnode");
2651
2652 error = VOP_OPEN(vp, FREAD, NOCRED, 0, NULL);
2653
2654 if (error) {
2655 /* "Who cares." Continue looking
2656 for something that exists */
2657 vput(vp);
2658 continue;
2659 }
2660
2661 /* Ok, the disk exists. Go get the disklabel. */
2662 error = VOP_IOCTL(vp, DIOCGDINFO, (caddr_t)&label,
2663 FREAD, NOCRED, 0);
2664 if (error) {
2665 /*
2666 * XXX can't happen - open() would
2667 * have errored out (or faked up one)
2668 */
2669 printf("can't get label for dev %s%c (%d)!?!?\n",
2670 dv->dv_xname, 'a' + RAW_PART, error);
2671 }
2672
2673 /* don't need this any more. We'll allocate it again
2674 a little later if we really do... */
2675 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2676 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2677 vput(vp);
2678
2679 for (i=0; i < label.d_npartitions; i++) {
2680 /* We only support partitions marked as RAID */
2681 if (label.d_partitions[i].p_fstype != FS_RAID)
2682 continue;
2683
2684 dev = MAKEDISKDEV(dtobdm->d_maj, dv->dv_unit, i);
2685 if (bdevvp(dev, &vp))
2686 panic("RAID can't alloc vnode");
2687
2688 error = VOP_OPEN(vp, FREAD, NOCRED, 0, NULL);
2689 if (error) {
2690 /* Whatever... */
2691 vput(vp);
2692 continue;
2693 }
2694
2695 good_one = 0;
2696
2697 clabel = (RF_ComponentLabel_t *)
2698 malloc(sizeof(RF_ComponentLabel_t),
2699 M_RAIDFRAME, M_NOWAIT);
2700 if (clabel == NULL) {
2701 /* XXX CLEANUP HERE */
2702 printf("RAID auto config: out of memory!\n");
2703 return(NULL); /* XXX probably should panic? */
2704 }
2705
2706 if (!raidread_component_label(vp, clabel)) {
2707 /* Got the label. Does it look reasonable? */
2708 if (rf_reasonable_label(clabel) &&
2709 (clabel->partitionSize <=
2710 label.d_partitions[i].p_size)) {
2711 #if DEBUG
2712 printf("Component on: %s%c: %d\n",
2713 dv->dv_xname, 'a'+i,
2714 label.d_partitions[i].p_size);
2715 rf_print_component_label(clabel);
2716 #endif
2717 /* if it's reasonable, add it,
2718 else ignore it. */
2719 ac = (RF_AutoConfig_t *)
2720 malloc(sizeof(RF_AutoConfig_t),
2721 M_RAIDFRAME,
2722 M_NOWAIT);
2723 if (ac == NULL) {
2724 /* XXX should panic?? */
2725 return(NULL);
2726 }
2727
2728 sprintf(ac->devname, "%s%c",
2729 dv->dv_xname, 'a'+i);
2730 ac->vp = vp;
2731 ac->clabel = clabel;
2732 ac->next = ac_list;
2733 ac_list = ac;
2734 good_one = 1;
2735 }
2736 }
2737 if (!good_one) {
2738 /* cleanup */
2739 free(clabel, M_RAIDFRAME);
2740 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2741 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2742 vput(vp);
2743 }
2744 }
2745 }
2746 return(ac_list);
2747 }
2748
2749 static int
2750 rf_reasonable_label(clabel)
2751 RF_ComponentLabel_t *clabel;
2752 {
2753
2754 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2755 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2756 ((clabel->clean == RF_RAID_CLEAN) ||
2757 (clabel->clean == RF_RAID_DIRTY)) &&
2758 clabel->row >=0 &&
2759 clabel->column >= 0 &&
2760 clabel->num_rows > 0 &&
2761 clabel->num_columns > 0 &&
2762 clabel->row < clabel->num_rows &&
2763 clabel->column < clabel->num_columns &&
2764 clabel->blockSize > 0 &&
2765 clabel->numBlocks > 0) {
2766 /* label looks reasonable enough... */
2767 return(1);
2768 }
2769 return(0);
2770 }
2771
2772
2773 void
2774 rf_print_component_label(clabel)
2775 RF_ComponentLabel_t *clabel;
2776 {
2777 printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
2778 clabel->row, clabel->column,
2779 clabel->num_rows, clabel->num_columns);
2780 printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
2781 clabel->version, clabel->serial_number,
2782 clabel->mod_counter);
2783 printf(" Clean: %s Status: %d\n",
2784 clabel->clean ? "Yes" : "No", clabel->status );
2785 printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
2786 clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
2787 printf(" RAID Level: %c blocksize: %d numBlocks: %d\n",
2788 (char) clabel->parityConfig, clabel->blockSize,
2789 clabel->numBlocks);
2790 printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
2791 printf(" Contains root partition: %s\n",
2792 clabel->root_partition ? "Yes" : "No" );
2793 printf(" Last configured as: raid%d\n", clabel->last_unit );
2794 #if 0
2795 printf(" Config order: %d\n", clabel->config_order);
2796 #endif
2797
2798 }
2799
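/*
 * Partition the autoconfig list into configuration sets.  Each component
 * is compared (rf_does_it_fit()) against the first member of each
 * existing set; it joins the first set it is consistent with, otherwise
 * it starts a new set of its own.
 */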
2800 RF_ConfigSet_t *
2801 rf_create_auto_sets(ac_list)
2802 RF_AutoConfig_t *ac_list;
2803 {
2804 RF_AutoConfig_t *ac;
2805 RF_ConfigSet_t *config_sets;
2806 RF_ConfigSet_t *cset;
2807 RF_AutoConfig_t *ac_next;
2808
2809
2810 config_sets = NULL;
2811
2812 /* Go through the AutoConfig list, and figure out which components
2813 belong to what sets. */
2814 ac = ac_list;
2815 while(ac!=NULL) {
2816 /* we're going to putz with ac->next, so save it here
2817 for use at the end of the loop */
2818 ac_next = ac->next;
2819
2820 if (config_sets == NULL) {
2821 /* will need at least this one... */
2822 config_sets = (RF_ConfigSet_t *)
2823 malloc(sizeof(RF_ConfigSet_t),
2824 M_RAIDFRAME, M_NOWAIT);
2825 if (config_sets == NULL) {
2826 panic("rf_create_auto_sets: No memory!\n");
2827 }
2828 /* this one is easy :) */
2829 config_sets->ac = ac;
2830 config_sets->next = NULL;
2831 config_sets->rootable = 0;
2832 ac->next = NULL;
2833 } else {
2834 /* which set does this component fit into? */
2835 cset = config_sets;
2836 while(cset!=NULL) {
2837 if (rf_does_it_fit(cset, ac)) {
2838 /* looks like it matches... */
2839 ac->next = cset->ac;
2840 cset->ac = ac;
2841 break;
2842 }
2843 cset = cset->next;
2844 }
2845 if (cset==NULL) {
2846 /* didn't find a match above... new set..*/
2847 cset = (RF_ConfigSet_t *)
2848 malloc(sizeof(RF_ConfigSet_t),
2849 M_RAIDFRAME, M_NOWAIT);
2850 if (cset == NULL) {
2851 panic("rf_create_auto_sets: No memory!\n");
2852 }
2853 cset->ac = ac;
2854 ac->next = NULL;
2855 cset->next = config_sets;
2856 cset->rootable = 0;
2857 config_sets = cset;
2858 }
2859 }
2860 ac = ac_next;
2861 }
2862
2863
2864 return(config_sets);
2865 }
2866
2867 static int
2868 rf_does_it_fit(cset, ac)
2869 RF_ConfigSet_t *cset;
2870 RF_AutoConfig_t *ac;
2871 {
2872 RF_ComponentLabel_t *clabel1, *clabel2;
2873
2874 /* If this one matches the *first* one in the set, that's good
2875 enough, since the other members of the set would have been
2876 through here too... */
2877 /* note that we are not checking partitionSize here..
2878
2879 Note that we are also not checking the mod_counters here.
2880 If everything else matches except the mod_counter, that's
2881 good enough for this test. We will deal with the mod_counters
2882 a little later in the autoconfiguration process.
2883
2884 (clabel1->mod_counter == clabel2->mod_counter) &&
2885
2886 The reason we don't check for this is that failed disks
2887 will have lower modification counts. If those disks are
2888 not added to the set they used to belong to, then they will
2889 form their own set, which may result in 2 different sets,
2890 for example, competing to be configured at raid0, and
2891 perhaps competing to be the root filesystem set. If the
2892 wrong ones get configured, or both attempt to become /,
2893 weird behaviour and/or serious lossage will occur. Thus we
2894 need to bring them into the fold here, and kick them out at
2895 a later point.
2896
2897 */
2898
2899 clabel1 = cset->ac->clabel;
2900 clabel2 = ac->clabel;
2901 if ((clabel1->version == clabel2->version) &&
2902 (clabel1->serial_number == clabel2->serial_number) &&
2903 (clabel1->num_rows == clabel2->num_rows) &&
2904 (clabel1->num_columns == clabel2->num_columns) &&
2905 (clabel1->sectPerSU == clabel2->sectPerSU) &&
2906 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
2907 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
2908 (clabel1->parityConfig == clabel2->parityConfig) &&
2909 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
2910 (clabel1->blockSize == clabel2->blockSize) &&
2911 (clabel1->numBlocks == clabel2->numBlocks) &&
2912 (clabel1->autoconfigure == clabel2->autoconfigure) &&
2913 (clabel1->root_partition == clabel2->root_partition) &&
2914 (clabel1->last_unit == clabel2->last_unit) &&
2915 (clabel1->config_order == clabel2->config_order)) {
2916 /* if it gets here, it almost *has* to be a match */
2917 } else {
2918 /* it's not consistent with somebody in the set..
2919 punt */
2920 return(0);
2921 }
2922 /* all was fine.. it must fit... */
2923 return(1);
2924 }
2925
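/*
 * Decide whether a configuration set has enough live components (those
 * carrying the newest mod_counter) to be configured: RAID 0 tolerates
 * no missing components, RAID 4 and 5 tolerate at most one, and RAID 1
 * requires that at least one member of each even/odd component pair be
 * present.
 */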
2926 int
2927 rf_have_enough_components(cset)
2928 RF_ConfigSet_t *cset;
2929 {
2930 RF_AutoConfig_t *ac;
2931 RF_AutoConfig_t *auto_config;
2932 RF_ComponentLabel_t *clabel;
2933 int r,c;
2934 int num_rows;
2935 int num_cols;
2936 int num_missing;
2937 int mod_counter;
2938 int mod_counter_found;
2939 int even_pair_failed;
2940 char parity_type;
2941
2942
2943 /* check to see that we have enough 'live' components
2944 of this set. If so, we can configure it if necessary */
2945
2946 num_rows = cset->ac->clabel->num_rows;
2947 num_cols = cset->ac->clabel->num_columns;
2948 parity_type = cset->ac->clabel->parityConfig;
2949
2950 /* XXX Check for duplicate components!?!?!? */
2951
2952 /* Determine what the mod_counter is supposed to be for this set. */
2953
2954 mod_counter_found = 0;
2955 mod_counter = 0;
2956 ac = cset->ac;
2957 while(ac!=NULL) {
2958 if (mod_counter_found==0) {
2959 mod_counter = ac->clabel->mod_counter;
2960 mod_counter_found = 1;
2961 } else {
2962 if (ac->clabel->mod_counter > mod_counter) {
2963 mod_counter = ac->clabel->mod_counter;
2964 }
2965 }
2966 ac = ac->next;
2967 }
2968
2969 num_missing = 0;
2970 auto_config = cset->ac;
2971
2972 for(r=0; r<num_rows; r++) {
2973 even_pair_failed = 0;
2974 for(c=0; c<num_cols; c++) {
2975 ac = auto_config;
2976 while(ac!=NULL) {
2977 if ((ac->clabel->row == r) &&
2978 (ac->clabel->column == c) &&
2979 (ac->clabel->mod_counter == mod_counter)) {
2980 /* it's this one... */
2981 #if DEBUG
2982 printf("Found: %s at %d,%d\n",
2983 ac->devname,r,c);
2984 #endif
2985 break;
2986 }
2987 ac=ac->next;
2988 }
2989 if (ac==NULL) {
2990 /* Didn't find one here! */
2991 /* special case for RAID 1, especially
2992 where there are more than 2
2993 components (where RAIDframe treats
2994 things a little differently :( ) */
2995 if (parity_type == '1') {
2996 if (c%2 == 0) { /* even component */
2997 even_pair_failed = 1;
2998 } else { /* odd component. If
2999 we're failed, and
3000 so is the even
3001 component, it's
3002 "Good Night, Charlie" */
3003 if (even_pair_failed == 1) {
3004 return(0);
3005 }
3006 }
3007 } else {
3008 /* normal accounting */
3009 num_missing++;
3010 }
3011 }
3012 if ((parity_type == '1') && (c%2 == 1)) {
3013 /* Just did an even component, and we didn't
3014 bail.. reset the even_pair_failed flag,
3015 and go on to the next component.... */
3016 even_pair_failed = 0;
3017 }
3018 }
3019 }
3020
3021 clabel = cset->ac->clabel;
3022
3023 if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3024 ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3025 ((clabel->parityConfig == '5') && (num_missing > 1))) {
3026 /* XXX this needs to be made *much* more general */
3027 /* Too many failures */
3028 return(0);
3029 }
3030 /* otherwise, all is well, and we've got enough to take a kick
3031 at autoconfiguring this set */
3032 return(1);
3033 }
3034
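/*
 * Build an RF_Config_t from the component labels in the set so that
 * rf_Configure() can bring the array back up with the geometry it was
 * last configured with; device names are filled in per row/column from
 * each component's label.
 */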
3035 void
3036 rf_create_configuration(ac,config,raidPtr)
3037 RF_AutoConfig_t *ac;
3038 RF_Config_t *config;
3039 RF_Raid_t *raidPtr;
3040 {
3041 RF_ComponentLabel_t *clabel;
3042 int i;
3043
3044 clabel = ac->clabel;
3045
3046 /* 1. Fill in the common stuff */
3047 config->numRow = clabel->num_rows;
3048 config->numCol = clabel->num_columns;
3049 config->numSpare = 0; /* XXX should this be set here? */
3050 config->sectPerSU = clabel->sectPerSU;
3051 config->SUsPerPU = clabel->SUsPerPU;
3052 config->SUsPerRU = clabel->SUsPerRU;
3053 config->parityConfig = clabel->parityConfig;
3054 /* XXX... */
3055 strcpy(config->diskQueueType,"fifo");
3056 config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3057 config->layoutSpecificSize = 0; /* XXX ?? */
3058
3059 while(ac!=NULL) {
3060 /* row/col values will be in range due to the checks
3061 in reasonable_label() */
3062 strcpy(config->devnames[ac->clabel->row][ac->clabel->column],
3063 ac->devname);
3064 ac = ac->next;
3065 }
3066
3067 for(i=0;i<RF_MAXDBGV;i++) {
3068 config->debugVars[i][0] = '\0';
3069 }
3070 }
3071
3072 int
3073 rf_set_autoconfig(raidPtr, new_value)
3074 RF_Raid_t *raidPtr;
3075 int new_value;
3076 {
3077 RF_ComponentLabel_t clabel;
3078 struct vnode *vp;
3079 int row, column;
3080
3081 raidPtr->autoconfigure = new_value;
3082 for(row=0; row<raidPtr->numRow; row++) {
3083 for(column=0; column<raidPtr->numCol; column++) {
3084 if (raidPtr->Disks[row][column].status ==
3085 rf_ds_optimal) {
3086 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3087 raidread_component_label(vp, &clabel);
3088 clabel.autoconfigure = new_value;
3089 raidwrite_component_label(vp, &clabel);
3090 }
3091 }
3092 }
3093 return(new_value);
3094 }
3095
3096 int
3097 rf_set_rootpartition(raidPtr, new_value)
3098 RF_Raid_t *raidPtr;
3099 int new_value;
3100 {
3101 RF_ComponentLabel_t clabel;
3102 struct vnode *vp;
3103 int row, column;
3104
3105 raidPtr->root_partition = new_value;
3106 for(row=0; row<raidPtr->numRow; row++) {
3107 for(column=0; column<raidPtr->numCol; column++) {
3108 if (raidPtr->Disks[row][column].status ==
3109 rf_ds_optimal) {
3110 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3111 raidread_component_label(vp, &clabel);
3112 clabel.root_partition = new_value;
3113 raidwrite_component_label(vp, &clabel);
3114 }
3115 }
3116 }
3117 return(new_value);
3118 }
3119
3120 void
3121 rf_release_all_vps(cset)
3122 RF_ConfigSet_t *cset;
3123 {
3124 RF_AutoConfig_t *ac;
3125
3126 ac = cset->ac;
3127 while(ac!=NULL) {
3128 /* Close the vp, and give it back */
3129 if (ac->vp) {
3130 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3131 VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3132 vput(ac->vp);
3133 ac->vp = NULL;
3134 }
3135 ac = ac->next;
3136 }
3137 }
3138
3139
3140 void
3141 rf_cleanup_config_set(cset)
3142 RF_ConfigSet_t *cset;
3143 {
3144 RF_AutoConfig_t *ac;
3145 RF_AutoConfig_t *next_ac;
3146
3147 ac = cset->ac;
3148 while(ac!=NULL) {
3149 next_ac = ac->next;
3150 /* nuke the label */
3151 free(ac->clabel, M_RAIDFRAME);
3152 /* cleanup the config structure */
3153 free(ac, M_RAIDFRAME);
3154 /* "next.." */
3155 ac = next_ac;
3156 }
3157 /* and, finally, nuke the config set */
3158 free(cset, M_RAIDFRAME);
3159 }
3160
3161
3162 void
3163 raid_init_component_label(raidPtr, clabel)
3164 RF_Raid_t *raidPtr;
3165 RF_ComponentLabel_t *clabel;
3166 {
3167 /* current version number */
3168 clabel->version = RF_COMPONENT_LABEL_VERSION;
3169 clabel->serial_number = raidPtr->serial_number;
3170 clabel->mod_counter = raidPtr->mod_counter;
3171 clabel->num_rows = raidPtr->numRow;
3172 clabel->num_columns = raidPtr->numCol;
3173 clabel->clean = RF_RAID_DIRTY; /* not clean */
3174 clabel->status = rf_ds_optimal; /* "It's good!" */
3175
3176 clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3177 clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3178 clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3179
3180 clabel->blockSize = raidPtr->bytesPerSector;
3181 clabel->numBlocks = raidPtr->sectorsPerDisk;
3182
3183 /* XXX not portable */
3184 clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3185 clabel->maxOutstanding = raidPtr->maxOutstanding;
3186 clabel->autoconfigure = raidPtr->autoconfigure;
3187 clabel->root_partition = raidPtr->root_partition;
3188 clabel->last_unit = raidPtr->raidid;
3189 clabel->config_order = raidPtr->config_order;
3190 }
3191
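/*
 * Configure a single autoconfigured set: pick a unit number (preferring
 * the label's last_unit when that raid device is free), build the
 * configuration, and run rf_Configure()/raidinit().  On success the
 * chosen unit is returned through *unit, and the set is flagged rootable
 * if its labels say it contains the root partition.
 */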
3192 int
3193 rf_auto_config_set(cset,unit)
3194 RF_ConfigSet_t *cset;
3195 int *unit;
3196 {
3197 RF_Raid_t *raidPtr;
3198 RF_Config_t *config;
3199 int raidID;
3200 int retcode;
3201
3202 printf("RAID autoconfigure\n");
3203
3204 retcode = 0;
3205 *unit = -1;
3206
3207 /* 1. Create a config structure */
3208
3209 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3210 M_RAIDFRAME,
3211 M_NOWAIT);
3212 if (config==NULL) {
3213 printf("Out of mem!?!?\n");
3214 /* XXX do something more intelligent here. */
3215 return(1);
3216 }
3217
3218 memset(config, 0, sizeof(RF_Config_t));
3219
3220 /* XXX raidID needs to be set correctly.. */
3221
3222 /*
3223 2. Figure out what RAID ID this one is supposed to live at
3224 See if we can get the same RAID dev that it was configured
3225 on last time..
3226 */
3227
3228 raidID = cset->ac->clabel->last_unit;
3229 if ((raidID < 0) || (raidID >= numraid)) {
3230 /* let's not wander off into lala land. */
3231 raidID = numraid - 1;
3232 }
3233 if (raidPtrs[raidID]->valid != 0) {
3234
3235 /*
3236 Nope... Go looking for an alternative...
3237 Start high so we don't immediately use raid0 if that's
3238 not taken.
3239 */
3240
3241 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3242 if (raidPtrs[raidID]->valid == 0) {
3243 /* can use this one! */
3244 break;
3245 }
3246 }
3247 }
3248
3249 if (raidID < 0) {
3250 /* punt... */
3251 printf("Unable to auto configure this set!\n");
3252 printf("(Out of RAID devs!)\n");
3253 return(1);
3254 }
3255 printf("Configuring raid%d:\n",raidID);
3256 raidPtr = raidPtrs[raidID];
3257
3258 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3259 raidPtr->raidid = raidID;
3260 raidPtr->openings = RAIDOUTSTANDING;
3261
3262 /* 3. Build the configuration structure */
3263 rf_create_configuration(cset->ac, config, raidPtr);
3264
3265 /* 4. Do the configuration */
3266 retcode = rf_Configure(raidPtr, config, cset->ac);
3267
3268 if (retcode == 0) {
3269
3270 raidinit(raidPtrs[raidID]);
3271
3272 rf_markalldirty(raidPtrs[raidID]);
3273 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3274 if (cset->ac->clabel->root_partition==1) {
3275 /* everything configured just fine. Make a note
3276 that this set is eligible to be root. */
3277 cset->rootable = 1;
3278 /* XXX do this here? */
3279 raidPtrs[raidID]->root_partition = 1;
3280 }
3281 }
3282
3283 /* 5. Cleanup */
3284 free(config, M_RAIDFRAME);
3285
3286 *unit = raidID;
3287 return(retcode);
3288 }
3289
3290 void
3291 rf_disk_unbusy(desc)
3292 RF_RaidAccessDesc_t *desc;
3293 {
3294 struct buf *bp;
3295
3296 bp = (struct buf *)desc->bp;
3297 disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3298 (bp->b_bcount - bp->b_resid));
3299 }
3300