1 /* $NetBSD: rf_netbsdkintf.c,v 1.104.2.6 2002/01/08 00:31:35 nathanw Exp $ */
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1988 University of Utah.
40 * Copyright (c) 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 * notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 * notice, this list of conditions and the following disclaimer in the
54 * documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 * must display the following acknowledgement:
57 * This product includes software developed by the University of
58 * California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 *
77 * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 */
79
80
81
82
83 /*
84 * Copyright (c) 1995 Carnegie-Mellon University.
85 * All rights reserved.
86 *
87 * Authors: Mark Holland, Jim Zelenka
88 *
89 * Permission to use, copy, modify and distribute this software and
90 * its documentation is hereby granted, provided that both the copyright
91 * notice and this permission notice appear in all copies of the
92 * software, derivative works or modified versions, and any portions
93 * thereof, and that both notices appear in supporting documentation.
94 *
95 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
96 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
97 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
98 *
99 * Carnegie Mellon requests users of this software to return to
100 *
101 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
102 * School of Computer Science
103 * Carnegie Mellon University
104 * Pittsburgh PA 15213-3890
105 *
106 * any improvements or extensions that they make and grant Carnegie the
107 * rights to redistribute these changes.
108 */
109
110 /***********************************************************
111 *
112 * rf_kintf.c -- the kernel interface routines for RAIDframe
113 *
114 ***********************************************************/
115
116 #include <sys/cdefs.h>
117 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.104.2.6 2002/01/08 00:31:35 nathanw Exp $");
118
119 #include <sys/param.h>
120 #include <sys/errno.h>
121 #include <sys/pool.h>
122 #include <sys/lwp.h>
123 #include <sys/proc.h>
124 #include <sys/queue.h>
125 #include <sys/disk.h>
126 #include <sys/device.h>
127 #include <sys/stat.h>
128 #include <sys/ioctl.h>
129 #include <sys/fcntl.h>
130 #include <sys/systm.h>
131 #include <sys/namei.h>
132 #include <sys/vnode.h>
133 #include <sys/disklabel.h>
134 #include <sys/conf.h>
135 #include <sys/lock.h>
136 #include <sys/buf.h>
137 #include <sys/user.h>
138 #include <sys/reboot.h>
139
140 #include <dev/raidframe/raidframevar.h>
141 #include <dev/raidframe/raidframeio.h>
142 #include "raid.h"
143 #include "opt_raid_autoconfig.h"
144 #include "rf_raid.h"
145 #include "rf_copyback.h"
146 #include "rf_dag.h"
147 #include "rf_dagflags.h"
148 #include "rf_desc.h"
149 #include "rf_diskqueue.h"
150 #include "rf_acctrace.h"
151 #include "rf_etimer.h"
152 #include "rf_general.h"
153 #include "rf_debugMem.h"
154 #include "rf_kintf.h"
155 #include "rf_options.h"
156 #include "rf_driver.h"
157 #include "rf_parityscan.h"
158 #include "rf_debugprint.h"
159 #include "rf_threadstuff.h"
160
161 int rf_kdebug_level = 0;
162
163 #ifdef DEBUG
164 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
165 #else /* DEBUG */
166 #define db1_printf(a) { }
167 #endif /* DEBUG */
168
169 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
170
171 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
172
173 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
174 * spare table */
175 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
176 * installation process */
177
178 /* prototypes */
179 static void KernelWakeupFunc(struct buf * bp);
180 static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
181 dev_t dev, RF_SectorNum_t startSect,
182 RF_SectorCount_t numSect, caddr_t buf,
183 void (*cbFunc) (struct buf *), void *cbArg,
184 int logBytesPerSector, struct proc * b_proc);
185 static void raidinit(RF_Raid_t *);
186
187 void raidattach(int);
188 int raidsize(dev_t);
189 int raidopen(dev_t, int, int, struct proc *);
190 int raidclose(dev_t, int, int, struct proc *);
191 int raidioctl(dev_t, u_long, caddr_t, int, struct proc *);
192 int raidwrite(dev_t, struct uio *, int);
193 int raidread(dev_t, struct uio *, int);
194 void raidstrategy(struct buf *);
195 int raiddump(dev_t, daddr_t, caddr_t, size_t);
196
197 /*
198 * Pilfered from ccd.c
199 */
200
201 struct raidbuf {
202 struct buf rf_buf; /* new I/O buf. MUST BE FIRST!!! */
203 struct buf *rf_obp; /* ptr. to original I/O buf */
204 int rf_flags; /* misc. flags */
205 RF_DiskQueueData_t *req;/* the request that this was part of.. */
206 };
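/*
 * Note: rf_buf must stay the first member.  KernelWakeupFunc() is handed
 * the component's struct buf * on completion and casts it straight back
 * to a struct raidbuf *, which only works while the two share an address.
 */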
207
208
209 #define RAIDGETBUF(rs) pool_get(&(rs)->sc_cbufpool, PR_NOWAIT)
210 #define RAIDPUTBUF(rs, cbp) pool_put(&(rs)->sc_cbufpool, cbp)
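/*
 * These wrap the per-unit component buffer pool.  pool_get() with
 * PR_NOWAIT can fail and return NULL when the pool is exhausted, so a
 * RAIDGETBUF() caller cannot assume the allocation always succeeds.
 */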
211
212 /* XXX Not sure if the following should be replacing the raidPtrs above,
213 or if it should be used in conjunction with that...
214 */
215
216 struct raid_softc {
217 int sc_flags; /* flags */
218 int sc_cflags; /* configuration flags */
219 size_t sc_size; /* size of the raid device */
220 char sc_xname[20]; /* XXX external name */
221 struct disk sc_dkdev; /* generic disk device info */
222 struct pool sc_cbufpool; /* component buffer pool */
223 struct buf_queue buf_queue; /* used for the device queue */
224 };
225 /* sc_flags */
226 #define RAIDF_INITED 0x01 /* unit has been initialized */
227 #define RAIDF_WLABEL 0x02 /* label area is writable */
228 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
229 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
230 #define RAIDF_LOCKED 0x80 /* unit is locked */
231
232 #define raidunit(x) DISKUNIT(x)
233 int numraid = 0;
234
235 /*
236 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
237 * Be aware that large numbers can allow the driver to consume a lot of
238 * kernel memory, especially on writes, and in degraded mode reads.
239 *
240 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
241 * a single 64K write will typically require 64K for the old data,
242 * 64K for the old parity, and 64K for the new parity, for a total
243 * of 192K (if the parity buffer is not re-used immediately).
244 * Even if it is used immediately, that's still 128K, which when multiplied
245 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
246 *
247 * Now in degraded mode, for example, a 64K read on the above setup may
248 * require data reconstruction, which will require *all* of the 4 remaining
249 * disks to participate -- 4 * 32K/disk == 128K again.
250 */
251
252 #ifndef RAIDOUTSTANDING
253 #define RAIDOUTSTANDING 6
254 #endif
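/*
 * For illustration only, using the figures from the comment above: each
 * of the RAIDOUTSTANDING (6) requests may pin roughly 192K of old/new
 * parity and data buffers plus 64K of incoming data, i.e. on the order
 * of 1.5MB of kernel memory in the worst case.  Raise this value with
 * that in mind.
 */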
255
256 #define RAIDLABELDEV(dev) \
257 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
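/*
 * RAIDLABELDEV() rebuilds a dev_t addressing the raw partition of the
 * same unit, so disklabel reads and writes always go through the
 * whole-device partition regardless of which partition was opened.
 */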
258
259 /* declared here, and made public, for the benefit of KVM stuff.. */
260 struct raid_softc *raid_softc;
261
262 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
263 struct disklabel *);
264 static void raidgetdisklabel(dev_t);
265 static void raidmakedisklabel(struct raid_softc *);
266
267 static int raidlock(struct raid_softc *);
268 static void raidunlock(struct raid_softc *);
269
270 static void rf_markalldirty(RF_Raid_t *);
271 void rf_mountroot_hook(struct device *);
272
273 struct device *raidrootdev;
274
275 void rf_ReconThread(struct rf_recon_req *);
276 /* XXX what I want is: */
277 /*void rf_ReconThread(RF_Raid_t *raidPtr); */
278 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
279 void rf_CopybackThread(RF_Raid_t *raidPtr);
280 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
281 void rf_buildroothack(void *);
282
283 RF_AutoConfig_t *rf_find_raid_components(void);
284 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
285 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
286 static int rf_reasonable_label(RF_ComponentLabel_t *);
287 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
288 int rf_set_autoconfig(RF_Raid_t *, int);
289 int rf_set_rootpartition(RF_Raid_t *, int);
290 void rf_release_all_vps(RF_ConfigSet_t *);
291 void rf_cleanup_config_set(RF_ConfigSet_t *);
292 int rf_have_enough_components(RF_ConfigSet_t *);
293 int rf_auto_config_set(RF_ConfigSet_t *, int *);
294
295 static int raidautoconfig = 0; /* Debugging, mostly. Set to 0 to not
296 allow autoconfig to take place.
297 Note that this is overridden by having
298 RAID_AUTOCONFIG as an option in the
299 kernel config file. */
300
301 void
302 raidattach(num)
303 int num;
304 {
305 int raidID;
306 int i, rc;
307 RF_AutoConfig_t *ac_list; /* autoconfig list */
308 RF_ConfigSet_t *config_sets;
309
310 #ifdef DEBUG
311 printf("raidattach: Asked for %d units\n", num);
312 #endif
313
314 if (num <= 0) {
315 #ifdef DIAGNOSTIC
316 panic("raidattach: count <= 0");
317 #endif
318 return;
319 }
320 /* This is where all the initialization stuff gets done. */
321
322 numraid = num;
323
324 /* Make some space for requested number of units... */
325
326 RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
327 if (raidPtrs == NULL) {
328 panic("raidPtrs is NULL!!\n");
329 }
330
331 rc = rf_mutex_init(&rf_sparet_wait_mutex);
332 if (rc) {
333 RF_PANIC();
334 }
335
336 rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
337
338 for (i = 0; i < num; i++)
339 raidPtrs[i] = NULL;
340 rc = rf_BootRaidframe();
341 if (rc == 0)
342 printf("Kernelized RAIDframe activated\n");
343 else
344 panic("Serious error booting RAID!!\n");
345
346 /* put together some datastructures like the CCD device does.. This
347 * lets us lock the device and what-not when it gets opened. */
348
349 raid_softc = (struct raid_softc *)
350 malloc(num * sizeof(struct raid_softc),
351 M_RAIDFRAME, M_NOWAIT);
352 if (raid_softc == NULL) {
353 printf("WARNING: no memory for RAIDframe driver\n");
354 return;
355 }
356
357 memset(raid_softc, 0, num * sizeof(struct raid_softc));
358
359 raidrootdev = (struct device *)malloc(num * sizeof(struct device),
360 M_RAIDFRAME, M_NOWAIT);
361 if (raidrootdev == NULL) {
362 panic("No memory for RAIDframe driver!!?!?!\n");
363 }
364
365 for (raidID = 0; raidID < num; raidID++) {
366 BUFQ_INIT(&raid_softc[raidID].buf_queue);
367
368 raidrootdev[raidID].dv_class = DV_DISK;
369 raidrootdev[raidID].dv_cfdata = NULL;
370 raidrootdev[raidID].dv_unit = raidID;
371 raidrootdev[raidID].dv_parent = NULL;
372 raidrootdev[raidID].dv_flags = 0;
373 sprintf(raidrootdev[raidID].dv_xname,"raid%d",raidID);
374
375 RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
376 (RF_Raid_t *));
377 if (raidPtrs[raidID] == NULL) {
378 printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
379 numraid = raidID;
380 return;
381 }
382 }
383
384 #ifdef RAID_AUTOCONFIG
385 raidautoconfig = 1;
386 #endif
387
388 if (raidautoconfig) {
389 /* 1. locate all RAID components on the system */
390
391 #if DEBUG
392 printf("Searching for raid components...\n");
393 #endif
394 ac_list = rf_find_raid_components();
395
396 /* 2. sort them into their respective sets */
397
398 config_sets = rf_create_auto_sets(ac_list);
399
400 /* 3. evaluate each set and configure the valid ones
401 This gets done in rf_buildroothack() */
402
403 /* schedule the creation of the thread to do the
404 "/ on RAID" stuff */
405
406 kthread_create(rf_buildroothack,config_sets);
407
408 #if 0
409 mountroothook_establish(rf_mountroot_hook, &raidrootdev[0]);
410 #endif
411 }
412
413 }
414
415 void
416 rf_buildroothack(arg)
417 void *arg;
418 {
419 RF_ConfigSet_t *config_sets = arg;
420 RF_ConfigSet_t *cset;
421 RF_ConfigSet_t *next_cset;
422 int retcode;
423 int raidID;
424 int rootID;
425 int num_root;
426
427 rootID = 0;
428 num_root = 0;
429 cset = config_sets;
430 while(cset != NULL ) {
431 next_cset = cset->next;
432 if (rf_have_enough_components(cset) &&
433 cset->ac->clabel->autoconfigure==1) {
434 retcode = rf_auto_config_set(cset,&raidID);
435 if (!retcode) {
436 if (cset->rootable) {
437 rootID = raidID;
438 num_root++;
439 }
440 } else {
441 /* The autoconfig didn't work :( */
442 #if DEBUG
443 printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
444 #endif
445 rf_release_all_vps(cset);
446 }
447 } else {
448 /* we're not autoconfiguring this set...
449 release the associated resources */
450 rf_release_all_vps(cset);
451 }
452 /* cleanup */
453 rf_cleanup_config_set(cset);
454 cset = next_cset;
455 }
456 if (boothowto & RB_ASKNAME) {
457 /* We don't auto-config... */
458 } else {
459 /* They didn't ask, and we found something bootable... */
460
461 if (num_root == 1) {
462 booted_device = &raidrootdev[rootID];
463 } else if (num_root > 1) {
464 /* we can't guess.. require the user to answer... */
465 boothowto |= RB_ASKNAME;
466 }
467 }
468 }
469
470
471 int
472 raidsize(dev)
473 dev_t dev;
474 {
475 struct raid_softc *rs;
476 struct disklabel *lp;
477 int part, unit, omask, size;
478
479 unit = raidunit(dev);
480 if (unit >= numraid)
481 return (-1);
482 rs = &raid_softc[unit];
483
484 if ((rs->sc_flags & RAIDF_INITED) == 0)
485 return (-1);
486
487 part = DISKPART(dev);
488 omask = rs->sc_dkdev.dk_openmask & (1 << part);
489 lp = rs->sc_dkdev.dk_label;
490
491 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc->l_proc))
492 return (-1);
493
494 if (lp->d_partitions[part].p_fstype != FS_SWAP)
495 size = -1;
496 else
497 size = lp->d_partitions[part].p_size *
498 (lp->d_secsize / DEV_BSIZE);
499
500 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc->l_proc))
501 return (-1);
502
503 return (size);
504
505 }
506
507 int
508 raiddump(dev, blkno, va, size)
509 dev_t dev;
510 daddr_t blkno;
511 caddr_t va;
512 size_t size;
513 {
514 /* Not implemented. */
515 return ENXIO;
516 }
517 /* ARGSUSED */
518 int
519 raidopen(dev, flags, fmt, p)
520 dev_t dev;
521 int flags, fmt;
522 struct proc *p;
523 {
524 int unit = raidunit(dev);
525 struct raid_softc *rs;
526 struct disklabel *lp;
527 int part, pmask;
528 int error = 0;
529
530 if (unit >= numraid)
531 return (ENXIO);
532 rs = &raid_softc[unit];
533
534 if ((error = raidlock(rs)) != 0)
535 return (error);
536 lp = rs->sc_dkdev.dk_label;
537
538 part = DISKPART(dev);
539 pmask = (1 << part);
540
541 db1_printf(("Opening raid device number: %d partition: %d\n",
542 unit, part));
543
544
545 if ((rs->sc_flags & RAIDF_INITED) &&
546 (rs->sc_dkdev.dk_openmask == 0))
547 raidgetdisklabel(dev);
548
549 /* make sure that this partition exists */
550
551 if (part != RAW_PART) {
552 db1_printf(("Not a raw partition..\n"));
553 if (((rs->sc_flags & RAIDF_INITED) == 0) ||
554 ((part >= lp->d_npartitions) ||
555 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
556 error = ENXIO;
557 raidunlock(rs);
558 db1_printf(("Bailing out...\n"));
559 return (error);
560 }
561 }
562 /* Prevent this unit from being unconfigured while open. */
563 switch (fmt) {
564 case S_IFCHR:
565 rs->sc_dkdev.dk_copenmask |= pmask;
566 break;
567
568 case S_IFBLK:
569 rs->sc_dkdev.dk_bopenmask |= pmask;
570 break;
571 }
572
573 if ((rs->sc_dkdev.dk_openmask == 0) &&
574 ((rs->sc_flags & RAIDF_INITED) != 0)) {
575 /* First one... mark things as dirty... Note that we *MUST*
576 have done a configure before this. I DO NOT WANT TO BE
577 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
578 THAT THEY BELONG TOGETHER!!!!! */
579 /* XXX should check to see if we're only open for reading
580 here... If so, we needn't do this, but then need some
581 other way of keeping track of what's happened.. */
582
583 rf_markalldirty( raidPtrs[unit] );
584 }
585
586
587 rs->sc_dkdev.dk_openmask =
588 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
589
590 raidunlock(rs);
591
592 return (error);
593
594
595 }
596 /* ARGSUSED */
597 int
598 raidclose(dev, flags, fmt, p)
599 dev_t dev;
600 int flags, fmt;
601 struct proc *p;
602 {
603 int unit = raidunit(dev);
604 struct raid_softc *rs;
605 int error = 0;
606 int part;
607
608 if (unit >= numraid)
609 return (ENXIO);
610 rs = &raid_softc[unit];
611
612 if ((error = raidlock(rs)) != 0)
613 return (error);
614
615 part = DISKPART(dev);
616
617 /* ...that much closer to allowing unconfiguration... */
618 switch (fmt) {
619 case S_IFCHR:
620 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
621 break;
622
623 case S_IFBLK:
624 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
625 break;
626 }
627 rs->sc_dkdev.dk_openmask =
628 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
629
630 if ((rs->sc_dkdev.dk_openmask == 0) &&
631 ((rs->sc_flags & RAIDF_INITED) != 0)) {
632 /* Last one... device is not unconfigured yet.
633 Since RAIDF_INITED is still set, mark things as
634 clean here; device shutdown takes care of the
635 clean bits for the case where RAIDF_INITED is not set. */
636 #if 0
637 printf("Last one on raid%d. Updating status.\n",unit);
638 #endif
639 rf_update_component_labels(raidPtrs[unit],
640 RF_FINAL_COMPONENT_UPDATE);
641 if (doing_shutdown) {
642 /* last one, and we're going down, so
643 lights out for this RAID set too. */
644 error = rf_Shutdown(raidPtrs[unit]);
645 pool_destroy(&rs->sc_cbufpool);
646
647 /* It's no longer initialized... */
648 rs->sc_flags &= ~RAIDF_INITED;
649
650 /* Detach the disk. */
651 disk_detach(&rs->sc_dkdev);
652 }
653 }
654
655 raidunlock(rs);
656 return (0);
657
658 }
659
660 void
661 raidstrategy(bp)
662 struct buf *bp;
663 {
664 int s;
665
666 unsigned int raidID = raidunit(bp->b_dev);
667 RF_Raid_t *raidPtr;
668 struct raid_softc *rs = &raid_softc[raidID];
669 struct disklabel *lp;
670 int wlabel;
671
672 if (raidID >= numraid || !raidPtrs[raidID]) {
673 bp->b_error = ENODEV;
674 bp->b_flags |= B_ERROR;
675 bp->b_resid = bp->b_bcount;
676 biodone(bp);
677 return;
678 }
679 if ((rs->sc_flags & RAIDF_INITED) == 0) {
680 bp->b_error = ENXIO;
681 bp->b_flags |= B_ERROR;
682 bp->b_resid = bp->b_bcount;
683 biodone(bp);
684 return;
685 }
686 raidPtr = raidPtrs[raidID];
687 if (!raidPtr->valid) {
688 bp->b_error = ENODEV;
689 bp->b_flags |= B_ERROR;
690 bp->b_resid = bp->b_bcount;
691 biodone(bp);
692 return;
693 }
694 if (bp->b_bcount == 0) {
695 db1_printf(("b_bcount is zero..\n"));
696 biodone(bp);
697 return;
698 }
699 lp = rs->sc_dkdev.dk_label;
700
701 /*
702 * Do bounds checking and adjust transfer. If there's an
703 * error, the bounds check will flag that for us.
704 */
705
706 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
707 if (DISKPART(bp->b_dev) != RAW_PART)
708 if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
709 db1_printf(("Bounds check failed!!:%d %d\n",
710 (int) bp->b_blkno, (int) wlabel));
711 biodone(bp);
712 return;
713 }
714 s = splbio();
715
716 bp->b_resid = 0;
717
718 /* stuff it onto our queue */
719 BUFQ_INSERT_TAIL(&rs->buf_queue, bp);
720
721 raidstart(raidPtrs[raidID]);
722
723 splx(s);
724 }
725 /* ARGSUSED */
726 int
727 raidread(dev, uio, flags)
728 dev_t dev;
729 struct uio *uio;
730 int flags;
731 {
732 int unit = raidunit(dev);
733 struct raid_softc *rs;
734 int part;
735
736 if (unit >= numraid)
737 return (ENXIO);
738 rs = &raid_softc[unit];
739
740 if ((rs->sc_flags & RAIDF_INITED) == 0)
741 return (ENXIO);
742 part = DISKPART(dev);
743
744 db1_printf(("raidread: unit: %d partition: %d\n", unit, part));
745
746 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
747
748 }
749 /* ARGSUSED */
750 int
751 raidwrite(dev, uio, flags)
752 dev_t dev;
753 struct uio *uio;
754 int flags;
755 {
756 int unit = raidunit(dev);
757 struct raid_softc *rs;
758
759 if (unit >= numraid)
760 return (ENXIO);
761 rs = &raid_softc[unit];
762
763 if ((rs->sc_flags & RAIDF_INITED) == 0)
764 return (ENXIO);
765 db1_printf(("raidwrite\n"));
766 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
767
768 }
769
770 int
771 raidioctl(dev, cmd, data, flag, p)
772 dev_t dev;
773 u_long cmd;
774 caddr_t data;
775 int flag;
776 struct proc *p;
777 {
778 int unit = raidunit(dev);
779 int error = 0;
780 int part, pmask;
781 struct raid_softc *rs;
782 RF_Config_t *k_cfg, *u_cfg;
783 RF_Raid_t *raidPtr;
784 RF_RaidDisk_t *diskPtr;
785 RF_AccTotals_t *totals;
786 RF_DeviceConfig_t *d_cfg, **ucfgp;
787 u_char *specific_buf;
788 int retcode = 0;
789 int row;
790 int column;
791 struct rf_recon_req *rrcopy, *rr;
792 RF_ComponentLabel_t *clabel;
793 RF_ComponentLabel_t ci_label;
794 RF_ComponentLabel_t **clabel_ptr;
795 RF_SingleComponent_t *sparePtr,*componentPtr;
796 RF_SingleComponent_t hot_spare;
797 RF_SingleComponent_t component;
798 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
799 int i, j, d;
800 #ifdef __HAVE_OLD_DISKLABEL
801 struct disklabel newlabel;
802 #endif
803
804 if (unit >= numraid)
805 return (ENXIO);
806 rs = &raid_softc[unit];
807 raidPtr = raidPtrs[unit];
808
809 db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
810 (int) DISKPART(dev), (int) unit, (int) cmd));
811
812 /* Must be open for writes for these commands... */
813 switch (cmd) {
814 case DIOCSDINFO:
815 case DIOCWDINFO:
816 #ifdef __HAVE_OLD_DISKLABEL
817 case ODIOCWDINFO:
818 case ODIOCSDINFO:
819 #endif
820 case DIOCWLABEL:
821 if ((flag & FWRITE) == 0)
822 return (EBADF);
823 }
824
825 /* Must be initialized for these... */
826 switch (cmd) {
827 case DIOCGDINFO:
828 case DIOCSDINFO:
829 case DIOCWDINFO:
830 #ifdef __HAVE_OLD_DISKLABEL
831 case ODIOCGDINFO:
832 case ODIOCWDINFO:
833 case ODIOCSDINFO:
834 case ODIOCGDEFLABEL:
835 #endif
836 case DIOCGPART:
837 case DIOCWLABEL:
838 case DIOCGDEFLABEL:
839 case RAIDFRAME_SHUTDOWN:
840 case RAIDFRAME_REWRITEPARITY:
841 case RAIDFRAME_GET_INFO:
842 case RAIDFRAME_RESET_ACCTOTALS:
843 case RAIDFRAME_GET_ACCTOTALS:
844 case RAIDFRAME_KEEP_ACCTOTALS:
845 case RAIDFRAME_GET_SIZE:
846 case RAIDFRAME_FAIL_DISK:
847 case RAIDFRAME_COPYBACK:
848 case RAIDFRAME_CHECK_RECON_STATUS:
849 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
850 case RAIDFRAME_GET_COMPONENT_LABEL:
851 case RAIDFRAME_SET_COMPONENT_LABEL:
852 case RAIDFRAME_ADD_HOT_SPARE:
853 case RAIDFRAME_REMOVE_HOT_SPARE:
854 case RAIDFRAME_INIT_LABELS:
855 case RAIDFRAME_REBUILD_IN_PLACE:
856 case RAIDFRAME_CHECK_PARITY:
857 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
858 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
859 case RAIDFRAME_CHECK_COPYBACK_STATUS:
860 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
861 case RAIDFRAME_SET_AUTOCONFIG:
862 case RAIDFRAME_SET_ROOT:
863 case RAIDFRAME_DELETE_COMPONENT:
864 case RAIDFRAME_INCORPORATE_HOT_SPARE:
865 if ((rs->sc_flags & RAIDF_INITED) == 0)
866 return (ENXIO);
867 }
868
869 switch (cmd) {
870
871 /* configure the system */
872 case RAIDFRAME_CONFIGURE:
873
874 if (raidPtr->valid) {
875 /* There is a valid RAID set running on this unit! */
876 printf("raid%d: Device already configured!\n",unit);
877 return(EINVAL);
878 }
879
880 /* copy-in the configuration information */
881 /* data points to a pointer to the configuration structure */
882
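/*
 * Roughly, the userland side (normally raidctl(8)) does something like:
 *
 *	RF_Config_t cfg;
 *	RF_Config_t *cfgp = &cfg;
 *	(fill in cfg, then)
 *	ioctl(fd, RAIDFRAME_CONFIGURE, &cfgp);
 *
 * so "data" here holds a pointer to the user's RF_Config_t.
 * (A sketch only; raidctl is the real consumer.)
 */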
883 u_cfg = *((RF_Config_t **) data);
884 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
885 if (k_cfg == NULL) {
886 return (ENOMEM);
887 }
888 retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
889 sizeof(RF_Config_t));
890 if (retcode) {
891 RF_Free(k_cfg, sizeof(RF_Config_t));
892 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
893 retcode));
894 return (retcode);
895 }
896 /* allocate a buffer for the layout-specific data, and copy it
897 * in */
898 if (k_cfg->layoutSpecificSize) {
899 if (k_cfg->layoutSpecificSize > 10000) {
900 /* sanity check */
901 RF_Free(k_cfg, sizeof(RF_Config_t));
902 return (EINVAL);
903 }
904 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
905 (u_char *));
906 if (specific_buf == NULL) {
907 RF_Free(k_cfg, sizeof(RF_Config_t));
908 return (ENOMEM);
909 }
910 retcode = copyin(k_cfg->layoutSpecific,
911 (caddr_t) specific_buf,
912 k_cfg->layoutSpecificSize);
913 if (retcode) {
914 RF_Free(k_cfg, sizeof(RF_Config_t));
915 RF_Free(specific_buf,
916 k_cfg->layoutSpecificSize);
917 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
918 retcode));
919 return (retcode);
920 }
921 } else
922 specific_buf = NULL;
923 k_cfg->layoutSpecific = specific_buf;
924
925 /* should do some kind of sanity check on the configuration.
926 * Store the sum of all the bytes in the last byte? */
927
928 /* configure the system */
929
930 /*
931 * Clear the entire RAID descriptor, just to make sure
932 * there is no stale data left in the case of a
933 * reconfiguration
934 */
935 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
936 raidPtr->raidid = unit;
937
938 retcode = rf_Configure(raidPtr, k_cfg, NULL);
939
940 if (retcode == 0) {
941
942 /* allow this many simultaneous IO's to
943 this RAID device */
944 raidPtr->openings = RAIDOUTSTANDING;
945
946 raidinit(raidPtr);
947 rf_markalldirty(raidPtr);
948 }
949 /* free the buffers. No return code here. */
950 if (k_cfg->layoutSpecificSize) {
951 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
952 }
953 RF_Free(k_cfg, sizeof(RF_Config_t));
954
955 return (retcode);
956
957 /* shutdown the system */
958 case RAIDFRAME_SHUTDOWN:
959
960 if ((error = raidlock(rs)) != 0)
961 return (error);
962
963 /*
964 * If somebody has a partition mounted, we shouldn't
965 * shutdown.
966 */
967
968 part = DISKPART(dev);
969 pmask = (1 << part);
970 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
971 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
972 (rs->sc_dkdev.dk_copenmask & pmask))) {
973 raidunlock(rs);
974 return (EBUSY);
975 }
976
977 retcode = rf_Shutdown(raidPtr);
978
979 pool_destroy(&rs->sc_cbufpool);
980
981 /* It's no longer initialized... */
982 rs->sc_flags &= ~RAIDF_INITED;
983
984 /* Detach the disk. */
985 disk_detach(&rs->sc_dkdev);
986
987 raidunlock(rs);
988
989 return (retcode);
990 case RAIDFRAME_GET_COMPONENT_LABEL:
991 clabel_ptr = (RF_ComponentLabel_t **) data;
992 /* need to read the component label for the disk indicated
993 by row,column in clabel */
994
995 /* For practice, let's get it directly from disk, rather
996 than from the in-core copy */
997 RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
998 (RF_ComponentLabel_t *));
999 if (clabel == NULL)
1000 return (ENOMEM);
1001
1002 memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
1003
1004 retcode = copyin( *clabel_ptr, clabel,
1005 sizeof(RF_ComponentLabel_t));
1006
1007 if (retcode) {
1008 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1009 return(retcode);
1010 }
1011
1012 row = clabel->row;
1013 column = clabel->column;
1014
1015 if ((row < 0) || (row >= raidPtr->numRow) ||
1016 (column < 0) || (column >= raidPtr->numCol +
1017 raidPtr->numSpare)) {
1018 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1019 return(EINVAL);
1020 }
1021
1022 raidread_component_label(raidPtr->Disks[row][column].dev,
1023 raidPtr->raid_cinfo[row][column].ci_vp,
1024 clabel );
1025
1026 retcode = copyout((caddr_t) clabel,
1027 (caddr_t) *clabel_ptr,
1028 sizeof(RF_ComponentLabel_t));
1029 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1030 return (retcode);
1031
1032 case RAIDFRAME_SET_COMPONENT_LABEL:
1033 clabel = (RF_ComponentLabel_t *) data;
1034
1035 /* XXX check the label for valid stuff... */
1036 /* Note that some things *should not* get modified --
1037 the user should be re-initing the labels instead of
1038 trying to patch things.
1039 */
1040
1041 printf("Got component label:\n");
1042 printf("Version: %d\n",clabel->version);
1043 printf("Serial Number: %d\n",clabel->serial_number);
1044 printf("Mod counter: %d\n",clabel->mod_counter);
1045 printf("Row: %d\n", clabel->row);
1046 printf("Column: %d\n", clabel->column);
1047 printf("Num Rows: %d\n", clabel->num_rows);
1048 printf("Num Columns: %d\n", clabel->num_columns);
1049 printf("Clean: %d\n", clabel->clean);
1050 printf("Status: %d\n", clabel->status);
1051
1052 row = clabel->row;
1053 column = clabel->column;
1054
1055 if ((row < 0) || (row >= raidPtr->numRow) ||
1056 (column < 0) || (column >= raidPtr->numCol)) {
1057 return(EINVAL);
1058 }
1059
1060 /* XXX this isn't allowed to do anything for now :-) */
1061
1062 /* XXX and before it is, we need to fill in the rest
1063 of the fields!?!?!?! */
1064 #if 0
1065 raidwrite_component_label(
1066 raidPtr->Disks[row][column].dev,
1067 raidPtr->raid_cinfo[row][column].ci_vp,
1068 clabel );
1069 #endif
1070 return (0);
1071
1072 case RAIDFRAME_INIT_LABELS:
1073 clabel = (RF_ComponentLabel_t *) data;
1074 /*
1075 we only want the serial number from
1076 the above. We get all the rest of the information
1077 from the config that was used to create this RAID
1078 set.
1079 */
1080
1081 raidPtr->serial_number = clabel->serial_number;
1082
1083 raid_init_component_label(raidPtr, &ci_label);
1084 ci_label.serial_number = clabel->serial_number;
1085
1086 for(row=0;row<raidPtr->numRow;row++) {
1087 ci_label.row = row;
1088 for(column=0;column<raidPtr->numCol;column++) {
1089 diskPtr = &raidPtr->Disks[row][column];
1090 if (!RF_DEAD_DISK(diskPtr->status)) {
1091 ci_label.partitionSize = diskPtr->partitionSize;
1092 ci_label.column = column;
1093 raidwrite_component_label(
1094 raidPtr->Disks[row][column].dev,
1095 raidPtr->raid_cinfo[row][column].ci_vp,
1096 &ci_label );
1097 }
1098 }
1099 }
1100
1101 return (retcode);
1102 case RAIDFRAME_SET_AUTOCONFIG:
1103 d = rf_set_autoconfig(raidPtr, *(int *) data);
1104 printf("New autoconfig value is: %d\n", d);
1105 *(int *) data = d;
1106 return (retcode);
1107
1108 case RAIDFRAME_SET_ROOT:
1109 d = rf_set_rootpartition(raidPtr, *(int *) data);
1110 printf("New rootpartition value is: %d\n", d);
1111 *(int *) data = d;
1112 return (retcode);
1113
1114 /* initialize all parity */
1115 case RAIDFRAME_REWRITEPARITY:
1116
1117 if (raidPtr->Layout.map->faultsTolerated == 0) {
1118 /* Parity for RAID 0 is trivially correct */
1119 raidPtr->parity_good = RF_RAID_CLEAN;
1120 return(0);
1121 }
1122
1123 if (raidPtr->parity_rewrite_in_progress == 1) {
1124 /* Re-write is already in progress! */
1125 return(EINVAL);
1126 }
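/* The rewrite runs in its own kernel thread, so this ioctl returns
   immediately; userland polls progress through the
   RAIDFRAME_CHECK_PARITYREWRITE_STATUS* ioctls below. */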
1127
1128 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1129 rf_RewriteParityThread,
1130 raidPtr,"raid_parity");
1131 return (retcode);
1132
1133
1134 case RAIDFRAME_ADD_HOT_SPARE:
1135 sparePtr = (RF_SingleComponent_t *) data;
1136 memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1137 retcode = rf_add_hot_spare(raidPtr, &hot_spare);
1138 return(retcode);
1139
1140 case RAIDFRAME_REMOVE_HOT_SPARE:
1141 return(retcode);
1142
1143 case RAIDFRAME_DELETE_COMPONENT:
1144 componentPtr = (RF_SingleComponent_t *)data;
1145 memcpy( &component, componentPtr,
1146 sizeof(RF_SingleComponent_t));
1147 retcode = rf_delete_component(raidPtr, &component);
1148 return(retcode);
1149
1150 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1151 componentPtr = (RF_SingleComponent_t *)data;
1152 memcpy( &component, componentPtr,
1153 sizeof(RF_SingleComponent_t));
1154 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1155 return(retcode);
1156
1157 case RAIDFRAME_REBUILD_IN_PLACE:
1158
1159 if (raidPtr->Layout.map->faultsTolerated == 0) {
1160 /* Can't do this on a RAID 0!! */
1161 return(EINVAL);
1162 }
1163
1164 if (raidPtr->recon_in_progress == 1) {
1165 /* a reconstruct is already in progress! */
1166 return(EINVAL);
1167 }
1168
1169 componentPtr = (RF_SingleComponent_t *) data;
1170 memcpy( &component, componentPtr,
1171 sizeof(RF_SingleComponent_t));
1172 row = component.row;
1173 column = component.column;
1174 printf("Rebuild: %d %d\n",row, column);
1175 if ((row < 0) || (row >= raidPtr->numRow) ||
1176 (column < 0) || (column >= raidPtr->numCol)) {
1177 return(EINVAL);
1178 }
1179
1180 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1181 if (rrcopy == NULL)
1182 return(ENOMEM);
1183
1184 rrcopy->raidPtr = (void *) raidPtr;
1185 rrcopy->row = row;
1186 rrcopy->col = column;
1187
1188 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1189 rf_ReconstructInPlaceThread,
1190 rrcopy,"raid_reconip");
1191 return(retcode);
1192
1193 case RAIDFRAME_GET_INFO:
1194 if (!raidPtr->valid)
1195 return (ENODEV);
1196 ucfgp = (RF_DeviceConfig_t **) data;
1197 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1198 (RF_DeviceConfig_t *));
1199 if (d_cfg == NULL)
1200 return (ENOMEM);
1201 memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
1202 d_cfg->rows = raidPtr->numRow;
1203 d_cfg->cols = raidPtr->numCol;
1204 d_cfg->ndevs = raidPtr->numRow * raidPtr->numCol;
1205 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1206 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1207 return (ENOMEM);
1208 }
1209 d_cfg->nspares = raidPtr->numSpare;
1210 if (d_cfg->nspares >= RF_MAX_DISKS) {
1211 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1212 return (ENOMEM);
1213 }
1214 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1215 d = 0;
1216 for (i = 0; i < d_cfg->rows; i++) {
1217 for (j = 0; j < d_cfg->cols; j++) {
1218 d_cfg->devs[d] = raidPtr->Disks[i][j];
1219 d++;
1220 }
1221 }
1222 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1223 d_cfg->spares[i] = raidPtr->Disks[0][j];
1224 }
1225 retcode = copyout((caddr_t) d_cfg, (caddr_t) * ucfgp,
1226 sizeof(RF_DeviceConfig_t));
1227 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1228
1229 return (retcode);
1230
1231 case RAIDFRAME_CHECK_PARITY:
1232 *(int *) data = raidPtr->parity_good;
1233 return (0);
1234
1235 case RAIDFRAME_RESET_ACCTOTALS:
1236 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1237 return (0);
1238
1239 case RAIDFRAME_GET_ACCTOTALS:
1240 totals = (RF_AccTotals_t *) data;
1241 *totals = raidPtr->acc_totals;
1242 return (0);
1243
1244 case RAIDFRAME_KEEP_ACCTOTALS:
1245 raidPtr->keep_acc_totals = *(int *)data;
1246 return (0);
1247
1248 case RAIDFRAME_GET_SIZE:
1249 *(int *) data = raidPtr->totalSectors;
1250 return (0);
1251
1252 /* fail a disk & optionally start reconstruction */
1253 case RAIDFRAME_FAIL_DISK:
1254
1255 if (raidPtr->Layout.map->faultsTolerated == 0) {
1256 /* Can't do this on a RAID 0!! */
1257 return(EINVAL);
1258 }
1259
1260 rr = (struct rf_recon_req *) data;
1261
1262 if (rr->row < 0 || rr->row >= raidPtr->numRow
1263 || rr->col < 0 || rr->col >= raidPtr->numCol)
1264 return (EINVAL);
1265
1266 printf("raid%d: Failing the disk: row: %d col: %d\n",
1267 unit, rr->row, rr->col);
1268
1269 /* make a copy of the recon request so that we don't rely on
1270 * the user's buffer */
1271 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1272 if (rrcopy == NULL)
1273 return(ENOMEM);
1274 bcopy(rr, rrcopy, sizeof(*rr));
1275 rrcopy->raidPtr = (void *) raidPtr;
1276
1277 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1278 rf_ReconThread,
1279 rrcopy,"raid_recon");
1280 return (0);
1281
1282 /* invoke a copyback operation after recon on whatever disk
1283 * needs it, if any */
1284 case RAIDFRAME_COPYBACK:
1285
1286 if (raidPtr->Layout.map->faultsTolerated == 0) {
1287 /* This makes no sense on a RAID 0!! */
1288 return(EINVAL);
1289 }
1290
1291 if (raidPtr->copyback_in_progress == 1) {
1292 /* Copyback is already in progress! */
1293 return(EINVAL);
1294 }
1295
1296 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1297 rf_CopybackThread,
1298 raidPtr,"raid_copyback");
1299 return (retcode);
1300
1301 /* return the percentage completion of reconstruction */
1302 case RAIDFRAME_CHECK_RECON_STATUS:
1303 if (raidPtr->Layout.map->faultsTolerated == 0) {
1304 /* This makes no sense on a RAID 0, so tell the
1305 user it's done. */
1306 *(int *) data = 100;
1307 return(0);
1308 }
1309 row = 0; /* XXX we only consider a single row... */
1310 if (raidPtr->status[row] != rf_rs_reconstructing)
1311 *(int *) data = 100;
1312 else
1313 *(int *) data = raidPtr->reconControl[row]->percentComplete;
1314 return (0);
1315 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1316 progressInfoPtr = (RF_ProgressInfo_t **) data;
1317 row = 0; /* XXX we only consider a single row... */
1318 if (raidPtr->status[row] != rf_rs_reconstructing) {
1319 progressInfo.remaining = 0;
1320 progressInfo.completed = 100;
1321 progressInfo.total = 100;
1322 } else {
1323 progressInfo.total =
1324 raidPtr->reconControl[row]->numRUsTotal;
1325 progressInfo.completed =
1326 raidPtr->reconControl[row]->numRUsComplete;
1327 progressInfo.remaining = progressInfo.total -
1328 progressInfo.completed;
1329 }
1330 retcode = copyout((caddr_t) &progressInfo,
1331 (caddr_t) *progressInfoPtr,
1332 sizeof(RF_ProgressInfo_t));
1333 return (retcode);
1334
1335 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1336 if (raidPtr->Layout.map->faultsTolerated == 0) {
1337 /* This makes no sense on a RAID 0, so tell the
1338 user it's done. */
1339 *(int *) data = 100;
1340 return(0);
1341 }
1342 if (raidPtr->parity_rewrite_in_progress == 1) {
1343 *(int *) data = 100 *
1344 raidPtr->parity_rewrite_stripes_done /
1345 raidPtr->Layout.numStripe;
1346 } else {
1347 *(int *) data = 100;
1348 }
1349 return (0);
1350
1351 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1352 progressInfoPtr = (RF_ProgressInfo_t **) data;
1353 if (raidPtr->parity_rewrite_in_progress == 1) {
1354 progressInfo.total = raidPtr->Layout.numStripe;
1355 progressInfo.completed =
1356 raidPtr->parity_rewrite_stripes_done;
1357 progressInfo.remaining = progressInfo.total -
1358 progressInfo.completed;
1359 } else {
1360 progressInfo.remaining = 0;
1361 progressInfo.completed = 100;
1362 progressInfo.total = 100;
1363 }
1364 retcode = copyout((caddr_t) &progressInfo,
1365 (caddr_t) *progressInfoPtr,
1366 sizeof(RF_ProgressInfo_t));
1367 return (retcode);
1368
1369 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1370 if (raidPtr->Layout.map->faultsTolerated == 0) {
1371 /* This makes no sense on a RAID 0 */
1372 *(int *) data = 100;
1373 return(0);
1374 }
1375 if (raidPtr->copyback_in_progress == 1) {
1376 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1377 raidPtr->Layout.numStripe;
1378 } else {
1379 *(int *) data = 100;
1380 }
1381 return (0);
1382
1383 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1384 progressInfoPtr = (RF_ProgressInfo_t **) data;
1385 if (raidPtr->copyback_in_progress == 1) {
1386 progressInfo.total = raidPtr->Layout.numStripe;
1387 progressInfo.completed =
1388 raidPtr->copyback_stripes_done;
1389 progressInfo.remaining = progressInfo.total -
1390 progressInfo.completed;
1391 } else {
1392 progressInfo.remaining = 0;
1393 progressInfo.completed = 100;
1394 progressInfo.total = 100;
1395 }
1396 retcode = copyout((caddr_t) &progressInfo,
1397 (caddr_t) *progressInfoPtr,
1398 sizeof(RF_ProgressInfo_t));
1399 return (retcode);
1400
1401 /* the sparetable daemon calls this to wait for the kernel to
1402 * need a spare table. this ioctl does not return until a
1403 * spare table is needed. XXX -- calling mpsleep here in the
1404 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1405 * -- I should either compute the spare table in the kernel,
1406 * or have a different -- XXX XXX -- interface (a different
1407 * character device) for delivering the table -- XXX */
1408 #if 0
1409 case RAIDFRAME_SPARET_WAIT:
1410 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1411 while (!rf_sparet_wait_queue)
1412 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1413 waitreq = rf_sparet_wait_queue;
1414 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1415 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1416
1417 /* structure assignment */
1418 *((RF_SparetWait_t *) data) = *waitreq;
1419
1420 RF_Free(waitreq, sizeof(*waitreq));
1421 return (0);
1422
1423 /* wakes up a process waiting on SPARET_WAIT and puts an error
1424 * code in it that will cause the daemon to exit */
1425 case RAIDFRAME_ABORT_SPARET_WAIT:
1426 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1427 waitreq->fcol = -1;
1428 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1429 waitreq->next = rf_sparet_wait_queue;
1430 rf_sparet_wait_queue = waitreq;
1431 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1432 wakeup(&rf_sparet_wait_queue);
1433 return (0);
1434
1435 /* used by the spare table daemon to deliver a spare table
1436 * into the kernel */
1437 case RAIDFRAME_SEND_SPARET:
1438
1439 /* install the spare table */
1440 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1441
1442 /* respond to the requestor. the return status of the spare
1443 * table installation is passed in the "fcol" field */
1444 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1445 waitreq->fcol = retcode;
1446 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1447 waitreq->next = rf_sparet_resp_queue;
1448 rf_sparet_resp_queue = waitreq;
1449 wakeup(&rf_sparet_resp_queue);
1450 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1451
1452 return (retcode);
1453 #endif
1454
1455 default:
1456 break; /* fall through to the os-specific code below */
1457
1458 }
1459
1460 if (!raidPtr->valid)
1461 return (EINVAL);
1462
1463 /*
1464 * Add support for "regular" device ioctls here.
1465 */
1466
1467 switch (cmd) {
1468 case DIOCGDINFO:
1469 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1470 break;
1471 #ifdef __HAVE_OLD_DISKLABEL
1472 case ODIOCGDINFO:
1473 newlabel = *(rs->sc_dkdev.dk_label);
1474 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1475 return ENOTTY;
1476 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1477 break;
1478 #endif
1479
1480 case DIOCGPART:
1481 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1482 ((struct partinfo *) data)->part =
1483 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1484 break;
1485
1486 case DIOCWDINFO:
1487 case DIOCSDINFO:
1488 #ifdef __HAVE_OLD_DISKLABEL
1489 case ODIOCWDINFO:
1490 case ODIOCSDINFO:
1491 #endif
1492 {
1493 struct disklabel *lp;
1494 #ifdef __HAVE_OLD_DISKLABEL
1495 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1496 memset(&newlabel, 0, sizeof newlabel);
1497 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1498 lp = &newlabel;
1499 } else
1500 #endif
1501 lp = (struct disklabel *)data;
1502
1503 if ((error = raidlock(rs)) != 0)
1504 return (error);
1505
1506 rs->sc_flags |= RAIDF_LABELLING;
1507
1508 error = setdisklabel(rs->sc_dkdev.dk_label,
1509 lp, 0, rs->sc_dkdev.dk_cpulabel);
1510 if (error == 0) {
1511 if (cmd == DIOCWDINFO
1512 #ifdef __HAVE_OLD_DISKLABEL
1513 || cmd == ODIOCWDINFO
1514 #endif
1515 )
1516 error = writedisklabel(RAIDLABELDEV(dev),
1517 raidstrategy, rs->sc_dkdev.dk_label,
1518 rs->sc_dkdev.dk_cpulabel);
1519 }
1520 rs->sc_flags &= ~RAIDF_LABELLING;
1521
1522 raidunlock(rs);
1523
1524 if (error)
1525 return (error);
1526 break;
1527 }
1528
1529 case DIOCWLABEL:
1530 if (*(int *) data != 0)
1531 rs->sc_flags |= RAIDF_WLABEL;
1532 else
1533 rs->sc_flags &= ~RAIDF_WLABEL;
1534 break;
1535
1536 case DIOCGDEFLABEL:
1537 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1538 break;
1539
1540 #ifdef __HAVE_OLD_DISKLABEL
1541 case ODIOCGDEFLABEL:
1542 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1543 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1544 return ENOTTY;
1545 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1546 break;
1547 #endif
1548
1549 default:
1550 retcode = ENOTTY;
1551 }
1552 return (retcode);
1553
1554 }
1555
1556
1557 /* raidinit -- complete the rest of the initialization for the
1558 RAIDframe device. */
1559
1560
1561 static void
1562 raidinit(raidPtr)
1563 RF_Raid_t *raidPtr;
1564 {
1565 struct raid_softc *rs;
1566 int unit;
1567
1568 unit = raidPtr->raidid;
1569
1570 rs = &raid_softc[unit];
1571 pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
1572 0, 0, "raidpl", 0, NULL, NULL, M_RAIDFRAME);
1573
1574
1575 /* XXX should check return code first... */
1576 rs->sc_flags |= RAIDF_INITED;
1577
1578 sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds. */
1579
1580 rs->sc_dkdev.dk_name = rs->sc_xname;
1581
1582 /* disk_attach actually creates space for the CPU disklabel, among
1583 * other things, so it's critical to call this *BEFORE* we try putzing
1584 * with disklabels. */
1585
1586 disk_attach(&rs->sc_dkdev);
1587
1588 /* XXX There may be a weird interaction here between this, and
1589 * protectedSectors, as used in RAIDframe. */
1590
1591 rs->sc_size = raidPtr->totalSectors;
1592
1593 }
1594
1595 /* wake up the daemon & tell it to get us a spare table
1596 * XXX
1597 * the entries in the queues should be tagged with the raidPtr
1598 * so that in the extremely rare case that two recons happen at once,
1599 * we know for which device we're requesting a spare table
1600 * XXX
1601 *
1602 * XXX This code is not currently used. GO
1603 */
1604 int
1605 rf_GetSpareTableFromDaemon(req)
1606 RF_SparetWait_t *req;
1607 {
1608 int retcode;
1609
1610 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1611 req->next = rf_sparet_wait_queue;
1612 rf_sparet_wait_queue = req;
1613 wakeup(&rf_sparet_wait_queue);
1614
1615 /* mpsleep unlocks the mutex */
1616 while (!rf_sparet_resp_queue) {
1617 tsleep(&rf_sparet_resp_queue, PRIBIO,
1618 "raidframe getsparetable", 0);
1619 }
1620 req = rf_sparet_resp_queue;
1621 rf_sparet_resp_queue = req->next;
1622 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1623
1624 retcode = req->fcol;
1625 RF_Free(req, sizeof(*req)); /* this is not the same req as we
1626 * alloc'd */
1627 return (retcode);
1628 }
1629
1630 /* A wrapper around rf_DoAccess that extracts the appropriate info from
1631 * the bp and passes it down.
1632 * Any calls originating in the kernel must use non-blocking I/O.
1633 * Do some extra sanity checking to return "appropriate" error values for
1634 * certain conditions (to make some standard utilities work).
1635 *
1636 * Formerly known as: rf_DoAccessKernel
1637 */
1638 void
1639 raidstart(raidPtr)
1640 RF_Raid_t *raidPtr;
1641 {
1642 RF_SectorCount_t num_blocks, pb, sum;
1643 RF_RaidAddr_t raid_addr;
1644 int retcode;
1645 struct partition *pp;
1646 daddr_t blocknum;
1647 int unit;
1648 struct raid_softc *rs;
1649 int do_async;
1650 struct buf *bp;
1651
1652 unit = raidPtr->raidid;
1653 rs = &raid_softc[unit];
1654
1655 /* quick check to see if anything has died recently */
1656 RF_LOCK_MUTEX(raidPtr->mutex);
1657 if (raidPtr->numNewFailures > 0) {
1658 rf_update_component_labels(raidPtr,
1659 RF_NORMAL_COMPONENT_UPDATE);
1660 raidPtr->numNewFailures--;
1661 }
1662 RF_UNLOCK_MUTEX(raidPtr->mutex);
1663
1664 /* Check to see if we're at the limit... */
1665 RF_LOCK_MUTEX(raidPtr->mutex);
1666 while (raidPtr->openings > 0) {
1667 RF_UNLOCK_MUTEX(raidPtr->mutex);
1668
1669 /* get the next item, if any, from the queue */
1670 if ((bp = BUFQ_FIRST(&rs->buf_queue)) == NULL) {
1671 /* nothing more to do */
1672 return;
1673 }
1674 BUFQ_REMOVE(&rs->buf_queue, bp);
1675
1676 /* Ok, for the bp we have here, bp->b_blkno is relative to the
1677 * partition.. Need to make it absolute to the underlying
1678 * device.. */
1679
1680 blocknum = bp->b_blkno;
1681 if (DISKPART(bp->b_dev) != RAW_PART) {
1682 pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1683 blocknum += pp->p_offset;
1684 }
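/* For example (made-up numbers): a request for block 100 of a
   partition whose p_offset is 64 becomes absolute block 164 on the
   RAID device; bp->b_blkno itself is left alone. */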
1685
1686 db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1687 (int) blocknum));
1688
1689 db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1690 db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1691
1692 /* *THIS* is where we adjust what block we're going to...
1693 * but DO NOT TOUCH bp->b_blkno!!! */
1694 raid_addr = blocknum;
1695
1696 num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1697 pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1698 sum = raid_addr + num_blocks + pb;
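/* num_blocks is the count of full sectors in the request and pb is 1
   if b_bcount is not an exact multiple of the sector size; sum is thus
   the first sector past the request, which the checks below compare
   against totalSectors and against arithmetic wraparound. */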
1699 if (rf_debugKernelAccess) {
1700 db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1701 (int) raid_addr, (int) sum, (int) num_blocks,
1702 (int) pb, (int) bp->b_resid));
1703 }
1704 if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1705 || (sum < num_blocks) || (sum < pb)) {
1706 bp->b_error = ENOSPC;
1707 bp->b_flags |= B_ERROR;
1708 bp->b_resid = bp->b_bcount;
1709 biodone(bp);
1710 RF_LOCK_MUTEX(raidPtr->mutex);
1711 continue;
1712 }
1713 /*
1714 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1715 */
1716
1717 if (bp->b_bcount & raidPtr->sectorMask) {
1718 bp->b_error = EINVAL;
1719 bp->b_flags |= B_ERROR;
1720 bp->b_resid = bp->b_bcount;
1721 biodone(bp);
1722 RF_LOCK_MUTEX(raidPtr->mutex);
1723 continue;
1724
1725 }
1726 db1_printf(("Calling DoAccess..\n"));
1727
1728
1729 RF_LOCK_MUTEX(raidPtr->mutex);
1730 raidPtr->openings--;
1731 RF_UNLOCK_MUTEX(raidPtr->mutex);
1732
1733 /*
1734 * Everything is async.
1735 */
1736 do_async = 1;
1737
1738 disk_busy(&rs->sc_dkdev);
1739
1740 /* XXX we're still at splbio() here... do we *really*
1741 need to be? */
1742
1743 /* don't ever condition on bp->b_flags & B_WRITE.
1744 * always condition on B_READ instead */
1745
1746 retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1747 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1748 do_async, raid_addr, num_blocks,
1749 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
1750
1751 RF_LOCK_MUTEX(raidPtr->mutex);
1752 }
1753 RF_UNLOCK_MUTEX(raidPtr->mutex);
1754 }
1755
1756
1757
1758
1759 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1760
1761 int
1762 rf_DispatchKernelIO(queue, req)
1763 RF_DiskQueue_t *queue;
1764 RF_DiskQueueData_t *req;
1765 {
1766 int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1767 struct buf *bp;
1768 struct raidbuf *raidbp = NULL;
1769 struct raid_softc *rs;
1770 int unit;
1771 int s;
1772
1773 s=0;
1774 /* s = splbio();*/ /* want to test this */
1775 /* XXX along with the vnode, we also need the softc associated with
1776 * this device.. */
1777
1778 req->queue = queue;
1779
1780 unit = queue->raidPtr->raidid;
1781
1782 db1_printf(("DispatchKernelIO unit: %d\n", unit));
1783
1784 if (unit >= numraid) {
1785 printf("Invalid unit number: %d %d\n", unit, numraid);
1786 panic("Invalid Unit number in rf_DispatchKernelIO\n");
1787 }
1788 rs = &raid_softc[unit];
1789
1790 bp = req->bp;
1791 #if 1
1792 /* XXX when there is a physical disk failure, someone is passing us a
1793 * buffer that contains old stuff!! Attempt to deal with this problem
1794 * without taking a performance hit... (not sure where the real bug
1795 * is. It's buried in RAIDframe somewhere) :-( GO ) */
1796
1797 if (bp->b_flags & B_ERROR) {
1798 bp->b_flags &= ~B_ERROR;
1799 }
1800 if (bp->b_error != 0) {
1801 bp->b_error = 0;
1802 }
1803 #endif
1804 raidbp = RAIDGETBUF(rs);
1805
1806 raidbp->rf_flags = 0; /* XXX not really used anywhere... */
1807
1808 /*
1809 * context for raidiodone
1810 */
1811 raidbp->rf_obp = bp;
1812 raidbp->req = req;
1813
1814 LIST_INIT(&raidbp->rf_buf.b_dep);
1815
1816 switch (req->type) {
1817 case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1818 /* XXX need to do something extra here.. */
1819 /* I'm leaving this in, as I've never actually seen it used,
1820 * and I'd like folks to report it... GO */
1821 printf("WAKEUP CALLED\n");
1822 queue->numOutstanding++;
1823
1824 /* XXX need to glue the original buffer into this?? */
1825
1826 KernelWakeupFunc(&raidbp->rf_buf);
1827 break;
1828
1829 case RF_IO_TYPE_READ:
1830 case RF_IO_TYPE_WRITE:
1831
1832 if (req->tracerec) {
1833 RF_ETIMER_START(req->tracerec->timer);
1834 }
1835 InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1836 op | bp->b_flags, queue->rf_cinfo->ci_dev,
1837 req->sectorOffset, req->numSector,
1838 req->buf, KernelWakeupFunc, (void *) req,
1839 queue->raidPtr->logBytesPerSector, req->b_proc);
1840
1841 if (rf_debugKernelAccess) {
1842 db1_printf(("dispatch: bp->b_blkno = %ld\n",
1843 (long) bp->b_blkno));
1844 }
1845 queue->numOutstanding++;
1846 queue->last_deq_sector = req->sectorOffset;
1847 /* acc wouldn't have been let in if there were any pending
1848 * reqs at any other priority */
1849 queue->curPriority = req->priority;
1850
1851 db1_printf(("Going for %c to unit %d row %d col %d\n",
1852 req->type, unit, queue->row, queue->col));
1853 db1_printf(("sector %d count %d (%d bytes) %d\n",
1854 (int) req->sectorOffset, (int) req->numSector,
1855 (int) (req->numSector <<
1856 queue->raidPtr->logBytesPerSector),
1857 (int) queue->raidPtr->logBytesPerSector));
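/* For a write, account for the I/O on the component vnode
   (v_numoutput); biodone() on the component buf drops it again.
   Then hand the buf to the underlying driver. */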
1858 if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1859 raidbp->rf_buf.b_vp->v_numoutput++;
1860 }
1861 VOP_STRATEGY(&raidbp->rf_buf);
1862
1863 break;
1864
1865 default:
1866 panic("bad req->type in rf_DispatchKernelIO");
1867 }
1868 db1_printf(("Exiting from DispatchKernelIO\n"));
1869 /* splx(s); */ /* want to test this */
1870 return (0);
1871 }
1872 /* This is the callback function associated with an I/O invoked from
1873 kernel code.
1874 */
1875 static void
1876 KernelWakeupFunc(vbp)
1877 struct buf *vbp;
1878 {
1879 RF_DiskQueueData_t *req = NULL;
1880 RF_DiskQueue_t *queue;
1881 struct raidbuf *raidbp = (struct raidbuf *) vbp;
1882 struct buf *bp;
1883 struct raid_softc *rs;
1884 int unit;
1885 int s;
1886
1887 s = splbio();
1888 db1_printf(("recovering the request queue:\n"));
1889 req = raidbp->req;
1890
1891 bp = raidbp->rf_obp;
1892
1893 queue = (RF_DiskQueue_t *) req->queue;
1894
1895 if (raidbp->rf_buf.b_flags & B_ERROR) {
1896 bp->b_flags |= B_ERROR;
1897 bp->b_error = raidbp->rf_buf.b_error ?
1898 raidbp->rf_buf.b_error : EIO;
1899 }
1900
1901 /* XXX methinks this could be wrong... */
1902 #if 1
1903 bp->b_resid = raidbp->rf_buf.b_resid;
1904 #endif
1905
1906 if (req->tracerec) {
1907 RF_ETIMER_STOP(req->tracerec->timer);
1908 RF_ETIMER_EVAL(req->tracerec->timer);
1909 RF_LOCK_MUTEX(rf_tracing_mutex);
1910 req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1911 req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1912 req->tracerec->num_phys_ios++;
1913 RF_UNLOCK_MUTEX(rf_tracing_mutex);
1914 }
1915 bp->b_bcount = raidbp->rf_buf.b_bcount; /* XXXX ?? */
1916
1917 unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
1918
1919
1920 /* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
1921 * ballistic, and mark the component as hosed... */
1922
1923 if (bp->b_flags & B_ERROR) {
1924 /* Mark the disk as dead */
1925 /* but only mark it once... */
1926 if (queue->raidPtr->Disks[queue->row][queue->col].status ==
1927 rf_ds_optimal) {
1928 printf("raid%d: IO Error. Marking %s as failed.\n",
1929 unit, queue->raidPtr->Disks[queue->row][queue->col].devname);
1930 queue->raidPtr->Disks[queue->row][queue->col].status =
1931 rf_ds_failed;
1932 queue->raidPtr->status[queue->row] = rf_rs_degraded;
1933 queue->raidPtr->numFailures++;
1934 queue->raidPtr->numNewFailures++;
1935 } else { /* Disk is already dead... */
1936 /* printf("Disk already marked as dead!\n"); */
1937 }
1938
1939 }
1940
1941 rs = &raid_softc[unit];
1942 RAIDPUTBUF(rs, raidbp);
1943
1944 rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
1945 (req->CompleteFunc) (req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
1946
1947 splx(s);
1948 }
1949
1950
1951
1952 /*
1953 * initialize a buf structure for doing an I/O in the kernel.
1954 */
1955 static void
1956 InitBP(bp, b_vp, rw_flag, dev, startSect, numSect, buf, cbFunc, cbArg,
1957 logBytesPerSector, b_proc)
1958 struct buf *bp;
1959 struct vnode *b_vp;
1960 unsigned rw_flag;
1961 dev_t dev;
1962 RF_SectorNum_t startSect;
1963 RF_SectorCount_t numSect;
1964 caddr_t buf;
1965 void (*cbFunc) (struct buf *);
1966 void *cbArg;
1967 int logBytesPerSector;
1968 struct proc *b_proc;
1969 {
1970 /* bp->b_flags = B_PHYS | rw_flag; */
1971 bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1972 bp->b_bcount = numSect << logBytesPerSector;
1973 bp->b_bufsize = bp->b_bcount;
1974 bp->b_error = 0;
1975 bp->b_dev = dev;
1976 bp->b_data = buf;
1977 bp->b_blkno = startSect;
1978 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1979 if (bp->b_bcount == 0) {
1980 panic("bp->b_bcount is zero in InitBP!!\n");
1981 }
1982 bp->b_proc = b_proc;
1983 bp->b_iodone = cbFunc;
1984 bp->b_vp = b_vp;
1985
1986 }
1987
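/*
 * Fabricate a default disklabel for the raid device, based on the
 * geometry information in *raidPtr.
 */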
1988 static void
1989 raidgetdefaultlabel(raidPtr, rs, lp)
1990 RF_Raid_t *raidPtr;
1991 struct raid_softc *rs;
1992 struct disklabel *lp;
1993 {
1994 db1_printf(("Building a default label...\n"));
1995 memset(lp, 0, sizeof(*lp));
1996
1997 /* fabricate a label... */
1998 lp->d_secperunit = raidPtr->totalSectors;
1999 lp->d_secsize = raidPtr->bytesPerSector;
2000 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2001 lp->d_ntracks = 4 * raidPtr->numCol;
2002 lp->d_ncylinders = raidPtr->totalSectors /
2003 (lp->d_nsectors * lp->d_ntracks);
2004 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2005
2006 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2007 lp->d_type = DTYPE_RAID;
2008 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2009 lp->d_rpm = 3600;
2010 lp->d_interleave = 1;
2011 lp->d_flags = 0;
2012
2013 lp->d_partitions[RAW_PART].p_offset = 0;
2014 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2015 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2016 lp->d_npartitions = RAW_PART + 1;
2017
2018 lp->d_magic = DISKMAGIC;
2019 lp->d_magic2 = DISKMAGIC;
2020 	lp->d_checksum = dkcksum(lp);
2021
2022 }
2023 /*
2024 * Read the disklabel from the raid device. If one is not present, fake one
2025 * up.
2026 */
2027 static void
2028 raidgetdisklabel(dev)
2029 dev_t dev;
2030 {
2031 int unit = raidunit(dev);
2032 struct raid_softc *rs = &raid_softc[unit];
2033 char *errstring;
2034 struct disklabel *lp = rs->sc_dkdev.dk_label;
2035 struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2036 RF_Raid_t *raidPtr;
2037
2038 db1_printf(("Getting the disklabel...\n"));
2039
2040 memset(clp, 0, sizeof(*clp));
2041
2042 raidPtr = raidPtrs[unit];
2043
2044 raidgetdefaultlabel(raidPtr, rs, lp);
2045
2046 /*
2047 * Call the generic disklabel extraction routine.
2048 */
2049 errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2050 rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2051 if (errstring)
2052 raidmakedisklabel(rs);
2053 else {
2054 int i;
2055 struct partition *pp;
2056
2057 /*
2058 * Sanity check whether the found disklabel is valid.
2059 *
2060 		 * This is necessary since the total size of the raid device
2061 		 * may vary when the interleave is changed even though exactly
2062 		 * the same components are used, and the old disklabel may be
2063 		 * used if that is found.
2064 */
2065 if (lp->d_secperunit != rs->sc_size)
2066 printf("WARNING: %s: "
2067 "total sector size in disklabel (%d) != "
2068 "the size of raid (%ld)\n", rs->sc_xname,
2069 lp->d_secperunit, (long) rs->sc_size);
2070 for (i = 0; i < lp->d_npartitions; i++) {
2071 pp = &lp->d_partitions[i];
2072 if (pp->p_offset + pp->p_size > rs->sc_size)
2073 printf("WARNING: %s: end of partition `%c' "
2074 "exceeds the size of raid (%ld)\n",
2075 rs->sc_xname, 'a' + i, (long) rs->sc_size);
2076 }
2077 }
2078
2079 }
2080 /*
2081 * Take care of things one might want to take care of in the event
2082 * that a disklabel isn't present.
2083 */
2084 static void
2085 raidmakedisklabel(rs)
2086 struct raid_softc *rs;
2087 {
2088 struct disklabel *lp = rs->sc_dkdev.dk_label;
2089 db1_printf(("Making a label..\n"));
2090
2091 /*
2092 * For historical reasons, if there's no disklabel present
2093 * the raw partition must be marked FS_BSDFFS.
2094 */
2095
2096 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2097
2098 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2099
2100 lp->d_checksum = dkcksum(lp);
2101 }
2102 /*
2103 * Lookup the provided name in the filesystem. If the file exists,
2104 * is a valid block device, and isn't being used by anyone else,
2105 * set *vpp to the file's vnode.
2106 * You'll find the original of this in ccd.c
2107 */
2108 int
2109 raidlookup(path, p, vpp)
2110 char *path;
2111 struct proc *p;
2112 struct vnode **vpp; /* result */
2113 {
2114 struct nameidata nd;
2115 struct vnode *vp;
2116 struct vattr va;
2117 int error;
2118
2119 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
2120 if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
2121 #ifdef DEBUG
2122 printf("RAIDframe: vn_open returned %d\n", error);
2123 #endif
2124 return (error);
2125 }
2126 vp = nd.ni_vp;
2127 if (vp->v_usecount > 1) {
2128 VOP_UNLOCK(vp, 0);
2129 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2130 return (EBUSY);
2131 }
2132 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
2133 VOP_UNLOCK(vp, 0);
2134 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2135 return (error);
2136 }
2137 /* XXX: eventually we should handle VREG, too. */
2138 if (va.va_type != VBLK) {
2139 VOP_UNLOCK(vp, 0);
2140 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2141 return (ENOTBLK);
2142 }
2143 VOP_UNLOCK(vp, 0);
2144 *vpp = vp;
2145 return (0);
2146 }
2147 /*
2148 * Wait interruptibly for an exclusive lock.
2149 *
2150 * XXX
2151 * Several drivers do this; it should be abstracted and made MP-safe.
2152 * (Hmm... where have we seen this warning before :-> GO )
2153 */
2154 static int
2155 raidlock(rs)
2156 struct raid_softc *rs;
2157 {
2158 int error;
2159
2160 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2161 rs->sc_flags |= RAIDF_WANTED;
2162 if ((error =
2163 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2164 return (error);
2165 }
2166 rs->sc_flags |= RAIDF_LOCKED;
2167 return (0);
2168 }
2169 /*
2170 * Unlock and wake up any waiters.
2171 */
2172 static void
2173 raidunlock(rs)
2174 struct raid_softc *rs;
2175 {
2176
2177 rs->sc_flags &= ~RAIDF_LOCKED;
2178 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2179 rs->sc_flags &= ~RAIDF_WANTED;
2180 wakeup(rs);
2181 }
2182 }
2183
2184
2185 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2186 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2187
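/*
 * raidmarkclean() and raidmarkdirty() re-read the component label from
 * the given component, set the supplied mod_counter and the clean
 * (resp. dirty) flag, and write the label back.
 */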
2188 int
2189 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2190 {
2191 RF_ComponentLabel_t clabel;
2192 raidread_component_label(dev, b_vp, &clabel);
2193 clabel.mod_counter = mod_counter;
2194 clabel.clean = RF_RAID_CLEAN;
2195 raidwrite_component_label(dev, b_vp, &clabel);
2196 return(0);
2197 }
2198
2199
2200 int
2201 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2202 {
2203 RF_ComponentLabel_t clabel;
2204 raidread_component_label(dev, b_vp, &clabel);
2205 clabel.mod_counter = mod_counter;
2206 clabel.clean = RF_RAID_DIRTY;
2207 raidwrite_component_label(dev, b_vp, &clabel);
2208 return(0);
2209 }
2210
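/*
 * Read the component label from the reserved area at
 * RF_COMPONENT_INFO_OFFSET on the given component into *clabel.
 */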
2211 /* ARGSUSED */
2212 int
2213 raidread_component_label(dev, b_vp, clabel)
2214 dev_t dev;
2215 struct vnode *b_vp;
2216 RF_ComponentLabel_t *clabel;
2217 {
2218 struct buf *bp;
2219 int error;
2220
2221 /* XXX should probably ensure that we don't try to do this if
2222 someone has changed rf_protected_sectors. */
2223
2224 if (b_vp == NULL) {
2225 /* For whatever reason, this component is not valid.
2226 Don't try to read a component label from it. */
2227 return(EINVAL);
2228 }
2229
2230 /* get a block of the appropriate size... */
2231 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2232 bp->b_dev = dev;
2233
2234 /* get our ducks in a row for the read */
2235 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2236 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2237 bp->b_flags |= B_READ;
2238 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2239
2240 (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2241
2242 error = biowait(bp);
2243
2244 if (!error) {
2245 memcpy(clabel, bp->b_data,
2246 sizeof(RF_ComponentLabel_t));
2247 #if 0
2248 rf_print_component_label( clabel );
2249 #endif
2250 } else {
2251 #if 0
2252 printf("Failed to read RAID component label!\n");
2253 #endif
2254 }
2255
2256 brelse(bp);
2257 return(error);
2258 }
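/*
 * Write *clabel to the reserved component label area of the given
 * component.
 */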
2259 /* ARGSUSED */
2260 int
2261 raidwrite_component_label(dev, b_vp, clabel)
2262 dev_t dev;
2263 struct vnode *b_vp;
2264 RF_ComponentLabel_t *clabel;
2265 {
2266 struct buf *bp;
2267 int error;
2268
2269 /* get a block of the appropriate size... */
2270 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2271 bp->b_dev = dev;
2272
2273 /* get our ducks in a row for the write */
2274 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2275 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2276 bp->b_flags |= B_WRITE;
2277 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2278
2279 memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2280
2281 memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2282
2283 (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2284 error = biowait(bp);
2285 brelse(bp);
2286 if (error) {
2287 #if 1
2288 printf("Failed to write RAID component info!\n");
2289 #endif
2290 }
2291
2292 return(error);
2293 }
2294
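/*
 * Bump the mod_counter and mark the component label on every live
 * (non-failed) component as dirty.
 */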
2295 void
2296 rf_markalldirty(raidPtr)
2297 RF_Raid_t *raidPtr;
2298 {
2299 RF_ComponentLabel_t clabel;
2300 int r,c;
2301
2302 raidPtr->mod_counter++;
2303 for (r = 0; r < raidPtr->numRow; r++) {
2304 for (c = 0; c < raidPtr->numCol; c++) {
2305 /* we don't want to touch (at all) a disk that has
2306 failed */
2307 if (!RF_DEAD_DISK(raidPtr->Disks[r][c].status)) {
2308 raidread_component_label(
2309 raidPtr->Disks[r][c].dev,
2310 raidPtr->raid_cinfo[r][c].ci_vp,
2311 &clabel);
2312 if (clabel.status == rf_ds_spared) {
2313 /* XXX do something special...
2314 but whatever you do, don't
2315 try to access it!! */
2316 } else {
2317 #if 0
2318 clabel.status =
2319 raidPtr->Disks[r][c].status;
2320 raidwrite_component_label(
2321 raidPtr->Disks[r][c].dev,
2322 raidPtr->raid_cinfo[r][c].ci_vp,
2323 &clabel);
2324 #endif
2325 raidmarkdirty(
2326 raidPtr->Disks[r][c].dev,
2327 raidPtr->raid_cinfo[r][c].ci_vp,
2328 raidPtr->mod_counter);
2329 }
2330 }
2331 }
2332 }
2333 /* printf("Component labels marked dirty.\n"); */
2334 #if 0
2335 for( c = 0; c < raidPtr->numSpare ; c++) {
2336 sparecol = raidPtr->numCol + c;
2337 if (raidPtr->Disks[r][sparecol].status == rf_ds_used_spare) {
2338 /*
2339
2340 XXX this is where we get fancy and map this spare
2341 	   into its correct spot in the array.
2342
2343 */
2344 /*
2345
2346 we claim this disk is "optimal" if it's
2347 rf_ds_used_spare, as that means it should be
2348 directly substitutable for the disk it replaced.
2349 We note that too...
2350
2351 */
2352
2353 for(i=0;i<raidPtr->numRow;i++) {
2354 for(j=0;j<raidPtr->numCol;j++) {
2355 if ((raidPtr->Disks[i][j].spareRow ==
2356 r) &&
2357 (raidPtr->Disks[i][j].spareCol ==
2358 sparecol)) {
2359 srow = r;
2360 scol = sparecol;
2361 break;
2362 }
2363 }
2364 }
2365
2366 raidread_component_label(
2367 raidPtr->Disks[r][sparecol].dev,
2368 raidPtr->raid_cinfo[r][sparecol].ci_vp,
2369 &clabel);
2370 /* make sure status is noted */
2371 clabel.version = RF_COMPONENT_LABEL_VERSION;
2372 clabel.mod_counter = raidPtr->mod_counter;
2373 clabel.serial_number = raidPtr->serial_number;
2374 clabel.row = srow;
2375 clabel.column = scol;
2376 clabel.num_rows = raidPtr->numRow;
2377 clabel.num_columns = raidPtr->numCol;
2378 clabel.clean = RF_RAID_DIRTY; /* changed in a bit*/
2379 clabel.status = rf_ds_optimal;
2380 raidwrite_component_label(
2381 raidPtr->Disks[r][sparecol].dev,
2382 raidPtr->raid_cinfo[r][sparecol].ci_vp,
2383 &clabel);
2384 			raidmarkclean(raidPtr->Disks[r][sparecol].dev,
2385 			    raidPtr->raid_cinfo[r][sparecol].ci_vp, raidPtr->mod_counter);
2386 }
2387 }
2388
2389 #endif
2390 }
2391
2392
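/*
 * Update the component labels on all optimal components and used spares:
 * bump the mod_counter, record the current status, and, on a final update
 * with good parity, mark the labels clean as well.
 */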
2393 void
2394 rf_update_component_labels(raidPtr, final)
2395 RF_Raid_t *raidPtr;
2396 int final;
2397 {
2398 RF_ComponentLabel_t clabel;
2399 int sparecol;
2400 int r,c;
2401 int i,j;
2402 int srow, scol;
2403
2404 srow = -1;
2405 scol = -1;
2406
2407 /* XXX should do extra checks to make sure things really are clean,
2408 rather than blindly setting the clean bit... */
2409
2410 raidPtr->mod_counter++;
2411
2412 for (r = 0; r < raidPtr->numRow; r++) {
2413 for (c = 0; c < raidPtr->numCol; c++) {
2414 if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
2415 raidread_component_label(
2416 raidPtr->Disks[r][c].dev,
2417 raidPtr->raid_cinfo[r][c].ci_vp,
2418 &clabel);
2419 /* make sure status is noted */
2420 clabel.status = rf_ds_optimal;
2421 /* bump the counter */
2422 clabel.mod_counter = raidPtr->mod_counter;
2423
2424 raidwrite_component_label(
2425 raidPtr->Disks[r][c].dev,
2426 raidPtr->raid_cinfo[r][c].ci_vp,
2427 &clabel);
2428 if (final == RF_FINAL_COMPONENT_UPDATE) {
2429 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2430 raidmarkclean(
2431 raidPtr->Disks[r][c].dev,
2432 raidPtr->raid_cinfo[r][c].ci_vp,
2433 raidPtr->mod_counter);
2434 }
2435 }
2436 }
2437 /* else we don't touch it.. */
2438 }
2439 }
2440
2441 for( c = 0; c < raidPtr->numSpare ; c++) {
2442 sparecol = raidPtr->numCol + c;
2443 /* Need to ensure that the reconstruct actually completed! */
2444 if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
2445 /*
2446
2447 we claim this disk is "optimal" if it's
2448 rf_ds_used_spare, as that means it should be
2449 directly substitutable for the disk it replaced.
2450 We note that too...
2451
2452 */
2453
2454 for(i=0;i<raidPtr->numRow;i++) {
2455 for(j=0;j<raidPtr->numCol;j++) {
2456 if ((raidPtr->Disks[i][j].spareRow ==
2457 0) &&
2458 (raidPtr->Disks[i][j].spareCol ==
2459 sparecol)) {
2460 srow = i;
2461 scol = j;
2462 break;
2463 }
2464 }
2465 }
2466
2467 /* XXX shouldn't *really* need this... */
2468 raidread_component_label(
2469 raidPtr->Disks[0][sparecol].dev,
2470 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2471 &clabel);
2472 /* make sure status is noted */
2473
2474 raid_init_component_label(raidPtr, &clabel);
2475
2476 clabel.mod_counter = raidPtr->mod_counter;
2477 clabel.row = srow;
2478 clabel.column = scol;
2479 clabel.status = rf_ds_optimal;
2480
2481 raidwrite_component_label(
2482 raidPtr->Disks[0][sparecol].dev,
2483 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2484 &clabel);
2485 if (final == RF_FINAL_COMPONENT_UPDATE) {
2486 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2487 raidmarkclean( raidPtr->Disks[0][sparecol].dev,
2488 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2489 raidPtr->mod_counter);
2490 }
2491 }
2492 }
2493 }
2494 /* printf("Component labels updated\n"); */
2495 }
2496
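/*
 * Close the vnode for a single component.  Autoconfigured components
 * were opened without credentials, so close them with NOCRED; otherwise
 * use the engine thread's credentials.
 */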
2497 void
2498 rf_close_component(raidPtr, vp, auto_configured)
2499 RF_Raid_t *raidPtr;
2500 struct vnode *vp;
2501 int auto_configured;
2502 {
2503 struct proc *p;
2504
2505 p = raidPtr->engine_thread;
2506
2507 if (vp != NULL) {
2508 if (auto_configured == 1) {
2509 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2510 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2511 vput(vp);
2512
2513 } else {
2514 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2515 }
2516 } else {
2517 printf("vnode was NULL\n");
2518 }
2519 }
2520
2521
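/*
 * Close and release the vnodes for all components and spares of the array.
 */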
2522 void
2523 rf_UnconfigureVnodes(raidPtr)
2524 RF_Raid_t *raidPtr;
2525 {
2526 int r,c;
2527 struct proc *p;
2528 struct vnode *vp;
2529 int acd;
2530
2531
2532 /* We take this opportunity to close the vnodes like we should.. */
2533
2534 p = raidPtr->engine_thread;
2535
2536 for (r = 0; r < raidPtr->numRow; r++) {
2537 for (c = 0; c < raidPtr->numCol; c++) {
2538 printf("Closing vnode for row: %d col: %d\n", r, c);
2539 vp = raidPtr->raid_cinfo[r][c].ci_vp;
2540 acd = raidPtr->Disks[r][c].auto_configured;
2541 rf_close_component(raidPtr, vp, acd);
2542 raidPtr->raid_cinfo[r][c].ci_vp = NULL;
2543 raidPtr->Disks[r][c].auto_configured = 0;
2544 }
2545 }
2546 for (r = 0; r < raidPtr->numSpare; r++) {
2547 printf("Closing vnode for spare: %d\n", r);
2548 vp = raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp;
2549 acd = raidPtr->Disks[0][raidPtr->numCol + r].auto_configured;
2550 rf_close_component(raidPtr, vp, acd);
2551 raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp = NULL;
2552 raidPtr->Disks[0][raidPtr->numCol + r].auto_configured = 0;
2553 }
2554 }
2555
2556
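/*
 * Kernel thread body: fail the indicated component and, if requested,
 * reconstruct its contents onto a spare, then exit.
 */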
2557 void
2558 rf_ReconThread(req)
2559 struct rf_recon_req *req;
2560 {
2561 int s;
2562 RF_Raid_t *raidPtr;
2563
2564 s = splbio();
2565 raidPtr = (RF_Raid_t *) req->raidPtr;
2566 raidPtr->recon_in_progress = 1;
2567
2568 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
2569 ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2570
2571 /* XXX get rid of this! we don't need it at all.. */
2572 RF_Free(req, sizeof(*req));
2573
2574 raidPtr->recon_in_progress = 0;
2575 splx(s);
2576
2577 /* That's all... */
2578 kthread_exit(0); /* does not return */
2579 }
2580
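/*
 * Kernel thread body: rewrite the array's parity; on success, mark the
 * parity as good so the clean bits get set at shutdown.
 */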
2581 void
2582 rf_RewriteParityThread(raidPtr)
2583 RF_Raid_t *raidPtr;
2584 {
2585 int retcode;
2586 int s;
2587
2588 raidPtr->parity_rewrite_in_progress = 1;
2589 s = splbio();
2590 retcode = rf_RewriteParity(raidPtr);
2591 splx(s);
2592 if (retcode) {
2593 printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2594 } else {
2595 /* set the clean bit! If we shutdown correctly,
2596 the clean bit on each component label will get
2597 set */
2598 raidPtr->parity_good = RF_RAID_CLEAN;
2599 }
2600 raidPtr->parity_rewrite_in_progress = 0;
2601
2602 /* Anyone waiting for us to stop? If so, inform them... */
2603 if (raidPtr->waitShutdown) {
2604 wakeup(&raidPtr->parity_rewrite_in_progress);
2605 }
2606
2607 /* That's all... */
2608 kthread_exit(0); /* does not return */
2609 }
2610
2611
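/*
 * Kernel thread body: copy reconstructed data from a spare back onto a
 * replaced component.
 */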
2612 void
2613 rf_CopybackThread(raidPtr)
2614 RF_Raid_t *raidPtr;
2615 {
2616 int s;
2617
2618 raidPtr->copyback_in_progress = 1;
2619 s = splbio();
2620 rf_CopybackReconstructedData(raidPtr);
2621 splx(s);
2622 raidPtr->copyback_in_progress = 0;
2623
2624 /* That's all... */
2625 kthread_exit(0); /* does not return */
2626 }
2627
2628
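/*
 * Kernel thread body: reconstruct the indicated component in place
 * (back onto the same device), then exit.
 */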
2629 void
2630 rf_ReconstructInPlaceThread(req)
2631 struct rf_recon_req *req;
2632 {
2633 int retcode;
2634 int s;
2635 RF_Raid_t *raidPtr;
2636
2637 s = splbio();
2638 raidPtr = req->raidPtr;
2639 raidPtr->recon_in_progress = 1;
2640 retcode = rf_ReconstructInPlace(raidPtr, req->row, req->col);
2641 RF_Free(req, sizeof(*req));
2642 raidPtr->recon_in_progress = 0;
2643 splx(s);
2644
2645 /* That's all... */
2646 kthread_exit(0); /* does not return */
2647 }
2648
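/*
 * Mount-root hook for raid devices; currently does nothing.
 */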
2649 void
2650 rf_mountroot_hook(dev)
2651 struct device *dev;
2652 {
2653
2654 }
2655
2656
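/*
 * Scan all disk devices in the system, read their disklabels, and build
 * a list of RF_AutoConfig_t entries for each FS_RAID partition that
 * carries a plausible component label.
 */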
2657 RF_AutoConfig_t *
2658 rf_find_raid_components()
2659 {
2660 struct devnametobdevmaj *dtobdm;
2661 struct vnode *vp;
2662 struct disklabel label;
2663 struct device *dv;
2664 char *cd_name;
2665 dev_t dev;
2666 int error;
2667 int i;
2668 int good_one;
2669 RF_ComponentLabel_t *clabel;
2670 RF_AutoConfig_t *ac_list;
2671 RF_AutoConfig_t *ac;
2672
2673
2674 /* initialize the AutoConfig list */
2675 ac_list = NULL;
2676
2677 /* we begin by trolling through *all* the devices on the system */
2678
2679 for (dv = alldevs.tqh_first; dv != NULL;
2680 dv = dv->dv_list.tqe_next) {
2681
2682 /* we are only interested in disks... */
2683 if (dv->dv_class != DV_DISK)
2684 continue;
2685
2686 /* we don't care about floppies... */
2687 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"fd")) {
2688 continue;
2689 }
2690
2691 /* need to find the device_name_to_block_device_major stuff */
2692 cd_name = dv->dv_cfdata->cf_driver->cd_name;
2693 dtobdm = dev_name2blk;
2694 while (dtobdm->d_name && strcmp(dtobdm->d_name, cd_name)) {
2695 dtobdm++;
2696 }
2697
2698 /* get a vnode for the raw partition of this disk */
2699
2700 dev = MAKEDISKDEV(dtobdm->d_maj, dv->dv_unit, RAW_PART);
2701 if (bdevvp(dev, &vp))
2702 panic("RAID can't alloc vnode");
2703
2704 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2705
2706 if (error) {
2707 /* "Who cares." Continue looking
2708 for something that exists*/
2709 vput(vp);
2710 continue;
2711 }
2712
2713 /* Ok, the disk exists. Go get the disklabel. */
2714 error = VOP_IOCTL(vp, DIOCGDINFO, (caddr_t)&label,
2715 FREAD, NOCRED, 0);
2716 if (error) {
2717 /*
2718 * XXX can't happen - open() would
2719 * have errored out (or faked up one)
2720 */
2721 printf("can't get label for dev %s%c (%d)!?!?\n",
2722 dv->dv_xname, 'a' + RAW_PART, error);
2723 }
2724
2725 /* don't need this any more. We'll allocate it again
2726 a little later if we really do... */
2727 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2728 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2729 vput(vp);
2730
2731 for (i=0; i < label.d_npartitions; i++) {
2732 /* We only support partitions marked as RAID */
2733 if (label.d_partitions[i].p_fstype != FS_RAID)
2734 continue;
2735
2736 dev = MAKEDISKDEV(dtobdm->d_maj, dv->dv_unit, i);
2737 if (bdevvp(dev, &vp))
2738 panic("RAID can't alloc vnode");
2739
2740 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2741 if (error) {
2742 /* Whatever... */
2743 vput(vp);
2744 continue;
2745 }
2746
2747 good_one = 0;
2748
2749 clabel = (RF_ComponentLabel_t *)
2750 malloc(sizeof(RF_ComponentLabel_t),
2751 M_RAIDFRAME, M_NOWAIT);
2752 if (clabel == NULL) {
2753 /* XXX CLEANUP HERE */
2754 printf("RAID auto config: out of memory!\n");
2755 return(NULL); /* XXX probably should panic? */
2756 }
2757
2758 if (!raidread_component_label(dev, vp, clabel)) {
2759 /* Got the label. Does it look reasonable? */
2760 if (rf_reasonable_label(clabel) &&
2761 (clabel->partitionSize <=
2762 label.d_partitions[i].p_size)) {
2763 #if DEBUG
2764 printf("Component on: %s%c: %d\n",
2765 dv->dv_xname, 'a'+i,
2766 label.d_partitions[i].p_size);
2767 rf_print_component_label(clabel);
2768 #endif
2769 /* if it's reasonable, add it,
2770 else ignore it. */
2771 ac = (RF_AutoConfig_t *)
2772 malloc(sizeof(RF_AutoConfig_t),
2773 M_RAIDFRAME,
2774 M_NOWAIT);
2775 if (ac == NULL) {
2776 /* XXX should panic?? */
2777 return(NULL);
2778 }
2779
2780 sprintf(ac->devname, "%s%c",
2781 dv->dv_xname, 'a'+i);
2782 ac->dev = dev;
2783 ac->vp = vp;
2784 ac->clabel = clabel;
2785 ac->next = ac_list;
2786 ac_list = ac;
2787 good_one = 1;
2788 }
2789 }
2790 if (!good_one) {
2791 /* cleanup */
2792 free(clabel, M_RAIDFRAME);
2793 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2794 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2795 vput(vp);
2796 }
2797 }
2798 }
2799 return(ac_list);
2800 }
2801
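/*
 * Sanity-check a component label read from disk.  Returns 1 if the
 * fields look consistent, 0 otherwise.
 */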
2802 static int
2803 rf_reasonable_label(clabel)
2804 RF_ComponentLabel_t *clabel;
2805 {
2806
2807 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2808 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2809 ((clabel->clean == RF_RAID_CLEAN) ||
2810 (clabel->clean == RF_RAID_DIRTY)) &&
2811 clabel->row >=0 &&
2812 clabel->column >= 0 &&
2813 clabel->num_rows > 0 &&
2814 clabel->num_columns > 0 &&
2815 clabel->row < clabel->num_rows &&
2816 clabel->column < clabel->num_columns &&
2817 clabel->blockSize > 0 &&
2818 clabel->numBlocks > 0) {
2819 /* label looks reasonable enough... */
2820 return(1);
2821 }
2822 return(0);
2823 }
2824
2825
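/*
 * Print the contents of a component label (debugging aid).
 */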
2826 void
2827 rf_print_component_label(clabel)
2828 RF_ComponentLabel_t *clabel;
2829 {
2830 printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
2831 clabel->row, clabel->column,
2832 clabel->num_rows, clabel->num_columns);
2833 printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
2834 clabel->version, clabel->serial_number,
2835 clabel->mod_counter);
2836 printf(" Clean: %s Status: %d\n",
2837 clabel->clean ? "Yes" : "No", clabel->status );
2838 printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
2839 clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
2840 printf(" RAID Level: %c blocksize: %d numBlocks: %d\n",
2841 (char) clabel->parityConfig, clabel->blockSize,
2842 clabel->numBlocks);
2843 printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
2844 printf(" Contains root partition: %s\n",
2845 clabel->root_partition ? "Yes" : "No" );
2846 printf(" Last configured as: raid%d\n", clabel->last_unit );
2847 #if 0
2848 printf(" Config order: %d\n", clabel->config_order);
2849 #endif
2850
2851 }
2852
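/*
 * Sort the autoconfig list into configuration sets: components whose
 * labels match (as determined by rf_does_it_fit()) end up in the same
 * RF_ConfigSet_t.
 */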
2853 RF_ConfigSet_t *
2854 rf_create_auto_sets(ac_list)
2855 RF_AutoConfig_t *ac_list;
2856 {
2857 RF_AutoConfig_t *ac;
2858 RF_ConfigSet_t *config_sets;
2859 RF_ConfigSet_t *cset;
2860 RF_AutoConfig_t *ac_next;
2861
2862
2863 config_sets = NULL;
2864
2865 /* Go through the AutoConfig list, and figure out which components
2866 belong to what sets. */
2867 ac = ac_list;
2868 while(ac!=NULL) {
2869 /* we're going to putz with ac->next, so save it here
2870 for use at the end of the loop */
2871 ac_next = ac->next;
2872
2873 if (config_sets == NULL) {
2874 /* will need at least this one... */
2875 config_sets = (RF_ConfigSet_t *)
2876 malloc(sizeof(RF_ConfigSet_t),
2877 M_RAIDFRAME, M_NOWAIT);
2878 if (config_sets == NULL) {
2879 panic("rf_create_auto_sets: No memory!\n");
2880 }
2881 /* this one is easy :) */
2882 config_sets->ac = ac;
2883 config_sets->next = NULL;
2884 config_sets->rootable = 0;
2885 ac->next = NULL;
2886 } else {
2887 /* which set does this component fit into? */
2888 cset = config_sets;
2889 while(cset!=NULL) {
2890 if (rf_does_it_fit(cset, ac)) {
2891 /* looks like it matches... */
2892 ac->next = cset->ac;
2893 cset->ac = ac;
2894 break;
2895 }
2896 cset = cset->next;
2897 }
2898 if (cset==NULL) {
2899 /* didn't find a match above... new set..*/
2900 cset = (RF_ConfigSet_t *)
2901 malloc(sizeof(RF_ConfigSet_t),
2902 M_RAIDFRAME, M_NOWAIT);
2903 if (cset == NULL) {
2904 panic("rf_create_auto_sets: No memory!\n");
2905 }
2906 cset->ac = ac;
2907 ac->next = NULL;
2908 cset->next = config_sets;
2909 cset->rootable = 0;
2910 config_sets = cset;
2911 }
2912 }
2913 ac = ac_next;
2914 }
2915
2916
2917 return(config_sets);
2918 }
2919
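/*
 * Return 1 if the component described by ac belongs with the components
 * already in cset (i.e. their labels match), 0 otherwise.
 */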
2920 static int
2921 rf_does_it_fit(cset, ac)
2922 RF_ConfigSet_t *cset;
2923 RF_AutoConfig_t *ac;
2924 {
2925 RF_ComponentLabel_t *clabel1, *clabel2;
2926
2927 /* If this one matches the *first* one in the set, that's good
2928 enough, since the other members of the set would have been
2929 through here too... */
2930 /* note that we are not checking partitionSize here..
2931
2932 Note that we are also not checking the mod_counters here.
2933 	   If everything else matches except the mod_counter, that's
2934 good enough for this test. We will deal with the mod_counters
2935 a little later in the autoconfiguration process.
2936
2937 (clabel1->mod_counter == clabel2->mod_counter) &&
2938
2939 The reason we don't check for this is that failed disks
2940 will have lower modification counts. If those disks are
2941 not added to the set they used to belong to, then they will
2942 form their own set, which may result in 2 different sets,
2943 for example, competing to be configured at raid0, and
2944 perhaps competing to be the root filesystem set. If the
2945 wrong ones get configured, or both attempt to become /,
2946 	   weird behaviour and/or serious lossage will occur. Thus we
2947 need to bring them into the fold here, and kick them out at
2948 a later point.
2949
2950 */
2951
2952 clabel1 = cset->ac->clabel;
2953 clabel2 = ac->clabel;
2954 if ((clabel1->version == clabel2->version) &&
2955 (clabel1->serial_number == clabel2->serial_number) &&
2956 (clabel1->num_rows == clabel2->num_rows) &&
2957 (clabel1->num_columns == clabel2->num_columns) &&
2958 (clabel1->sectPerSU == clabel2->sectPerSU) &&
2959 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
2960 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
2961 (clabel1->parityConfig == clabel2->parityConfig) &&
2962 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
2963 (clabel1->blockSize == clabel2->blockSize) &&
2964 (clabel1->numBlocks == clabel2->numBlocks) &&
2965 (clabel1->autoconfigure == clabel2->autoconfigure) &&
2966 (clabel1->root_partition == clabel2->root_partition) &&
2967 (clabel1->last_unit == clabel2->last_unit) &&
2968 (clabel1->config_order == clabel2->config_order)) {
2969 		/* if it gets here, it almost *has* to be a match */
2970 } else {
2971 /* it's not consistent with somebody in the set..
2972 punt */
2973 return(0);
2974 }
2975 /* all was fine.. it must fit... */
2976 return(1);
2977 }
2978
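/*
 * Decide whether a configuration set has enough live components (at the
 * most recent mod_counter) to be configured.  RAID 1 is special-cased so
 * that losing one half of a mirror pair is tolerated.
 */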
2979 int
2980 rf_have_enough_components(cset)
2981 RF_ConfigSet_t *cset;
2982 {
2983 RF_AutoConfig_t *ac;
2984 RF_AutoConfig_t *auto_config;
2985 RF_ComponentLabel_t *clabel;
2986 int r,c;
2987 int num_rows;
2988 int num_cols;
2989 int num_missing;
2990 int mod_counter;
2991 int mod_counter_found;
2992 int even_pair_failed;
2993 char parity_type;
2994
2995
2996 /* check to see that we have enough 'live' components
2997 of this set. If so, we can configure it if necessary */
2998
2999 num_rows = cset->ac->clabel->num_rows;
3000 num_cols = cset->ac->clabel->num_columns;
3001 parity_type = cset->ac->clabel->parityConfig;
3002
3003 /* XXX Check for duplicate components!?!?!? */
3004
3005 /* Determine what the mod_counter is supposed to be for this set. */
3006
3007 mod_counter_found = 0;
3008 mod_counter = 0;
3009 ac = cset->ac;
3010 while(ac!=NULL) {
3011 if (mod_counter_found==0) {
3012 mod_counter = ac->clabel->mod_counter;
3013 mod_counter_found = 1;
3014 } else {
3015 if (ac->clabel->mod_counter > mod_counter) {
3016 mod_counter = ac->clabel->mod_counter;
3017 }
3018 }
3019 ac = ac->next;
3020 }
3021
3022 num_missing = 0;
3023 auto_config = cset->ac;
3024
3025 for(r=0; r<num_rows; r++) {
3026 even_pair_failed = 0;
3027 for(c=0; c<num_cols; c++) {
3028 ac = auto_config;
3029 while(ac!=NULL) {
3030 if ((ac->clabel->row == r) &&
3031 (ac->clabel->column == c) &&
3032 (ac->clabel->mod_counter == mod_counter)) {
3033 /* it's this one... */
3034 #if DEBUG
3035 printf("Found: %s at %d,%d\n",
3036 ac->devname,r,c);
3037 #endif
3038 break;
3039 }
3040 ac=ac->next;
3041 }
3042 if (ac==NULL) {
3043 /* Didn't find one here! */
3044 /* special case for RAID 1, especially
3045 where there are more than 2
3046 components (where RAIDframe treats
3047 things a little differently :( ) */
3048 if (parity_type == '1') {
3049 if (c%2 == 0) { /* even component */
3050 even_pair_failed = 1;
3051 } else { /* odd component. If
3052 we're failed, and
3053 so is the even
3054 component, it's
3055 "Good Night, Charlie" */
3056 if (even_pair_failed == 1) {
3057 return(0);
3058 }
3059 }
3060 } else {
3061 /* normal accounting */
3062 num_missing++;
3063 }
3064 }
3065 if ((parity_type == '1') && (c%2 == 1)) {
3066 				/* Just finished an even/odd mirror pair
3067 				   without bailing.. reset the even_pair_failed
3068 				   flag and go on to the next pair.... */
3069 even_pair_failed = 0;
3070 }
3071 }
3072 }
3073
3074 clabel = cset->ac->clabel;
3075
3076 if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3077 ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3078 ((clabel->parityConfig == '5') && (num_missing > 1))) {
3079 /* XXX this needs to be made *much* more general */
3080 /* Too many failures */
3081 return(0);
3082 }
3083 /* otherwise, all is well, and we've got enough to take a kick
3084 at autoconfiguring this set */
3085 return(1);
3086 }
3087
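/*
 * Build an RF_Config_t from the component labels in an autoconfig list,
 * filling in the geometry, parity configuration, and component device
 * names.
 */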
3088 void
3089 rf_create_configuration(ac,config,raidPtr)
3090 RF_AutoConfig_t *ac;
3091 RF_Config_t *config;
3092 RF_Raid_t *raidPtr;
3093 {
3094 RF_ComponentLabel_t *clabel;
3095 int i;
3096
3097 clabel = ac->clabel;
3098
3099 /* 1. Fill in the common stuff */
3100 config->numRow = clabel->num_rows;
3101 config->numCol = clabel->num_columns;
3102 config->numSpare = 0; /* XXX should this be set here? */
3103 config->sectPerSU = clabel->sectPerSU;
3104 config->SUsPerPU = clabel->SUsPerPU;
3105 config->SUsPerRU = clabel->SUsPerRU;
3106 config->parityConfig = clabel->parityConfig;
3107 /* XXX... */
3108 strcpy(config->diskQueueType,"fifo");
3109 config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3110 config->layoutSpecificSize = 0; /* XXX ?? */
3111
3112 while(ac!=NULL) {
3113 		/* row/col values will be in range due to the checks
3114 		   in rf_reasonable_label() */
3115 strcpy(config->devnames[ac->clabel->row][ac->clabel->column],
3116 ac->devname);
3117 ac = ac->next;
3118 }
3119
3120 for(i=0;i<RF_MAXDBGV;i++) {
3121 		config->debugVars[i][0] = '\0';
3122 }
3123 }
3124
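/*
 * Set the autoconfigure flag for the array and propagate it into the
 * component label of every optimal component.
 */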
3125 int
3126 rf_set_autoconfig(raidPtr, new_value)
3127 RF_Raid_t *raidPtr;
3128 int new_value;
3129 {
3130 RF_ComponentLabel_t clabel;
3131 struct vnode *vp;
3132 dev_t dev;
3133 int row, column;
3134
3135 raidPtr->autoconfigure = new_value;
3136 for(row=0; row<raidPtr->numRow; row++) {
3137 for(column=0; column<raidPtr->numCol; column++) {
3138 if (raidPtr->Disks[row][column].status ==
3139 rf_ds_optimal) {
3140 dev = raidPtr->Disks[row][column].dev;
3141 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3142 raidread_component_label(dev, vp, &clabel);
3143 clabel.autoconfigure = new_value;
3144 raidwrite_component_label(dev, vp, &clabel);
3145 }
3146 }
3147 }
3148 return(new_value);
3149 }
3150
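/*
 * Set the root_partition flag for the array and propagate it into the
 * component label of every optimal component.
 */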
3151 int
3152 rf_set_rootpartition(raidPtr, new_value)
3153 RF_Raid_t *raidPtr;
3154 int new_value;
3155 {
3156 RF_ComponentLabel_t clabel;
3157 struct vnode *vp;
3158 dev_t dev;
3159 int row, column;
3160
3161 raidPtr->root_partition = new_value;
3162 for(row=0; row<raidPtr->numRow; row++) {
3163 for(column=0; column<raidPtr->numCol; column++) {
3164 if (raidPtr->Disks[row][column].status ==
3165 rf_ds_optimal) {
3166 dev = raidPtr->Disks[row][column].dev;
3167 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3168 raidread_component_label(dev, vp, &clabel);
3169 clabel.root_partition = new_value;
3170 raidwrite_component_label(dev, vp, &clabel);
3171 }
3172 }
3173 }
3174 return(new_value);
3175 }
3176
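/*
 * Close and release the vnodes held by every component in a
 * configuration set.
 */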
3177 void
3178 rf_release_all_vps(cset)
3179 RF_ConfigSet_t *cset;
3180 {
3181 RF_AutoConfig_t *ac;
3182
3183 ac = cset->ac;
3184 while(ac!=NULL) {
3185 /* Close the vp, and give it back */
3186 if (ac->vp) {
3187 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3188 VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3189 vput(ac->vp);
3190 ac->vp = NULL;
3191 }
3192 ac = ac->next;
3193 }
3194 }
3195
3196
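/*
 * Free the component labels, the autoconfig entries, and the
 * configuration set itself.
 */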
3197 void
3198 rf_cleanup_config_set(cset)
3199 RF_ConfigSet_t *cset;
3200 {
3201 RF_AutoConfig_t *ac;
3202 RF_AutoConfig_t *next_ac;
3203
3204 ac = cset->ac;
3205 while(ac!=NULL) {
3206 next_ac = ac->next;
3207 /* nuke the label */
3208 free(ac->clabel, M_RAIDFRAME);
3209 /* cleanup the config structure */
3210 free(ac, M_RAIDFRAME);
3211 /* "next.." */
3212 ac = next_ac;
3213 }
3214 /* and, finally, nuke the config set */
3215 free(cset, M_RAIDFRAME);
3216 }
3217
3218
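/*
 * Initialize *clabel from the current state of the array (geometry,
 * serial number, mod_counter, etc.).
 */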
3219 void
3220 raid_init_component_label(raidPtr, clabel)
3221 RF_Raid_t *raidPtr;
3222 RF_ComponentLabel_t *clabel;
3223 {
3224 /* current version number */
3225 clabel->version = RF_COMPONENT_LABEL_VERSION;
3226 clabel->serial_number = raidPtr->serial_number;
3227 clabel->mod_counter = raidPtr->mod_counter;
3228 clabel->num_rows = raidPtr->numRow;
3229 clabel->num_columns = raidPtr->numCol;
3230 clabel->clean = RF_RAID_DIRTY; /* not clean */
3231 clabel->status = rf_ds_optimal; /* "It's good!" */
3232
3233 clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3234 clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3235 clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3236
3237 clabel->blockSize = raidPtr->bytesPerSector;
3238 clabel->numBlocks = raidPtr->sectorsPerDisk;
3239
3240 /* XXX not portable */
3241 clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3242 clabel->maxOutstanding = raidPtr->maxOutstanding;
3243 clabel->autoconfigure = raidPtr->autoconfigure;
3244 clabel->root_partition = raidPtr->root_partition;
3245 clabel->last_unit = raidPtr->raidid;
3246 clabel->config_order = raidPtr->config_order;
3247 }
3248
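/*
 * Configure an array from an autoconfig set: pick a raid unit (preferring
 * the one it was last configured as), build the configuration, and bring
 * the array up.  Returns 0 on success, with *unit set to the unit used.
 */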
3249 int
3250 rf_auto_config_set(cset,unit)
3251 RF_ConfigSet_t *cset;
3252 int *unit;
3253 {
3254 RF_Raid_t *raidPtr;
3255 RF_Config_t *config;
3256 int raidID;
3257 int retcode;
3258
3259 printf("RAID autoconfigure\n");
3260
3261 retcode = 0;
3262 *unit = -1;
3263
3264 /* 1. Create a config structure */
3265
3266 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3267 M_RAIDFRAME,
3268 M_NOWAIT);
3269 if (config==NULL) {
3270 printf("Out of mem!?!?\n");
3271 /* XXX do something more intelligent here. */
3272 return(1);
3273 }
3274
3275 memset(config, 0, sizeof(RF_Config_t));
3276
3277 /* XXX raidID needs to be set correctly.. */
3278
3279 /*
3280 2. Figure out what RAID ID this one is supposed to live at
3281 See if we can get the same RAID dev that it was configured
3282 on last time..
3283 */
3284
3285 raidID = cset->ac->clabel->last_unit;
3286 if ((raidID < 0) || (raidID >= numraid)) {
3287 /* let's not wander off into lala land. */
3288 raidID = numraid - 1;
3289 }
3290 if (raidPtrs[raidID]->valid != 0) {
3291
3292 /*
3293 Nope... Go looking for an alternative...
3294 Start high so we don't immediately use raid0 if that's
3295 not taken.
3296 */
3297
3298 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3299 if (raidPtrs[raidID]->valid == 0) {
3300 /* can use this one! */
3301 break;
3302 }
3303 }
3304 }
3305
3306 if (raidID < 0) {
3307 /* punt... */
3308 printf("Unable to auto configure this set!\n");
3309 printf("(Out of RAID devs!)\n");
3310 return(1);
3311 }
3312 printf("Configuring raid%d:\n",raidID);
3313 raidPtr = raidPtrs[raidID];
3314
3315 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3316 raidPtr->raidid = raidID;
3317 raidPtr->openings = RAIDOUTSTANDING;
3318
3319 /* 3. Build the configuration structure */
3320 rf_create_configuration(cset->ac, config, raidPtr);
3321
3322 /* 4. Do the configuration */
3323 retcode = rf_Configure(raidPtr, config, cset->ac);
3324
3325 if (retcode == 0) {
3326
3327 raidinit(raidPtrs[raidID]);
3328
3329 rf_markalldirty(raidPtrs[raidID]);
3330 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3331 if (cset->ac->clabel->root_partition==1) {
3332 /* everything configured just fine. Make a note
3333 that this set is eligible to be root. */
3334 cset->rootable = 1;
3335 /* XXX do this here? */
3336 raidPtrs[raidID]->root_partition = 1;
3337 }
3338 }
3339
3340 /* 5. Cleanup */
3341 free(config, M_RAIDFRAME);
3342
3343 *unit = raidID;
3344 return(retcode);
3345 }
3346
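/*
 * Record completion of an access in the disk statistics for the raid
 * device.
 */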
3347 void
3348 rf_disk_unbusy(desc)
3349 RF_RaidAccessDesc_t *desc;
3350 {
3351 struct buf *bp;
3352
3353 bp = (struct buf *)desc->bp;
3354 disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3355 (bp->b_bcount - bp->b_resid));
3356 }
3357