     1 /*	$NetBSD: rf_netbsdkintf.c,v 1.124 2002/07/13 17:47:44 oster Exp $	*/
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1988 University of Utah.
40 * Copyright (c) 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 * notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 * notice, this list of conditions and the following disclaimer in the
54 * documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 * must display the following acknowledgement:
57 * This product includes software developed by the University of
58 * California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 *
77 * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 */
79
80
81
82
83 /*
84 * Copyright (c) 1995 Carnegie-Mellon University.
85 * All rights reserved.
86 *
87 * Authors: Mark Holland, Jim Zelenka
88 *
89 * Permission to use, copy, modify and distribute this software and
90 * its documentation is hereby granted, provided that both the copyright
91 * notice and this permission notice appear in all copies of the
92 * software, derivative works or modified versions, and any portions
93 * thereof, and that both notices appear in supporting documentation.
94 *
95 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
96 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
97 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
98 *
99 * Carnegie Mellon requests users of this software to return to
100 *
101 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
102 * School of Computer Science
103 * Carnegie Mellon University
104 * Pittsburgh PA 15213-3890
105 *
106 * any improvements or extensions that they make and grant Carnegie the
107 * rights to redistribute these changes.
108 */
109
110 /***********************************************************
111 *
    112  * rf_netbsdkintf.c -- the kernel interface routines for RAIDframe
113 *
114 ***********************************************************/
115
116 #include <sys/cdefs.h>
117 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.124 2002/07/13 17:47:44 oster Exp $");
118
119 #include <sys/param.h>
120 #include <sys/errno.h>
121 #include <sys/pool.h>
122 #include <sys/queue.h>
123 #include <sys/disk.h>
124 #include <sys/device.h>
125 #include <sys/stat.h>
126 #include <sys/ioctl.h>
127 #include <sys/fcntl.h>
128 #include <sys/systm.h>
129 #include <sys/namei.h>
130 #include <sys/vnode.h>
131 #include <sys/disklabel.h>
132 #include <sys/conf.h>
133 #include <sys/lock.h>
134 #include <sys/buf.h>
135 #include <sys/user.h>
136 #include <sys/reboot.h>
137
138 #include <dev/raidframe/raidframevar.h>
139 #include <dev/raidframe/raidframeio.h>
140 #include "raid.h"
141 #include "opt_raid_autoconfig.h"
142 #include "rf_raid.h"
143 #include "rf_copyback.h"
144 #include "rf_dag.h"
145 #include "rf_dagflags.h"
146 #include "rf_desc.h"
147 #include "rf_diskqueue.h"
148 #include "rf_acctrace.h"
149 #include "rf_etimer.h"
150 #include "rf_general.h"
151 #include "rf_debugMem.h"
152 #include "rf_kintf.h"
153 #include "rf_options.h"
154 #include "rf_driver.h"
155 #include "rf_parityscan.h"
156 #include "rf_debugprint.h"
157 #include "rf_threadstuff.h"
158
159 int rf_kdebug_level = 0;
160
161 #ifdef DEBUG
    162 #define db1_printf(a) do { if (rf_kdebug_level > 0) printf a; } while (0)
163 #else /* DEBUG */
    164 #define db1_printf(a) do { } while (0)
165 #endif /* DEBUG */
166
167 static RF_Raid_t **raidPtrs; /* global raid device descriptors */
168
169 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
170
171 static RF_SparetWait_t *rf_sparet_wait_queue; /* requests to install a
172 * spare table */
173 static RF_SparetWait_t *rf_sparet_resp_queue; /* responses from
174 * installation process */
175
176 /* prototypes */
177 static void KernelWakeupFunc(struct buf * bp);
178 static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
179 dev_t dev, RF_SectorNum_t startSect,
180 RF_SectorCount_t numSect, caddr_t buf,
181 void (*cbFunc) (struct buf *), void *cbArg,
182 int logBytesPerSector, struct proc * b_proc);
183 static void raidinit(RF_Raid_t *);
184
185 void raidattach(int);
186 int raidsize(dev_t);
187 int raidopen(dev_t, int, int, struct proc *);
188 int raidclose(dev_t, int, int, struct proc *);
189 int raidioctl(dev_t, u_long, caddr_t, int, struct proc *);
190 int raidwrite(dev_t, struct uio *, int);
191 int raidread(dev_t, struct uio *, int);
192 void raidstrategy(struct buf *);
193 int raiddump(dev_t, daddr_t, caddr_t, size_t);
194
195 /*
196 * Pilfered from ccd.c
197 */
198
199 struct raidbuf {
200 struct buf rf_buf; /* new I/O buf. MUST BE FIRST!!! */
201 struct buf *rf_obp; /* ptr. to original I/O buf */
202 int rf_flags; /* misc. flags */
203 RF_DiskQueueData_t *req;/* the request that this was part of.. */
204 };
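/*
 * The struct buf must be the first member: the struct buf * passed to
 * KernelWakeupFunc() on I/O completion is simply cast back to a
 * struct raidbuf * to recover the original request.
 */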
205
206 /* component buffer pool */
207 struct pool raidframe_cbufpool;
208
209 #define RAIDGETBUF(rs) pool_get(&raidframe_cbufpool, PR_NOWAIT)
210 #define RAIDPUTBUF(rs, cbp) pool_put(&raidframe_cbufpool, cbp)
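/*
 * RAIDGETBUF/RAIDPUTBUF allocate and free a struct raidbuf from the
 * component buffer pool initialized in raidattach().  PR_NOWAIT means
 * pool_get() returns NULL instead of sleeping when the pool is
 * exhausted; the rs argument is currently unused.
 */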
211
212 /* XXX Not sure if the following should be replacing the raidPtrs above,
213 or if it should be used in conjunction with that...
214 */
215
216 struct raid_softc {
217 int sc_flags; /* flags */
218 int sc_cflags; /* configuration flags */
219 size_t sc_size; /* size of the raid device */
220 char sc_xname[20]; /* XXX external name */
221 struct disk sc_dkdev; /* generic disk device info */
222 struct buf_queue buf_queue; /* used for the device queue */
223 };
224 /* sc_flags */
225 #define RAIDF_INITED 0x01 /* unit has been initialized */
226 #define RAIDF_WLABEL 0x02 /* label area is writable */
227 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
228 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
229 #define RAIDF_LOCKED 0x80 /* unit is locked */
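/*
 * RAIDF_WANTED and RAIDF_LOCKED implement the simple sleep lock
 * provided by raidlock()/raidunlock() below; it serializes open,
 * close, unconfiguration and disklabel operations on a unit.
 */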
230
231 #define raidunit(x) DISKUNIT(x)
232 int numraid = 0;
233
234 /*
235 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
236 * Be aware that large numbers can allow the driver to consume a lot of
237 * kernel memory, especially on writes, and in degraded mode reads.
238 *
239 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
240 * a single 64K write will typically require 64K for the old data,
241 * 64K for the old parity, and 64K for the new parity, for a total
242 * of 192K (if the parity buffer is not re-used immediately).
    243  * Even if it is used immediately, that's still 128K, which when multiplied
244 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
245 *
246 * Now in degraded mode, for example, a 64K read on the above setup may
247 * require data reconstruction, which will require *all* of the 4 remaining
248 * disks to participate -- 4 * 32K/disk == 128K again.
249 */
250
251 #ifndef RAIDOUTSTANDING
252 #define RAIDOUTSTANDING 6
253 #endif
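/*
 * Since RAIDOUTSTANDING is only defined here when it is not already
 * defined, it can be overridden at kernel build time, e.g. (assuming
 * the definition is passed through to the compilation of this file)
 * with something like
 *
 *	options 	RAIDOUTSTANDING=10
 *
 * in the kernel configuration file, or -DRAIDOUTSTANDING=10 in CFLAGS.
 */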
254
255 #define RAIDLABELDEV(dev) \
256 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
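/*
 * RAIDLABELDEV(dev) maps any partition of a RAID unit onto the raw
 * partition of that same unit; the disklabel is always read from and
 * written to that device (see the writedisklabel() call in raidioctl()).
 */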
257
258 /* declared here, and made public, for the benefit of KVM stuff.. */
259 struct raid_softc *raid_softc;
260
261 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
262 struct disklabel *);
263 static void raidgetdisklabel(dev_t);
264 static void raidmakedisklabel(struct raid_softc *);
265
266 static int raidlock(struct raid_softc *);
267 static void raidunlock(struct raid_softc *);
268
269 static void rf_markalldirty(RF_Raid_t *);
270 void rf_mountroot_hook(struct device *);
271
272 struct device *raidrootdev;
273
274 void rf_ReconThread(struct rf_recon_req *);
275 /* XXX what I want is: */
276 /*void rf_ReconThread(RF_Raid_t *raidPtr); */
277 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
278 void rf_CopybackThread(RF_Raid_t *raidPtr);
279 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
280 void rf_buildroothack(void *);
281
282 RF_AutoConfig_t *rf_find_raid_components(void);
283 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
284 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
285 static int rf_reasonable_label(RF_ComponentLabel_t *);
286 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
287 int rf_set_autoconfig(RF_Raid_t *, int);
288 int rf_set_rootpartition(RF_Raid_t *, int);
289 void rf_release_all_vps(RF_ConfigSet_t *);
290 void rf_cleanup_config_set(RF_ConfigSet_t *);
291 int rf_have_enough_components(RF_ConfigSet_t *);
292 int rf_auto_config_set(RF_ConfigSet_t *, int *);
293
294 static int raidautoconfig = 0; /* Debugging, mostly. Set to 0 to not
295 allow autoconfig to take place.
296 Note that this is overridden by having
297 RAID_AUTOCONFIG as an option in the
298 kernel config file. */
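/*
 * With the option compiled in, e.g. via
 *
 *	options 	RAID_AUTOCONFIG
 *
 * in the kernel configuration file (see the opt_raid_autoconfig.h
 * include above), raidautoconfig is forced to 1 in raidattach() below.
 */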
299
300 void
301 raidattach(num)
302 int num;
303 {
304 int raidID;
305 int i, rc;
306 RF_AutoConfig_t *ac_list; /* autoconfig list */
307 RF_ConfigSet_t *config_sets;
308
309 #ifdef DEBUG
310 printf("raidattach: Asked for %d units\n", num);
311 #endif
312
313 if (num <= 0) {
314 #ifdef DIAGNOSTIC
315 panic("raidattach: count <= 0");
316 #endif
317 return;
318 }
319 /* This is where all the initialization stuff gets done. */
320
321 numraid = num;
322
323 /* Make some space for requested number of units... */
324
325 RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
326 if (raidPtrs == NULL) {
327 panic("raidPtrs is NULL!!\n");
328 }
329
330 /* Initialize the component buffer pool. */
331 pool_init(&raidframe_cbufpool, sizeof(struct raidbuf), 0,
332 0, 0, "raidpl", NULL);
333
334 rc = rf_mutex_init(&rf_sparet_wait_mutex);
335 if (rc) {
336 RF_PANIC();
337 }
338
339 rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
340
341 for (i = 0; i < num; i++)
342 raidPtrs[i] = NULL;
343 rc = rf_BootRaidframe();
344 if (rc == 0)
345 printf("Kernelized RAIDframe activated\n");
346 else
347 panic("Serious error booting RAID!!\n");
348
    349 	/* put together some data structures like the CCD device does.. This
350 * lets us lock the device and what-not when it gets opened. */
351
352 raid_softc = (struct raid_softc *)
353 malloc(num * sizeof(struct raid_softc),
354 M_RAIDFRAME, M_NOWAIT);
355 if (raid_softc == NULL) {
356 printf("WARNING: no memory for RAIDframe driver\n");
357 return;
358 }
359
360 memset(raid_softc, 0, num * sizeof(struct raid_softc));
361
362 raidrootdev = (struct device *)malloc(num * sizeof(struct device),
363 M_RAIDFRAME, M_NOWAIT);
364 if (raidrootdev == NULL) {
365 panic("No memory for RAIDframe driver!!?!?!\n");
366 }
367
368 for (raidID = 0; raidID < num; raidID++) {
369 BUFQ_INIT(&raid_softc[raidID].buf_queue);
370
371 raidrootdev[raidID].dv_class = DV_DISK;
372 raidrootdev[raidID].dv_cfdata = NULL;
373 raidrootdev[raidID].dv_unit = raidID;
374 raidrootdev[raidID].dv_parent = NULL;
375 raidrootdev[raidID].dv_flags = 0;
376 sprintf(raidrootdev[raidID].dv_xname,"raid%d",raidID);
377
378 RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
379 (RF_Raid_t *));
380 if (raidPtrs[raidID] == NULL) {
381 printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
382 numraid = raidID;
383 return;
384 }
385 }
386
387 #ifdef RAID_AUTOCONFIG
388 raidautoconfig = 1;
389 #endif
390
391 if (raidautoconfig) {
392 /* 1. locate all RAID components on the system */
393
394 #if DEBUG
395 printf("Searching for raid components...\n");
396 #endif
397 ac_list = rf_find_raid_components();
398
399 /* 2. sort them into their respective sets */
400
401 config_sets = rf_create_auto_sets(ac_list);
402
403 /* 3. evaluate each set and configure the valid ones
404 This gets done in rf_buildroothack() */
405
406 /* schedule the creation of the thread to do the
407 "/ on RAID" stuff */
408
409 kthread_create(rf_buildroothack,config_sets);
410
411 #if 0
412 mountroothook_establish(rf_mountroot_hook, &raidrootdev[0]);
413 #endif
414 }
415
416 }
417
418 void
419 rf_buildroothack(arg)
420 void *arg;
421 {
422 RF_ConfigSet_t *config_sets = arg;
423 RF_ConfigSet_t *cset;
424 RF_ConfigSet_t *next_cset;
425 int retcode;
426 int raidID;
427 int rootID;
428 int num_root;
429
430 rootID = 0;
431 num_root = 0;
432 cset = config_sets;
433 while(cset != NULL ) {
434 next_cset = cset->next;
435 if (rf_have_enough_components(cset) &&
436 cset->ac->clabel->autoconfigure==1) {
437 retcode = rf_auto_config_set(cset,&raidID);
438 if (!retcode) {
439 if (cset->rootable) {
440 rootID = raidID;
441 num_root++;
442 }
443 } else {
444 /* The autoconfig didn't work :( */
445 #if DEBUG
446 printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
447 #endif
448 rf_release_all_vps(cset);
449 }
450 } else {
451 /* we're not autoconfiguring this set...
452 release the associated resources */
453 rf_release_all_vps(cset);
454 }
455 /* cleanup */
456 rf_cleanup_config_set(cset);
457 cset = next_cset;
458 }
459
460 /* we found something bootable... */
461
462 if (num_root == 1) {
463 booted_device = &raidrootdev[rootID];
464 } else if (num_root > 1) {
465 /* we can't guess.. require the user to answer... */
466 boothowto |= RB_ASKNAME;
467 }
468 }
469
470
471 int
472 raidsize(dev)
473 dev_t dev;
474 {
475 struct raid_softc *rs;
476 struct disklabel *lp;
477 int part, unit, omask, size;
478
479 unit = raidunit(dev);
480 if (unit >= numraid)
481 return (-1);
482 rs = &raid_softc[unit];
483
484 if ((rs->sc_flags & RAIDF_INITED) == 0)
485 return (-1);
486
487 part = DISKPART(dev);
488 omask = rs->sc_dkdev.dk_openmask & (1 << part);
489 lp = rs->sc_dkdev.dk_label;
490
491 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
492 return (-1);
493
494 if (lp->d_partitions[part].p_fstype != FS_SWAP)
495 size = -1;
496 else
497 size = lp->d_partitions[part].p_size *
498 (lp->d_secsize / DEV_BSIZE);
499
500 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
501 return (-1);
502
503 return (size);
504
505 }
506
507 int
508 raiddump(dev, blkno, va, size)
509 dev_t dev;
510 daddr_t blkno;
511 caddr_t va;
512 size_t size;
513 {
514 /* Not implemented. */
515 return ENXIO;
516 }
517 /* ARGSUSED */
518 int
519 raidopen(dev, flags, fmt, p)
520 dev_t dev;
521 int flags, fmt;
522 struct proc *p;
523 {
524 int unit = raidunit(dev);
525 struct raid_softc *rs;
526 struct disklabel *lp;
527 int part, pmask;
528 int error = 0;
529
530 if (unit >= numraid)
531 return (ENXIO);
532 rs = &raid_softc[unit];
533
534 if ((error = raidlock(rs)) != 0)
535 return (error);
536 lp = rs->sc_dkdev.dk_label;
537
538 part = DISKPART(dev);
539 pmask = (1 << part);
540
541 db1_printf(("Opening raid device number: %d partition: %d\n",
542 unit, part));
543
544
545 if ((rs->sc_flags & RAIDF_INITED) &&
546 (rs->sc_dkdev.dk_openmask == 0))
547 raidgetdisklabel(dev);
548
549 /* make sure that this partition exists */
550
551 if (part != RAW_PART) {
552 db1_printf(("Not a raw partition..\n"));
553 if (((rs->sc_flags & RAIDF_INITED) == 0) ||
554 ((part >= lp->d_npartitions) ||
555 (lp->d_partitions[part].p_fstype == FS_UNUSED))) {
556 error = ENXIO;
557 raidunlock(rs);
558 db1_printf(("Bailing out...\n"));
559 return (error);
560 }
561 }
562 /* Prevent this unit from being unconfigured while open. */
563 switch (fmt) {
564 case S_IFCHR:
565 rs->sc_dkdev.dk_copenmask |= pmask;
566 break;
567
568 case S_IFBLK:
569 rs->sc_dkdev.dk_bopenmask |= pmask;
570 break;
571 }
572
573 if ((rs->sc_dkdev.dk_openmask == 0) &&
574 ((rs->sc_flags & RAIDF_INITED) != 0)) {
575 /* First one... mark things as dirty... Note that we *MUST*
576 have done a configure before this. I DO NOT WANT TO BE
577 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
578 THAT THEY BELONG TOGETHER!!!!! */
579 /* XXX should check to see if we're only open for reading
580 here... If so, we needn't do this, but then need some
581 other way of keeping track of what's happened.. */
582
583 rf_markalldirty( raidPtrs[unit] );
584 }
585
586
587 rs->sc_dkdev.dk_openmask =
588 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
589
590 raidunlock(rs);
591
592 return (error);
593
594
595 }
596 /* ARGSUSED */
597 int
598 raidclose(dev, flags, fmt, p)
599 dev_t dev;
600 int flags, fmt;
601 struct proc *p;
602 {
603 int unit = raidunit(dev);
604 struct raid_softc *rs;
605 int error = 0;
606 int part;
607
608 if (unit >= numraid)
609 return (ENXIO);
610 rs = &raid_softc[unit];
611
612 if ((error = raidlock(rs)) != 0)
613 return (error);
614
615 part = DISKPART(dev);
616
617 /* ...that much closer to allowing unconfiguration... */
618 switch (fmt) {
619 case S_IFCHR:
620 rs->sc_dkdev.dk_copenmask &= ~(1 << part);
621 break;
622
623 case S_IFBLK:
624 rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
625 break;
626 }
627 rs->sc_dkdev.dk_openmask =
628 rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
629
630 if ((rs->sc_dkdev.dk_openmask == 0) &&
631 ((rs->sc_flags & RAIDF_INITED) != 0)) {
    632 		/* Last one... the device is not unconfigured yet.
    633 		   If RAIDF_INITED were not set, device shutdown would
    634 		   already have taken care of setting the clean bits;
    635 		   since it is set, mark things as clean here. */
636 #if 0
637 printf("Last one on raid%d. Updating status.\n",unit);
638 #endif
639 rf_update_component_labels(raidPtrs[unit],
640 RF_FINAL_COMPONENT_UPDATE);
641 if (doing_shutdown) {
642 /* last one, and we're going down, so
643 lights out for this RAID set too. */
644 error = rf_Shutdown(raidPtrs[unit]);
645
646 /* It's no longer initialized... */
647 rs->sc_flags &= ~RAIDF_INITED;
648
649 /* Detach the disk. */
650 disk_detach(&rs->sc_dkdev);
651 }
652 }
653
654 raidunlock(rs);
655 return (0);
656
657 }
658
659 void
660 raidstrategy(bp)
661 struct buf *bp;
662 {
663 int s;
664
665 unsigned int raidID = raidunit(bp->b_dev);
666 RF_Raid_t *raidPtr;
667 struct raid_softc *rs = &raid_softc[raidID];
668 struct disklabel *lp;
669 int wlabel;
670
671 if ((rs->sc_flags & RAIDF_INITED) ==0) {
672 bp->b_error = ENXIO;
673 bp->b_flags |= B_ERROR;
674 bp->b_resid = bp->b_bcount;
675 biodone(bp);
676 return;
677 }
678 if (raidID >= numraid || !raidPtrs[raidID]) {
679 bp->b_error = ENODEV;
680 bp->b_flags |= B_ERROR;
681 bp->b_resid = bp->b_bcount;
682 biodone(bp);
683 return;
684 }
685 raidPtr = raidPtrs[raidID];
686 if (!raidPtr->valid) {
687 bp->b_error = ENODEV;
688 bp->b_flags |= B_ERROR;
689 bp->b_resid = bp->b_bcount;
690 biodone(bp);
691 return;
692 }
693 if (bp->b_bcount == 0) {
694 db1_printf(("b_bcount is zero..\n"));
695 biodone(bp);
696 return;
697 }
698 lp = rs->sc_dkdev.dk_label;
699
700 /*
701 * Do bounds checking and adjust transfer. If there's an
702 * error, the bounds check will flag that for us.
703 */
704
705 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
706 if (DISKPART(bp->b_dev) != RAW_PART)
707 if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
708 db1_printf(("Bounds check failed!!:%d %d\n",
709 (int) bp->b_blkno, (int) wlabel));
710 biodone(bp);
711 return;
712 }
713 s = splbio();
714
715 bp->b_resid = 0;
716
717 /* stuff it onto our queue */
718 BUFQ_INSERT_TAIL(&rs->buf_queue, bp);
719
720 raidstart(raidPtrs[raidID]);
721
722 splx(s);
723 }
724 /* ARGSUSED */
725 int
726 raidread(dev, uio, flags)
727 dev_t dev;
728 struct uio *uio;
729 int flags;
730 {
731 int unit = raidunit(dev);
732 struct raid_softc *rs;
733 int part;
734
735 if (unit >= numraid)
736 return (ENXIO);
737 rs = &raid_softc[unit];
738
739 if ((rs->sc_flags & RAIDF_INITED) == 0)
740 return (ENXIO);
741 part = DISKPART(dev);
742
743 db1_printf(("raidread: unit: %d partition: %d\n", unit, part));
744
745 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
746
747 }
748 /* ARGSUSED */
749 int
750 raidwrite(dev, uio, flags)
751 dev_t dev;
752 struct uio *uio;
753 int flags;
754 {
755 int unit = raidunit(dev);
756 struct raid_softc *rs;
757
758 if (unit >= numraid)
759 return (ENXIO);
760 rs = &raid_softc[unit];
761
762 if ((rs->sc_flags & RAIDF_INITED) == 0)
763 return (ENXIO);
764 db1_printf(("raidwrite\n"));
765 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
766
767 }
768
769 int
770 raidioctl(dev, cmd, data, flag, p)
771 dev_t dev;
772 u_long cmd;
773 caddr_t data;
774 int flag;
775 struct proc *p;
776 {
777 int unit = raidunit(dev);
778 int error = 0;
779 int part, pmask;
780 struct raid_softc *rs;
781 RF_Config_t *k_cfg, *u_cfg;
782 RF_Raid_t *raidPtr;
783 RF_RaidDisk_t *diskPtr;
784 RF_AccTotals_t *totals;
785 RF_DeviceConfig_t *d_cfg, **ucfgp;
786 u_char *specific_buf;
787 int retcode = 0;
788 int row;
789 int column;
790 int raidid;
791 struct rf_recon_req *rrcopy, *rr;
792 RF_ComponentLabel_t *clabel;
793 RF_ComponentLabel_t ci_label;
794 RF_ComponentLabel_t **clabel_ptr;
795 RF_SingleComponent_t *sparePtr,*componentPtr;
796 RF_SingleComponent_t hot_spare;
797 RF_SingleComponent_t component;
798 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
799 int i, j, d;
800 #ifdef __HAVE_OLD_DISKLABEL
801 struct disklabel newlabel;
802 #endif
803
804 if (unit >= numraid)
805 return (ENXIO);
806 rs = &raid_softc[unit];
807 raidPtr = raidPtrs[unit];
808
809 db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
810 (int) DISKPART(dev), (int) unit, (int) cmd));
811
812 /* Must be open for writes for these commands... */
813 switch (cmd) {
814 case DIOCSDINFO:
815 case DIOCWDINFO:
816 #ifdef __HAVE_OLD_DISKLABEL
817 case ODIOCWDINFO:
818 case ODIOCSDINFO:
819 #endif
820 case DIOCWLABEL:
821 if ((flag & FWRITE) == 0)
822 return (EBADF);
823 }
824
825 /* Must be initialized for these... */
826 switch (cmd) {
827 case DIOCGDINFO:
828 case DIOCSDINFO:
829 case DIOCWDINFO:
830 #ifdef __HAVE_OLD_DISKLABEL
831 case ODIOCGDINFO:
832 case ODIOCWDINFO:
833 case ODIOCSDINFO:
834 case ODIOCGDEFLABEL:
835 #endif
836 case DIOCGPART:
837 case DIOCWLABEL:
838 case DIOCGDEFLABEL:
839 case RAIDFRAME_SHUTDOWN:
840 case RAIDFRAME_REWRITEPARITY:
841 case RAIDFRAME_GET_INFO:
842 case RAIDFRAME_RESET_ACCTOTALS:
843 case RAIDFRAME_GET_ACCTOTALS:
844 case RAIDFRAME_KEEP_ACCTOTALS:
845 case RAIDFRAME_GET_SIZE:
846 case RAIDFRAME_FAIL_DISK:
847 case RAIDFRAME_COPYBACK:
848 case RAIDFRAME_CHECK_RECON_STATUS:
849 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
850 case RAIDFRAME_GET_COMPONENT_LABEL:
851 case RAIDFRAME_SET_COMPONENT_LABEL:
852 case RAIDFRAME_ADD_HOT_SPARE:
853 case RAIDFRAME_REMOVE_HOT_SPARE:
854 case RAIDFRAME_INIT_LABELS:
855 case RAIDFRAME_REBUILD_IN_PLACE:
856 case RAIDFRAME_CHECK_PARITY:
857 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
858 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
859 case RAIDFRAME_CHECK_COPYBACK_STATUS:
860 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
861 case RAIDFRAME_SET_AUTOCONFIG:
862 case RAIDFRAME_SET_ROOT:
863 case RAIDFRAME_DELETE_COMPONENT:
864 case RAIDFRAME_INCORPORATE_HOT_SPARE:
865 if ((rs->sc_flags & RAIDF_INITED) == 0)
866 return (ENXIO);
867 }
868
869 switch (cmd) {
870
871 /* configure the system */
872 case RAIDFRAME_CONFIGURE:
873
874 if (raidPtr->valid) {
875 /* There is a valid RAID set running on this unit! */
876 printf("raid%d: Device already configured!\n",unit);
877 return(EINVAL);
878 }
879
880 /* copy-in the configuration information */
881 /* data points to a pointer to the configuration structure */
882
883 u_cfg = *((RF_Config_t **) data);
884 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
885 if (k_cfg == NULL) {
886 return (ENOMEM);
887 }
888 retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
889 sizeof(RF_Config_t));
890 if (retcode) {
891 RF_Free(k_cfg, sizeof(RF_Config_t));
892 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
893 retcode));
894 return (retcode);
895 }
896 /* allocate a buffer for the layout-specific data, and copy it
897 * in */
898 if (k_cfg->layoutSpecificSize) {
899 if (k_cfg->layoutSpecificSize > 10000) {
900 /* sanity check */
901 RF_Free(k_cfg, sizeof(RF_Config_t));
902 return (EINVAL);
903 }
904 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
905 (u_char *));
906 if (specific_buf == NULL) {
907 RF_Free(k_cfg, sizeof(RF_Config_t));
908 return (ENOMEM);
909 }
910 retcode = copyin(k_cfg->layoutSpecific,
911 (caddr_t) specific_buf,
912 k_cfg->layoutSpecificSize);
913 if (retcode) {
914 RF_Free(k_cfg, sizeof(RF_Config_t));
915 RF_Free(specific_buf,
916 k_cfg->layoutSpecificSize);
917 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
918 retcode));
919 return (retcode);
920 }
921 } else
922 specific_buf = NULL;
923 k_cfg->layoutSpecific = specific_buf;
924
925 /* should do some kind of sanity check on the configuration.
926 * Store the sum of all the bytes in the last byte? */
927
928 /* configure the system */
929
930 /*
931 * Clear the entire RAID descriptor, just to make sure
932 * there is no stale data left in the case of a
933 * reconfiguration
934 */
935 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
936 raidPtr->raidid = unit;
937
938 retcode = rf_Configure(raidPtr, k_cfg, NULL);
939
940 if (retcode == 0) {
941
942 /* allow this many simultaneous IO's to
943 this RAID device */
944 raidPtr->openings = RAIDOUTSTANDING;
945
946 raidinit(raidPtr);
947 rf_markalldirty(raidPtr);
948 }
949 /* free the buffers. No return code here. */
950 if (k_cfg->layoutSpecificSize) {
951 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
952 }
953 RF_Free(k_cfg, sizeof(RF_Config_t));
954
955 return (retcode);
956
957 /* shutdown the system */
958 case RAIDFRAME_SHUTDOWN:
959
960 if ((error = raidlock(rs)) != 0)
961 return (error);
962
963 /*
964 * If somebody has a partition mounted, we shouldn't
965 * shutdown.
966 */
967
968 part = DISKPART(dev);
969 pmask = (1 << part);
970 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
971 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
972 (rs->sc_dkdev.dk_copenmask & pmask))) {
973 raidunlock(rs);
974 return (EBUSY);
975 }
976
977 retcode = rf_Shutdown(raidPtr);
978
979 /* It's no longer initialized... */
980 rs->sc_flags &= ~RAIDF_INITED;
981
982 /* Detach the disk. */
983 disk_detach(&rs->sc_dkdev);
984
985 raidunlock(rs);
986
987 return (retcode);
988 case RAIDFRAME_GET_COMPONENT_LABEL:
989 clabel_ptr = (RF_ComponentLabel_t **) data;
990 /* need to read the component label for the disk indicated
991 by row,column in clabel */
992
    993 		/* For practice, let's get it directly from disk, rather
994 than from the in-core copy */
995 RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
996 (RF_ComponentLabel_t *));
997 if (clabel == NULL)
998 return (ENOMEM);
999
1000 memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
1001
1002 retcode = copyin( *clabel_ptr, clabel,
1003 sizeof(RF_ComponentLabel_t));
1004
1005 if (retcode) {
1006 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1007 return(retcode);
1008 }
1009
1010 row = clabel->row;
1011 column = clabel->column;
1012
1013 if ((row < 0) || (row >= raidPtr->numRow) ||
1014 (column < 0) || (column >= raidPtr->numCol +
1015 raidPtr->numSpare)) {
1016 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1017 return(EINVAL);
1018 }
1019
1020 raidread_component_label(raidPtr->Disks[row][column].dev,
1021 raidPtr->raid_cinfo[row][column].ci_vp,
1022 clabel );
1023
1024 retcode = copyout((caddr_t) clabel,
1025 (caddr_t) *clabel_ptr,
1026 sizeof(RF_ComponentLabel_t));
1027 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1028 return (retcode);
1029
1030 case RAIDFRAME_SET_COMPONENT_LABEL:
1031 clabel = (RF_ComponentLabel_t *) data;
1032
1033 /* XXX check the label for valid stuff... */
1034 /* Note that some things *should not* get modified --
1035 the user should be re-initing the labels instead of
1036 trying to patch things.
1037 */
1038
1039 raidid = raidPtr->raidid;
1040 printf("raid%d: Got component label:\n", raidid);
1041 printf("raid%d: Version: %d\n", raidid, clabel->version);
1042 printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1043 printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1044 printf("raid%d: Row: %d\n", raidid, clabel->row);
1045 printf("raid%d: Column: %d\n", raidid, clabel->column);
1046 printf("raid%d: Num Rows: %d\n", raidid, clabel->num_rows);
1047 printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1048 printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1049 printf("raid%d: Status: %d\n", raidid, clabel->status);
1050
1051 row = clabel->row;
1052 column = clabel->column;
1053
1054 if ((row < 0) || (row >= raidPtr->numRow) ||
1055 (column < 0) || (column >= raidPtr->numCol)) {
1056 return(EINVAL);
1057 }
1058
1059 /* XXX this isn't allowed to do anything for now :-) */
1060
1061 /* XXX and before it is, we need to fill in the rest
1062 of the fields!?!?!?! */
1063 #if 0
1064 raidwrite_component_label(
1065 raidPtr->Disks[row][column].dev,
1066 raidPtr->raid_cinfo[row][column].ci_vp,
1067 clabel );
1068 #endif
1069 return (0);
1070
1071 case RAIDFRAME_INIT_LABELS:
1072 clabel = (RF_ComponentLabel_t *) data;
1073 /*
1074 we only want the serial number from
1075 the above. We get all the rest of the information
1076 from the config that was used to create this RAID
1077 set.
1078 */
1079
1080 raidPtr->serial_number = clabel->serial_number;
1081
1082 raid_init_component_label(raidPtr, &ci_label);
1083 ci_label.serial_number = clabel->serial_number;
1084
1085 for(row=0;row<raidPtr->numRow;row++) {
1086 ci_label.row = row;
1087 for(column=0;column<raidPtr->numCol;column++) {
1088 diskPtr = &raidPtr->Disks[row][column];
1089 if (!RF_DEAD_DISK(diskPtr->status)) {
1090 ci_label.partitionSize = diskPtr->partitionSize;
1091 ci_label.column = column;
1092 raidwrite_component_label(
1093 raidPtr->Disks[row][column].dev,
1094 raidPtr->raid_cinfo[row][column].ci_vp,
1095 &ci_label );
1096 }
1097 }
1098 }
1099
1100 return (retcode);
1101 case RAIDFRAME_SET_AUTOCONFIG:
1102 d = rf_set_autoconfig(raidPtr, *(int *) data);
1103 printf("raid%d: New autoconfig value is: %d\n",
1104 raidPtr->raidid, d);
1105 *(int *) data = d;
1106 return (retcode);
1107
1108 case RAIDFRAME_SET_ROOT:
1109 d = rf_set_rootpartition(raidPtr, *(int *) data);
1110 printf("raid%d: New rootpartition value is: %d\n",
1111 raidPtr->raidid, d);
1112 *(int *) data = d;
1113 return (retcode);
1114
1115 /* initialize all parity */
1116 case RAIDFRAME_REWRITEPARITY:
1117
1118 if (raidPtr->Layout.map->faultsTolerated == 0) {
1119 /* Parity for RAID 0 is trivially correct */
1120 raidPtr->parity_good = RF_RAID_CLEAN;
1121 return(0);
1122 }
1123
1124 if (raidPtr->parity_rewrite_in_progress == 1) {
1125 /* Re-write is already in progress! */
1126 return(EINVAL);
1127 }
1128
1129 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1130 rf_RewriteParityThread,
1131 raidPtr,"raid_parity");
1132 return (retcode);
1133
1134
1135 case RAIDFRAME_ADD_HOT_SPARE:
1136 sparePtr = (RF_SingleComponent_t *) data;
1137 memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1138 retcode = rf_add_hot_spare(raidPtr, &hot_spare);
1139 return(retcode);
1140
1141 case RAIDFRAME_REMOVE_HOT_SPARE:
1142 return(retcode);
1143
1144 case RAIDFRAME_DELETE_COMPONENT:
1145 componentPtr = (RF_SingleComponent_t *)data;
1146 memcpy( &component, componentPtr,
1147 sizeof(RF_SingleComponent_t));
1148 retcode = rf_delete_component(raidPtr, &component);
1149 return(retcode);
1150
1151 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1152 componentPtr = (RF_SingleComponent_t *)data;
1153 memcpy( &component, componentPtr,
1154 sizeof(RF_SingleComponent_t));
1155 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1156 return(retcode);
1157
1158 case RAIDFRAME_REBUILD_IN_PLACE:
1159
1160 if (raidPtr->Layout.map->faultsTolerated == 0) {
1161 /* Can't do this on a RAID 0!! */
1162 return(EINVAL);
1163 }
1164
1165 if (raidPtr->recon_in_progress == 1) {
1166 /* a reconstruct is already in progress! */
1167 return(EINVAL);
1168 }
1169
1170 componentPtr = (RF_SingleComponent_t *) data;
1171 memcpy( &component, componentPtr,
1172 sizeof(RF_SingleComponent_t));
1173 row = component.row;
1174 column = component.column;
1175 printf("raid%d: Rebuild: %d %d\n", raidPtr->raidid,
1176 row, column);
1177 if ((row < 0) || (row >= raidPtr->numRow) ||
1178 (column < 0) || (column >= raidPtr->numCol)) {
1179 return(EINVAL);
1180 }
1181
1182 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1183 if (rrcopy == NULL)
1184 return(ENOMEM);
1185
1186 rrcopy->raidPtr = (void *) raidPtr;
1187 rrcopy->row = row;
1188 rrcopy->col = column;
1189
1190 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1191 rf_ReconstructInPlaceThread,
1192 rrcopy,"raid_reconip");
1193 return(retcode);
1194
1195 case RAIDFRAME_GET_INFO:
1196 if (!raidPtr->valid)
1197 return (ENODEV);
1198 ucfgp = (RF_DeviceConfig_t **) data;
1199 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1200 (RF_DeviceConfig_t *));
1201 if (d_cfg == NULL)
1202 return (ENOMEM);
1203 memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
1204 d_cfg->rows = raidPtr->numRow;
1205 d_cfg->cols = raidPtr->numCol;
1206 d_cfg->ndevs = raidPtr->numRow * raidPtr->numCol;
1207 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1208 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1209 return (ENOMEM);
1210 }
1211 d_cfg->nspares = raidPtr->numSpare;
1212 if (d_cfg->nspares >= RF_MAX_DISKS) {
1213 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1214 return (ENOMEM);
1215 }
1216 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1217 d = 0;
1218 for (i = 0; i < d_cfg->rows; i++) {
1219 for (j = 0; j < d_cfg->cols; j++) {
1220 d_cfg->devs[d] = raidPtr->Disks[i][j];
1221 d++;
1222 }
1223 }
1224 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1225 d_cfg->spares[i] = raidPtr->Disks[0][j];
1226 }
1227 retcode = copyout((caddr_t) d_cfg, (caddr_t) * ucfgp,
1228 sizeof(RF_DeviceConfig_t));
1229 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1230
1231 return (retcode);
1232
1233 case RAIDFRAME_CHECK_PARITY:
1234 *(int *) data = raidPtr->parity_good;
1235 return (0);
1236
1237 case RAIDFRAME_RESET_ACCTOTALS:
1238 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1239 return (0);
1240
1241 case RAIDFRAME_GET_ACCTOTALS:
1242 totals = (RF_AccTotals_t *) data;
1243 *totals = raidPtr->acc_totals;
1244 return (0);
1245
1246 case RAIDFRAME_KEEP_ACCTOTALS:
1247 raidPtr->keep_acc_totals = *(int *)data;
1248 return (0);
1249
1250 case RAIDFRAME_GET_SIZE:
1251 *(int *) data = raidPtr->totalSectors;
1252 return (0);
1253
1254 /* fail a disk & optionally start reconstruction */
1255 case RAIDFRAME_FAIL_DISK:
1256
1257 if (raidPtr->Layout.map->faultsTolerated == 0) {
1258 /* Can't do this on a RAID 0!! */
1259 return(EINVAL);
1260 }
1261
1262 rr = (struct rf_recon_req *) data;
1263
1264 if (rr->row < 0 || rr->row >= raidPtr->numRow
1265 || rr->col < 0 || rr->col >= raidPtr->numCol)
1266 return (EINVAL);
1267
1268 printf("raid%d: Failing the disk: row: %d col: %d\n",
1269 unit, rr->row, rr->col);
1270
1271 /* make a copy of the recon request so that we don't rely on
1272 * the user's buffer */
1273 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1274 if (rrcopy == NULL)
1275 return(ENOMEM);
1276 memcpy(rrcopy, rr, sizeof(*rr));
1277 rrcopy->raidPtr = (void *) raidPtr;
1278
1279 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1280 rf_ReconThread,
1281 rrcopy,"raid_recon");
1282 return (0);
1283
1284 /* invoke a copyback operation after recon on whatever disk
1285 * needs it, if any */
1286 case RAIDFRAME_COPYBACK:
1287
1288 if (raidPtr->Layout.map->faultsTolerated == 0) {
1289 /* This makes no sense on a RAID 0!! */
1290 return(EINVAL);
1291 }
1292
1293 if (raidPtr->copyback_in_progress == 1) {
1294 /* Copyback is already in progress! */
1295 return(EINVAL);
1296 }
1297
1298 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1299 rf_CopybackThread,
1300 raidPtr,"raid_copyback");
1301 return (retcode);
1302
1303 /* return the percentage completion of reconstruction */
1304 case RAIDFRAME_CHECK_RECON_STATUS:
1305 if (raidPtr->Layout.map->faultsTolerated == 0) {
1306 /* This makes no sense on a RAID 0, so tell the
1307 user it's done. */
1308 *(int *) data = 100;
1309 return(0);
1310 }
1311 row = 0; /* XXX we only consider a single row... */
1312 if (raidPtr->status[row] != rf_rs_reconstructing)
1313 *(int *) data = 100;
1314 else
1315 *(int *) data = raidPtr->reconControl[row]->percentComplete;
1316 return (0);
1317 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1318 progressInfoPtr = (RF_ProgressInfo_t **) data;
1319 row = 0; /* XXX we only consider a single row... */
1320 if (raidPtr->status[row] != rf_rs_reconstructing) {
1321 progressInfo.remaining = 0;
1322 progressInfo.completed = 100;
1323 progressInfo.total = 100;
1324 } else {
1325 progressInfo.total =
1326 raidPtr->reconControl[row]->numRUsTotal;
1327 progressInfo.completed =
1328 raidPtr->reconControl[row]->numRUsComplete;
1329 progressInfo.remaining = progressInfo.total -
1330 progressInfo.completed;
1331 }
1332 retcode = copyout((caddr_t) &progressInfo,
1333 (caddr_t) *progressInfoPtr,
1334 sizeof(RF_ProgressInfo_t));
1335 return (retcode);
1336
1337 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1338 if (raidPtr->Layout.map->faultsTolerated == 0) {
1339 /* This makes no sense on a RAID 0, so tell the
1340 user it's done. */
1341 *(int *) data = 100;
1342 return(0);
1343 }
1344 if (raidPtr->parity_rewrite_in_progress == 1) {
1345 *(int *) data = 100 *
1346 raidPtr->parity_rewrite_stripes_done /
1347 raidPtr->Layout.numStripe;
1348 } else {
1349 *(int *) data = 100;
1350 }
1351 return (0);
1352
1353 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1354 progressInfoPtr = (RF_ProgressInfo_t **) data;
1355 if (raidPtr->parity_rewrite_in_progress == 1) {
1356 progressInfo.total = raidPtr->Layout.numStripe;
1357 progressInfo.completed =
1358 raidPtr->parity_rewrite_stripes_done;
1359 progressInfo.remaining = progressInfo.total -
1360 progressInfo.completed;
1361 } else {
1362 progressInfo.remaining = 0;
1363 progressInfo.completed = 100;
1364 progressInfo.total = 100;
1365 }
1366 retcode = copyout((caddr_t) &progressInfo,
1367 (caddr_t) *progressInfoPtr,
1368 sizeof(RF_ProgressInfo_t));
1369 return (retcode);
1370
1371 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1372 if (raidPtr->Layout.map->faultsTolerated == 0) {
1373 /* This makes no sense on a RAID 0 */
1374 *(int *) data = 100;
1375 return(0);
1376 }
1377 if (raidPtr->copyback_in_progress == 1) {
1378 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1379 raidPtr->Layout.numStripe;
1380 } else {
1381 *(int *) data = 100;
1382 }
1383 return (0);
1384
1385 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1386 progressInfoPtr = (RF_ProgressInfo_t **) data;
1387 if (raidPtr->copyback_in_progress == 1) {
1388 progressInfo.total = raidPtr->Layout.numStripe;
1389 progressInfo.completed =
1390 raidPtr->copyback_stripes_done;
1391 progressInfo.remaining = progressInfo.total -
1392 progressInfo.completed;
1393 } else {
1394 progressInfo.remaining = 0;
1395 progressInfo.completed = 100;
1396 progressInfo.total = 100;
1397 }
1398 retcode = copyout((caddr_t) &progressInfo,
1399 (caddr_t) *progressInfoPtr,
1400 sizeof(RF_ProgressInfo_t));
1401 return (retcode);
1402
1403 /* the sparetable daemon calls this to wait for the kernel to
1404 * need a spare table. this ioctl does not return until a
1405 * spare table is needed. XXX -- calling mpsleep here in the
1406 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1407 * -- I should either compute the spare table in the kernel,
1408 * or have a different -- XXX XXX -- interface (a different
1409 * character device) for delivering the table -- XXX */
1410 #if 0
1411 case RAIDFRAME_SPARET_WAIT:
1412 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1413 while (!rf_sparet_wait_queue)
1414 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1415 waitreq = rf_sparet_wait_queue;
1416 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1417 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1418
1419 /* structure assignment */
1420 *((RF_SparetWait_t *) data) = *waitreq;
1421
1422 RF_Free(waitreq, sizeof(*waitreq));
1423 return (0);
1424
1425 /* wakes up a process waiting on SPARET_WAIT and puts an error
   1426 	 * code in it that will cause the daemon to exit */
1427 case RAIDFRAME_ABORT_SPARET_WAIT:
1428 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1429 waitreq->fcol = -1;
1430 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1431 waitreq->next = rf_sparet_wait_queue;
1432 rf_sparet_wait_queue = waitreq;
1433 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1434 wakeup(&rf_sparet_wait_queue);
1435 return (0);
1436
1437 /* used by the spare table daemon to deliver a spare table
1438 * into the kernel */
1439 case RAIDFRAME_SEND_SPARET:
1440
1441 /* install the spare table */
1442 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1443
1444 /* respond to the requestor. the return status of the spare
1445 * table installation is passed in the "fcol" field */
1446 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1447 waitreq->fcol = retcode;
1448 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1449 waitreq->next = rf_sparet_resp_queue;
1450 rf_sparet_resp_queue = waitreq;
1451 wakeup(&rf_sparet_resp_queue);
1452 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1453
1454 return (retcode);
1455 #endif
1456
1457 default:
1458 break; /* fall through to the os-specific code below */
1459
1460 }
1461
1462 if (!raidPtr->valid)
1463 return (EINVAL);
1464
1465 /*
1466 * Add support for "regular" device ioctls here.
1467 */
1468
1469 switch (cmd) {
1470 case DIOCGDINFO:
1471 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1472 break;
1473 #ifdef __HAVE_OLD_DISKLABEL
1474 case ODIOCGDINFO:
1475 newlabel = *(rs->sc_dkdev.dk_label);
1476 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1477 return ENOTTY;
1478 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1479 break;
1480 #endif
1481
1482 case DIOCGPART:
1483 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1484 ((struct partinfo *) data)->part =
1485 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1486 break;
1487
1488 case DIOCWDINFO:
1489 case DIOCSDINFO:
1490 #ifdef __HAVE_OLD_DISKLABEL
1491 case ODIOCWDINFO:
1492 case ODIOCSDINFO:
1493 #endif
1494 {
1495 struct disklabel *lp;
1496 #ifdef __HAVE_OLD_DISKLABEL
1497 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1498 memset(&newlabel, 0, sizeof newlabel);
1499 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1500 lp = &newlabel;
1501 } else
1502 #endif
1503 lp = (struct disklabel *)data;
1504
1505 if ((error = raidlock(rs)) != 0)
1506 return (error);
1507
1508 rs->sc_flags |= RAIDF_LABELLING;
1509
1510 error = setdisklabel(rs->sc_dkdev.dk_label,
1511 lp, 0, rs->sc_dkdev.dk_cpulabel);
1512 if (error == 0) {
1513 if (cmd == DIOCWDINFO
1514 #ifdef __HAVE_OLD_DISKLABEL
1515 || cmd == ODIOCWDINFO
1516 #endif
1517 )
1518 error = writedisklabel(RAIDLABELDEV(dev),
1519 raidstrategy, rs->sc_dkdev.dk_label,
1520 rs->sc_dkdev.dk_cpulabel);
1521 }
1522 rs->sc_flags &= ~RAIDF_LABELLING;
1523
1524 raidunlock(rs);
1525
1526 if (error)
1527 return (error);
1528 break;
1529 }
1530
1531 case DIOCWLABEL:
1532 if (*(int *) data != 0)
1533 rs->sc_flags |= RAIDF_WLABEL;
1534 else
1535 rs->sc_flags &= ~RAIDF_WLABEL;
1536 break;
1537
1538 case DIOCGDEFLABEL:
1539 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1540 break;
1541
1542 #ifdef __HAVE_OLD_DISKLABEL
1543 case ODIOCGDEFLABEL:
1544 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1545 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1546 return ENOTTY;
1547 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1548 break;
1549 #endif
1550
1551 default:
1552 retcode = ENOTTY;
1553 }
1554 return (retcode);
1555
1556 }
1557
1558
1559 /* raidinit -- complete the rest of the initialization for the
1560 RAIDframe device. */
1561
1562
1563 static void
1564 raidinit(raidPtr)
1565 RF_Raid_t *raidPtr;
1566 {
1567 struct raid_softc *rs;
1568 int unit;
1569
1570 unit = raidPtr->raidid;
1571
1572 rs = &raid_softc[unit];
1573
1574 /* XXX should check return code first... */
1575 rs->sc_flags |= RAIDF_INITED;
1576
1577 sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds. */
1578
1579 rs->sc_dkdev.dk_name = rs->sc_xname;
1580
1581 /* disk_attach actually creates space for the CPU disklabel, among
1582 * other things, so it's critical to call this *BEFORE* we try putzing
1583 * with disklabels. */
1584
1585 disk_attach(&rs->sc_dkdev);
1586
1587 /* XXX There may be a weird interaction here between this, and
1588 * protectedSectors, as used in RAIDframe. */
1589
1590 rs->sc_size = raidPtr->totalSectors;
1591
1592 }
1593
1594 /* wake up the daemon & tell it to get us a spare table
1595 * XXX
1596 * the entries in the queues should be tagged with the raidPtr
1597 * so that in the extremely rare case that two recons happen at once,
   1598  * we know for which device we're requesting a spare table
1599 * XXX
1600 *
1601 * XXX This code is not currently used. GO
1602 */
1603 int
1604 rf_GetSpareTableFromDaemon(req)
1605 RF_SparetWait_t *req;
1606 {
1607 int retcode;
1608
1609 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1610 req->next = rf_sparet_wait_queue;
1611 rf_sparet_wait_queue = req;
1612 wakeup(&rf_sparet_wait_queue);
1613
1614 /* mpsleep unlocks the mutex */
1615 while (!rf_sparet_resp_queue) {
1616 tsleep(&rf_sparet_resp_queue, PRIBIO,
1617 "raidframe getsparetable", 0);
1618 }
1619 req = rf_sparet_resp_queue;
1620 rf_sparet_resp_queue = req->next;
1621 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1622
1623 retcode = req->fcol;
1624 RF_Free(req, sizeof(*req)); /* this is not the same req as we
1625 * alloc'd */
1626 return (retcode);
1627 }
1628
1629 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1630 * bp & passes it down.
1631 * any calls originating in the kernel must use non-blocking I/O
1632 * do some extra sanity checking to return "appropriate" error values for
1633 * certain conditions (to make some standard utilities work)
1634 *
1635 * Formerly known as: rf_DoAccessKernel
1636 */
1637 void
1638 raidstart(raidPtr)
1639 RF_Raid_t *raidPtr;
1640 {
1641 RF_SectorCount_t num_blocks, pb, sum;
1642 RF_RaidAddr_t raid_addr;
1643 int retcode;
1644 struct partition *pp;
1645 daddr_t blocknum;
1646 int unit;
1647 struct raid_softc *rs;
1648 int do_async;
1649 struct buf *bp;
1650
1651 unit = raidPtr->raidid;
1652 rs = &raid_softc[unit];
1653
1654 /* quick check to see if anything has died recently */
1655 RF_LOCK_MUTEX(raidPtr->mutex);
1656 if (raidPtr->numNewFailures > 0) {
1657 rf_update_component_labels(raidPtr,
1658 RF_NORMAL_COMPONENT_UPDATE);
1659 raidPtr->numNewFailures--;
1660 }
1661
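	/* Note: raidPtr->mutex is held whenever the loop condition below
	   is evaluated; it is released at the top of each iteration and
	   re-acquired before the next test. */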
1662 /* Check to see if we're at the limit... */
1663 while (raidPtr->openings > 0) {
1664 RF_UNLOCK_MUTEX(raidPtr->mutex);
1665
1666 /* get the next item, if any, from the queue */
1667 if ((bp = BUFQ_FIRST(&rs->buf_queue)) == NULL) {
1668 /* nothing more to do */
1669 return;
1670 }
1671 BUFQ_REMOVE(&rs->buf_queue, bp);
1672
1673 /* Ok, for the bp we have here, bp->b_blkno is relative to the
1674 * partition.. Need to make it absolute to the underlying
1675 * device.. */
1676
1677 blocknum = bp->b_blkno;
1678 if (DISKPART(bp->b_dev) != RAW_PART) {
1679 pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
1680 blocknum += pp->p_offset;
1681 }
1682
1683 db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
1684 (int) blocknum));
1685
1686 db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
1687 db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
1688
1689 /* *THIS* is where we adjust what block we're going to...
1690 * but DO NOT TOUCH bp->b_blkno!!! */
1691 raid_addr = blocknum;
1692
1693 num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
1694 pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
1695 sum = raid_addr + num_blocks + pb;
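		/* num_blocks is the number of whole sectors in the
		   transfer, pb is 1 if the byte count is not an exact
		   multiple of the sector size, and sum is checked below
		   both for wrap-around and for running past the end of
		   the RAID set. */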
1696 if (1 || rf_debugKernelAccess) {
1697 db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
1698 (int) raid_addr, (int) sum, (int) num_blocks,
1699 (int) pb, (int) bp->b_resid));
1700 }
1701 if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
1702 || (sum < num_blocks) || (sum < pb)) {
1703 bp->b_error = ENOSPC;
1704 bp->b_flags |= B_ERROR;
1705 bp->b_resid = bp->b_bcount;
1706 biodone(bp);
1707 RF_LOCK_MUTEX(raidPtr->mutex);
1708 continue;
1709 }
1710 /*
1711 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
1712 */
1713
1714 if (bp->b_bcount & raidPtr->sectorMask) {
1715 bp->b_error = EINVAL;
1716 bp->b_flags |= B_ERROR;
1717 bp->b_resid = bp->b_bcount;
1718 biodone(bp);
1719 RF_LOCK_MUTEX(raidPtr->mutex);
1720 continue;
1721
1722 }
1723 db1_printf(("Calling DoAccess..\n"));
1724
1725
1726 RF_LOCK_MUTEX(raidPtr->mutex);
1727 raidPtr->openings--;
1728 RF_UNLOCK_MUTEX(raidPtr->mutex);
1729
1730 /*
1731 * Everything is async.
1732 */
1733 do_async = 1;
1734
1735 disk_busy(&rs->sc_dkdev);
1736
1737 /* XXX we're still at splbio() here... do we *really*
1738 need to be? */
1739
1740 /* don't ever condition on bp->b_flags & B_WRITE.
1741 * always condition on B_READ instead */
1742
1743 retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
1744 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
1745 do_async, raid_addr, num_blocks,
1746 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
1747
1748 RF_LOCK_MUTEX(raidPtr->mutex);
1749 }
1750 RF_UNLOCK_MUTEX(raidPtr->mutex);
1751 }
1752
1753
1754
1755
1756 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1757
1758 int
1759 rf_DispatchKernelIO(queue, req)
1760 RF_DiskQueue_t *queue;
1761 RF_DiskQueueData_t *req;
1762 {
1763 int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
1764 struct buf *bp;
1765 struct raidbuf *raidbp = NULL;
1766 struct raid_softc *rs;
1767 int unit;
1768 int s;
1769
1770 s=0;
1771 /* s = splbio();*/ /* want to test this */
1772 /* XXX along with the vnode, we also need the softc associated with
1773 * this device.. */
1774
1775 req->queue = queue;
1776
1777 unit = queue->raidPtr->raidid;
1778
1779 db1_printf(("DispatchKernelIO unit: %d\n", unit));
1780
1781 if (unit >= numraid) {
1782 printf("Invalid unit number: %d %d\n", unit, numraid);
1783 panic("Invalid Unit number in rf_DispatchKernelIO\n");
1784 }
1785 rs = &raid_softc[unit];
1786
1787 bp = req->bp;
1788 #if 1
1789 /* XXX when there is a physical disk failure, someone is passing us a
1790 * buffer that contains old stuff!! Attempt to deal with this problem
1791 * without taking a performance hit... (not sure where the real bug
1792 * is. It's buried in RAIDframe somewhere) :-( GO ) */
1793
1794 if (bp->b_flags & B_ERROR) {
1795 bp->b_flags &= ~B_ERROR;
1796 }
1797 if (bp->b_error != 0) {
1798 bp->b_error = 0;
1799 }
1800 #endif
1801 raidbp = RAIDGETBUF(rs);
1802
1803 raidbp->rf_flags = 0; /* XXX not really used anywhere... */
1804
1805 /*
1806 * context for raidiodone
1807 */
1808 raidbp->rf_obp = bp;
1809 raidbp->req = req;
1810
1811 LIST_INIT(&raidbp->rf_buf.b_dep);
1812
1813 switch (req->type) {
1814 case RF_IO_TYPE_NOP: /* used primarily to unlock a locked queue */
1815 /* XXX need to do something extra here.. */
1816 /* I'm leaving this in, as I've never actually seen it used,
1817 * and I'd like folks to report it... GO */
   1818 		printf("WAKEUP CALLED\n");
1819 queue->numOutstanding++;
1820
1821 /* XXX need to glue the original buffer into this?? */
1822
1823 KernelWakeupFunc(&raidbp->rf_buf);
1824 break;
1825
1826 case RF_IO_TYPE_READ:
1827 case RF_IO_TYPE_WRITE:
1828
1829 if (req->tracerec) {
1830 RF_ETIMER_START(req->tracerec->timer);
1831 }
1832 InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
1833 op | bp->b_flags, queue->rf_cinfo->ci_dev,
1834 req->sectorOffset, req->numSector,
1835 req->buf, KernelWakeupFunc, (void *) req,
1836 queue->raidPtr->logBytesPerSector, req->b_proc);
1837
1838 if (rf_debugKernelAccess) {
1839 db1_printf(("dispatch: bp->b_blkno = %ld\n",
1840 (long) bp->b_blkno));
1841 }
1842 queue->numOutstanding++;
1843 queue->last_deq_sector = req->sectorOffset;
1844 /* acc wouldn't have been let in if there were any pending
1845 * reqs at any other priority */
1846 queue->curPriority = req->priority;
1847
1848 db1_printf(("Going for %c to unit %d row %d col %d\n",
1849 req->type, unit, queue->row, queue->col));
1850 db1_printf(("sector %d count %d (%d bytes) %d\n",
1851 (int) req->sectorOffset, (int) req->numSector,
1852 (int) (req->numSector <<
1853 queue->raidPtr->logBytesPerSector),
1854 (int) queue->raidPtr->logBytesPerSector));
1855 if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
1856 raidbp->rf_buf.b_vp->v_numoutput++;
1857 }
1858 VOP_STRATEGY(&raidbp->rf_buf);
1859
1860 break;
1861
1862 default:
1863 panic("bad req->type in rf_DispatchKernelIO");
1864 }
1865 db1_printf(("Exiting from DispatchKernelIO\n"));
1866 /* splx(s); */ /* want to test this */
1867 return (0);
1868 }
   1869 /* this is the callback function associated with an I/O invoked from
1870 kernel code.
1871 */
1872 static void
1873 KernelWakeupFunc(vbp)
1874 struct buf *vbp;
1875 {
1876 RF_DiskQueueData_t *req = NULL;
1877 RF_DiskQueue_t *queue;
1878 struct raidbuf *raidbp = (struct raidbuf *) vbp;
1879 struct buf *bp;
1880 struct raid_softc *rs;
1881 int unit;
1882 int s;
1883
1884 s = splbio();
1885 db1_printf(("recovering the request queue:\n"));
1886 req = raidbp->req;
1887
1888 bp = raidbp->rf_obp;
1889
1890 queue = (RF_DiskQueue_t *) req->queue;
1891
1892 if (raidbp->rf_buf.b_flags & B_ERROR) {
1893 bp->b_flags |= B_ERROR;
1894 bp->b_error = raidbp->rf_buf.b_error ?
1895 raidbp->rf_buf.b_error : EIO;
1896 }
1897
1898 /* XXX methinks this could be wrong... */
1899 #if 1
1900 bp->b_resid = raidbp->rf_buf.b_resid;
1901 #endif
1902
1903 if (req->tracerec) {
1904 RF_ETIMER_STOP(req->tracerec->timer);
1905 RF_ETIMER_EVAL(req->tracerec->timer);
1906 RF_LOCK_MUTEX(rf_tracing_mutex);
1907 req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1908 req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
1909 req->tracerec->num_phys_ios++;
1910 RF_UNLOCK_MUTEX(rf_tracing_mutex);
1911 }
1912 bp->b_bcount = raidbp->rf_buf.b_bcount; /* XXXX ?? */
1913
1914 unit = queue->raidPtr->raidid; /* *Much* simpler :-> */
1915
1916
1917 /* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
1918 * ballistic, and mark the component as hosed... */
1919
1920 if (bp->b_flags & B_ERROR) {
1921 /* Mark the disk as dead */
1922 /* but only mark it once... */
1923 if (queue->raidPtr->Disks[queue->row][queue->col].status ==
1924 rf_ds_optimal) {
1925 printf("raid%d: IO Error. Marking %s as failed.\n",
1926 unit, queue->raidPtr->Disks[queue->row][queue->col].devname);
1927 queue->raidPtr->Disks[queue->row][queue->col].status =
1928 rf_ds_failed;
1929 queue->raidPtr->status[queue->row] = rf_rs_degraded;
1930 queue->raidPtr->numFailures++;
1931 queue->raidPtr->numNewFailures++;
1932 } else { /* Disk is already dead... */
1933 /* printf("Disk already marked as dead!\n"); */
1934 }
1935
1936 }
1937
1938 rs = &raid_softc[unit];
1939 RAIDPUTBUF(rs, raidbp);
1940
1941 rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
1942 (req->CompleteFunc) (req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
1943
1944 splx(s);
1945 }
1946
1947
1948
1949 /*
1950 * initialize a buf structure for doing an I/O in the kernel.
1951 */
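/*
 * Everything the strategy routine needs is filled in here: the transfer
 * size is numSect sectors expressed in bytes (numSect << logBytesPerSector),
 * the completion callback is wired up through B_CALL/b_iodone, and b_blkno
 * is the sector offset on the component.  b_resid is preset to the full
 * transfer size.
 */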
1952 static void
1953 InitBP(bp, b_vp, rw_flag, dev, startSect, numSect, buf, cbFunc, cbArg,
1954 logBytesPerSector, b_proc)
1955 struct buf *bp;
1956 struct vnode *b_vp;
1957 unsigned rw_flag;
1958 dev_t dev;
1959 RF_SectorNum_t startSect;
1960 RF_SectorCount_t numSect;
1961 caddr_t buf;
1962 void (*cbFunc) (struct buf *);
1963 void *cbArg;
1964 int logBytesPerSector;
1965 struct proc *b_proc;
1966 {
1967 /* bp->b_flags = B_PHYS | rw_flag; */
1968 bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1969 bp->b_bcount = numSect << logBytesPerSector;
1970 bp->b_bufsize = bp->b_bcount;
1971 bp->b_error = 0;
1972 bp->b_dev = dev;
1973 bp->b_data = buf;
1974 bp->b_blkno = startSect;
1975 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1976 if (bp->b_bcount == 0) {
1977 panic("bp->b_bcount is zero in InitBP!!\n");
1978 }
1979 bp->b_proc = b_proc;
1980 bp->b_iodone = cbFunc;
1981 bp->b_vp = b_vp;
1982
1983 }
1984
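/*
 * The fabricated label below uses a made-up geometry: each "track" holds
 * one data stripe's worth of sectors, there are four tracks per column of
 * components, and roughly enough cylinders to cover totalSectors.  Only
 * the raw partition is populated; everything else is left to the
 * administrator.
 */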
1985 static void
1986 raidgetdefaultlabel(raidPtr, rs, lp)
1987 RF_Raid_t *raidPtr;
1988 struct raid_softc *rs;
1989 struct disklabel *lp;
1990 {
1991 db1_printf(("Building a default label...\n"));
1992 memset(lp, 0, sizeof(*lp));
1993
1994 /* fabricate a label... */
1995 lp->d_secperunit = raidPtr->totalSectors;
1996 lp->d_secsize = raidPtr->bytesPerSector;
1997 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
1998 lp->d_ntracks = 4 * raidPtr->numCol;
1999 lp->d_ncylinders = raidPtr->totalSectors /
2000 (lp->d_nsectors * lp->d_ntracks);
2001 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2002
2003 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2004 lp->d_type = DTYPE_RAID;
2005 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2006 lp->d_rpm = 3600;
2007 lp->d_interleave = 1;
2008 lp->d_flags = 0;
2009
2010 lp->d_partitions[RAW_PART].p_offset = 0;
2011 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2012 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2013 lp->d_npartitions = RAW_PART + 1;
2014
2015 lp->d_magic = DISKMAGIC;
2016 lp->d_magic2 = DISKMAGIC;
2017 	lp->d_checksum = dkcksum(lp);
2018
2019 }
2020 /*
2021 * Read the disklabel from the raid device. If one is not present, fake one
2022 * up.
2023 */
2024 static void
2025 raidgetdisklabel(dev)
2026 dev_t dev;
2027 {
2028 int unit = raidunit(dev);
2029 struct raid_softc *rs = &raid_softc[unit];
2030 char *errstring;
2031 struct disklabel *lp = rs->sc_dkdev.dk_label;
2032 struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
2033 RF_Raid_t *raidPtr;
2034
2035 db1_printf(("Getting the disklabel...\n"));
2036
2037 memset(clp, 0, sizeof(*clp));
2038
2039 raidPtr = raidPtrs[unit];
2040
2041 raidgetdefaultlabel(raidPtr, rs, lp);
2042
2043 /*
2044 * Call the generic disklabel extraction routine.
2045 */
2046 errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
2047 rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
2048 if (errstring)
2049 raidmakedisklabel(rs);
2050 else {
2051 int i;
2052 struct partition *pp;
2053
2054 /*
2055 * Sanity check whether the found disklabel is valid.
2056 *
2057 		 * This is necessary since the total size of the raid device
2058 		 * may vary when the interleave is changed even though exactly
2059 		 * the same components are used, and an old disklabel may be
2060 		 * used if one is found.
2061 */
2062 if (lp->d_secperunit != rs->sc_size)
2063 printf("raid%d: WARNING: %s: "
2064 			    "total sector count in disklabel (%d) != "
2065 "the size of raid (%ld)\n", unit, rs->sc_xname,
2066 lp->d_secperunit, (long) rs->sc_size);
2067 for (i = 0; i < lp->d_npartitions; i++) {
2068 pp = &lp->d_partitions[i];
2069 if (pp->p_offset + pp->p_size > rs->sc_size)
2070 printf("raid%d: WARNING: %s: end of partition `%c' "
2071 "exceeds the size of raid (%ld)\n",
2072 unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
2073 }
2074 }
2075
2076 }
2077 /*
2078 * Take care of things one might want to take care of in the event
2079 * that a disklabel isn't present.
2080 */
2081 static void
2082 raidmakedisklabel(rs)
2083 struct raid_softc *rs;
2084 {
2085 struct disklabel *lp = rs->sc_dkdev.dk_label;
2086 db1_printf(("Making a label..\n"));
2087
2088 /*
2089 * For historical reasons, if there's no disklabel present
2090 * the raw partition must be marked FS_BSDFFS.
2091 */
2092
2093 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2094
2095 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2096
2097 lp->d_checksum = dkcksum(lp);
2098 }
2099 /*
2100  * Look up the provided name in the filesystem. If the file exists,
2101 * is a valid block device, and isn't being used by anyone else,
2102 * set *vpp to the file's vnode.
2103 * You'll find the original of this in ccd.c
2104 */
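/*
 * The checks below reject a vnode that is already open elsewhere
 * (v_usecount > 1 yields EBUSY) and anything that is not a block device
 * (ENOTBLK); on any failure the vnode is closed again before returning.
 */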
2105 int
2106 raidlookup(path, p, vpp)
2107 char *path;
2108 struct proc *p;
2109 struct vnode **vpp; /* result */
2110 {
2111 struct nameidata nd;
2112 struct vnode *vp;
2113 struct vattr va;
2114 int error;
2115
2116 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
2117 if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
2118 #if 0
2119 printf("RAIDframe: vn_open returned %d\n", error);
2120 #endif
2121 return (error);
2122 }
2123 vp = nd.ni_vp;
2124 if (vp->v_usecount > 1) {
2125 VOP_UNLOCK(vp, 0);
2126 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2127 return (EBUSY);
2128 }
2129 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
2130 VOP_UNLOCK(vp, 0);
2131 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2132 return (error);
2133 }
2134 /* XXX: eventually we should handle VREG, too. */
2135 if (va.va_type != VBLK) {
2136 VOP_UNLOCK(vp, 0);
2137 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2138 return (ENOTBLK);
2139 }
2140 VOP_UNLOCK(vp, 0);
2141 *vpp = vp;
2142 return (0);
2143 }
2144 /*
2145 * Wait interruptibly for an exclusive lock.
2146 *
2147 * XXX
2148 * Several drivers do this; it should be abstracted and made MP-safe.
2149 * (Hmm... where have we seen this warning before :-> GO )
2150 */
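/*
 * Callers are expected to pair these; a sketch of the usual pattern
 * (not a verbatim excerpt from this file):
 *
 *	if ((error = raidlock(rs)) != 0)
 *		return (error);
 *	... fiddle with the unit ...
 *	raidunlock(rs);
 */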
2151 static int
2152 raidlock(rs)
2153 struct raid_softc *rs;
2154 {
2155 int error;
2156
2157 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2158 rs->sc_flags |= RAIDF_WANTED;
2159 if ((error =
2160 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2161 return (error);
2162 }
2163 rs->sc_flags |= RAIDF_LOCKED;
2164 return (0);
2165 }
2166 /*
2167 * Unlock and wake up any waiters.
2168 */
2169 static void
2170 raidunlock(rs)
2171 struct raid_softc *rs;
2172 {
2173
2174 rs->sc_flags &= ~RAIDF_LOCKED;
2175 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2176 rs->sc_flags &= ~RAIDF_WANTED;
2177 wakeup(rs);
2178 }
2179 }
2180
2181
2182 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2183 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
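/*
 * The component label lives RF_COMPONENT_INFO_OFFSET bytes into each
 * component and occupies RF_COMPONENT_INFO_SIZE bytes; with DEV_BSIZE
 * (512-byte) blocks that puts it at block 32, which is what the
 * read/write routines below load into b_blkno.  Only
 * sizeof(RF_ComponentLabel_t) bytes of that area are actually copied
 * in or out.
 */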
2184
2185 int
2186 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2187 {
2188 RF_ComponentLabel_t clabel;
2189 raidread_component_label(dev, b_vp, &clabel);
2190 clabel.mod_counter = mod_counter;
2191 clabel.clean = RF_RAID_CLEAN;
2192 raidwrite_component_label(dev, b_vp, &clabel);
2193 return(0);
2194 }
2195
2196
2197 int
2198 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2199 {
2200 RF_ComponentLabel_t clabel;
2201 raidread_component_label(dev, b_vp, &clabel);
2202 clabel.mod_counter = mod_counter;
2203 clabel.clean = RF_RAID_DIRTY;
2204 raidwrite_component_label(dev, b_vp, &clabel);
2205 return(0);
2206 }
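/*
 * The two helpers above are simple read-modify-write operations: they
 * re-read the on-disk component label, update only the mod_counter and
 * clean fields, and write the label back.
 */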
2207
2208 /* ARGSUSED */
2209 int
2210 raidread_component_label(dev, b_vp, clabel)
2211 dev_t dev;
2212 struct vnode *b_vp;
2213 RF_ComponentLabel_t *clabel;
2214 {
2215 struct buf *bp;
2216 int error;
2217
2218 /* XXX should probably ensure that we don't try to do this if
2219 someone has changed rf_protected_sectors. */
2220
2221 if (b_vp == NULL) {
2222 /* For whatever reason, this component is not valid.
2223 Don't try to read a component label from it. */
2224 return(EINVAL);
2225 }
2226
2227 /* get a block of the appropriate size... */
2228 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2229 bp->b_dev = dev;
2230
2231 /* get our ducks in a row for the read */
2232 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2233 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2234 bp->b_flags |= B_READ;
2235 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2236
2237 (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2238
2239 error = biowait(bp);
2240
2241 if (!error) {
2242 memcpy(clabel, bp->b_data,
2243 sizeof(RF_ComponentLabel_t));
2244 #if 0
2245 rf_print_component_label( clabel );
2246 #endif
2247 } else {
2248 #if 0
2249 printf("Failed to read RAID component label!\n");
2250 #endif
2251 }
2252
2253 brelse(bp);
2254 return(error);
2255 }
2256 /* ARGSUSED */
2257 int
2258 raidwrite_component_label(dev, b_vp, clabel)
2259 dev_t dev;
2260 struct vnode *b_vp;
2261 RF_ComponentLabel_t *clabel;
2262 {
2263 struct buf *bp;
2264 int error;
2265
2266 /* get a block of the appropriate size... */
2267 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2268 bp->b_dev = dev;
2269
2270 /* get our ducks in a row for the write */
2271 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2272 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2273 bp->b_flags |= B_WRITE;
2274 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2275
2276 memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2277
2278 memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2279
2280 (*bdevsw[major(bp->b_dev)].d_strategy)(bp);
2281 error = biowait(bp);
2282 brelse(bp);
2283 if (error) {
2284 #if 1
2285 printf("Failed to write RAID component info!\n");
2286 #endif
2287 }
2288
2289 return(error);
2290 }
2291
2292 void
2293 rf_markalldirty(raidPtr)
2294 RF_Raid_t *raidPtr;
2295 {
2296 RF_ComponentLabel_t clabel;
2297 int r,c;
2298
2299 raidPtr->mod_counter++;
2300 for (r = 0; r < raidPtr->numRow; r++) {
2301 for (c = 0; c < raidPtr->numCol; c++) {
2302 /* we don't want to touch (at all) a disk that has
2303 failed */
2304 if (!RF_DEAD_DISK(raidPtr->Disks[r][c].status)) {
2305 raidread_component_label(
2306 raidPtr->Disks[r][c].dev,
2307 raidPtr->raid_cinfo[r][c].ci_vp,
2308 &clabel);
2309 if (clabel.status == rf_ds_spared) {
2310 /* XXX do something special...
2311 but whatever you do, don't
2312 try to access it!! */
2313 } else {
2314 #if 0
2315 clabel.status =
2316 raidPtr->Disks[r][c].status;
2317 raidwrite_component_label(
2318 raidPtr->Disks[r][c].dev,
2319 raidPtr->raid_cinfo[r][c].ci_vp,
2320 &clabel);
2321 #endif
2322 raidmarkdirty(
2323 raidPtr->Disks[r][c].dev,
2324 raidPtr->raid_cinfo[r][c].ci_vp,
2325 raidPtr->mod_counter);
2326 }
2327 }
2328 }
2329 }
2330 /* printf("Component labels marked dirty.\n"); */
2331 #if 0
2332 for( c = 0; c < raidPtr->numSpare ; c++) {
2333 sparecol = raidPtr->numCol + c;
2334 if (raidPtr->Disks[r][sparecol].status == rf_ds_used_spare) {
2335 /*
2336
2337 XXX this is where we get fancy and map this spare
2338 			   into its correct spot in the array.
2339
2340 */
2341 /*
2342
2343 we claim this disk is "optimal" if it's
2344 rf_ds_used_spare, as that means it should be
2345 directly substitutable for the disk it replaced.
2346 We note that too...
2347
2348 */
2349
2350 for(i=0;i<raidPtr->numRow;i++) {
2351 for(j=0;j<raidPtr->numCol;j++) {
2352 if ((raidPtr->Disks[i][j].spareRow ==
2353 r) &&
2354 (raidPtr->Disks[i][j].spareCol ==
2355 sparecol)) {
2356 srow = r;
2357 scol = sparecol;
2358 break;
2359 }
2360 }
2361 }
2362
2363 raidread_component_label(
2364 raidPtr->Disks[r][sparecol].dev,
2365 raidPtr->raid_cinfo[r][sparecol].ci_vp,
2366 &clabel);
2367 /* make sure status is noted */
2368 clabel.version = RF_COMPONENT_LABEL_VERSION;
2369 clabel.mod_counter = raidPtr->mod_counter;
2370 clabel.serial_number = raidPtr->serial_number;
2371 clabel.row = srow;
2372 clabel.column = scol;
2373 clabel.num_rows = raidPtr->numRow;
2374 clabel.num_columns = raidPtr->numCol;
2375 clabel.clean = RF_RAID_DIRTY; /* changed in a bit*/
2376 clabel.status = rf_ds_optimal;
2377 raidwrite_component_label(
2378 raidPtr->Disks[r][sparecol].dev,
2379 raidPtr->raid_cinfo[r][sparecol].ci_vp,
2380 &clabel);
2381 raidmarkclean( raidPtr->Disks[r][sparecol].dev,
2382 raidPtr->raid_cinfo[r][sparecol].ci_vp);
2383 }
2384 }
2385
2386 #endif
2387 }
2388
2389
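/*
 * rf_update_component_labels() makes two passes: first over the in-array
 * components that are still optimal (bumping mod_counter and rewriting
 * their labels), then over any used spares, which get a freshly
 * initialized label carrying the row/column of the disk they replaced.
 * When called with RF_FINAL_COMPONENT_UPDATE and the parity is known
 * good, the labels are also marked clean.
 */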
2390 void
2391 rf_update_component_labels(raidPtr, final)
2392 RF_Raid_t *raidPtr;
2393 int final;
2394 {
2395 RF_ComponentLabel_t clabel;
2396 int sparecol;
2397 int r,c;
2398 int i,j;
2399 int srow, scol;
2400
2401 srow = -1;
2402 scol = -1;
2403
2404 /* XXX should do extra checks to make sure things really are clean,
2405 rather than blindly setting the clean bit... */
2406
2407 raidPtr->mod_counter++;
2408
2409 for (r = 0; r < raidPtr->numRow; r++) {
2410 for (c = 0; c < raidPtr->numCol; c++) {
2411 if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
2412 raidread_component_label(
2413 raidPtr->Disks[r][c].dev,
2414 raidPtr->raid_cinfo[r][c].ci_vp,
2415 &clabel);
2416 /* make sure status is noted */
2417 clabel.status = rf_ds_optimal;
2418 /* bump the counter */
2419 clabel.mod_counter = raidPtr->mod_counter;
2420
2421 raidwrite_component_label(
2422 raidPtr->Disks[r][c].dev,
2423 raidPtr->raid_cinfo[r][c].ci_vp,
2424 &clabel);
2425 if (final == RF_FINAL_COMPONENT_UPDATE) {
2426 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2427 raidmarkclean(
2428 raidPtr->Disks[r][c].dev,
2429 raidPtr->raid_cinfo[r][c].ci_vp,
2430 raidPtr->mod_counter);
2431 }
2432 }
2433 }
2434 /* else we don't touch it.. */
2435 }
2436 }
2437
2438 for( c = 0; c < raidPtr->numSpare ; c++) {
2439 sparecol = raidPtr->numCol + c;
2440 /* Need to ensure that the reconstruct actually completed! */
2441 if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
2442 /*
2443
2444 we claim this disk is "optimal" if it's
2445 rf_ds_used_spare, as that means it should be
2446 directly substitutable for the disk it replaced.
2447 We note that too...
2448
2449 */
2450
2451 for(i=0;i<raidPtr->numRow;i++) {
2452 for(j=0;j<raidPtr->numCol;j++) {
2453 if ((raidPtr->Disks[i][j].spareRow ==
2454 0) &&
2455 (raidPtr->Disks[i][j].spareCol ==
2456 sparecol)) {
2457 srow = i;
2458 scol = j;
2459 break;
2460 }
2461 }
2462 }
2463
2464 /* XXX shouldn't *really* need this... */
2465 raidread_component_label(
2466 raidPtr->Disks[0][sparecol].dev,
2467 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2468 &clabel);
2469 /* make sure status is noted */
2470
2471 raid_init_component_label(raidPtr, &clabel);
2472
2473 clabel.mod_counter = raidPtr->mod_counter;
2474 clabel.row = srow;
2475 clabel.column = scol;
2476 clabel.status = rf_ds_optimal;
2477
2478 raidwrite_component_label(
2479 raidPtr->Disks[0][sparecol].dev,
2480 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2481 &clabel);
2482 if (final == RF_FINAL_COMPONENT_UPDATE) {
2483 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2484 raidmarkclean( raidPtr->Disks[0][sparecol].dev,
2485 raidPtr->raid_cinfo[0][sparecol].ci_vp,
2486 raidPtr->mod_counter);
2487 }
2488 }
2489 }
2490 }
2491 /* printf("Component labels updated\n"); */
2492 }
2493
2494 void
2495 rf_close_component(raidPtr, vp, auto_configured)
2496 RF_Raid_t *raidPtr;
2497 struct vnode *vp;
2498 int auto_configured;
2499 {
2500 struct proc *p;
2501
2502 p = raidPtr->engine_thread;
2503
2504 if (vp != NULL) {
2505 if (auto_configured == 1) {
2506 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2507 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2508 vput(vp);
2509
2510 } else {
2511 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2512 }
2513 } else {
2514 #if 0
2515 printf("vnode was NULL\n");
2516 #endif
2517 }
2518 }
2519
2520
2521 void
2522 rf_UnconfigureVnodes(raidPtr)
2523 RF_Raid_t *raidPtr;
2524 {
2525 int r,c;
2526 struct proc *p;
2527 struct vnode *vp;
2528 int acd;
2529
2530
2531 /* We take this opportunity to close the vnodes like we should.. */
2532
2533 p = raidPtr->engine_thread;
2534
2535 for (r = 0; r < raidPtr->numRow; r++) {
2536 for (c = 0; c < raidPtr->numCol; c++) {
2537 #if 0
2538 printf("raid%d: Closing vnode for row: %d col: %d\n",
2539 raidPtr->raidid, r, c);
2540 #endif
2541 vp = raidPtr->raid_cinfo[r][c].ci_vp;
2542 acd = raidPtr->Disks[r][c].auto_configured;
2543 rf_close_component(raidPtr, vp, acd);
2544 raidPtr->raid_cinfo[r][c].ci_vp = NULL;
2545 raidPtr->Disks[r][c].auto_configured = 0;
2546 }
2547 }
2548 for (r = 0; r < raidPtr->numSpare; r++) {
2549 #if 0
2550 printf("raid%d: Closing vnode for spare: %d\n",
2551 raidPtr->raidid, r);
2552 #endif
2553 vp = raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp;
2554 acd = raidPtr->Disks[0][raidPtr->numCol + r].auto_configured;
2555 rf_close_component(raidPtr, vp, acd);
2556 raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp = NULL;
2557 raidPtr->Disks[0][raidPtr->numCol + r].auto_configured = 0;
2558 }
2559 }
2560
2561
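/*
 * The following are kernel thread entry points.  Each sets the
 * corresponding *_in_progress flag on the RF_Raid_t, does its work at
 * splbio, clears the flag, and exits via kthread_exit(), so none of
 * them return to the caller.
 */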
2562 void
2563 rf_ReconThread(req)
2564 struct rf_recon_req *req;
2565 {
2566 int s;
2567 RF_Raid_t *raidPtr;
2568
2569 s = splbio();
2570 raidPtr = (RF_Raid_t *) req->raidPtr;
2571 raidPtr->recon_in_progress = 1;
2572
2573 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
2574 ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2575
2576 /* XXX get rid of this! we don't need it at all.. */
2577 RF_Free(req, sizeof(*req));
2578
2579 raidPtr->recon_in_progress = 0;
2580 splx(s);
2581
2582 /* That's all... */
2583 kthread_exit(0); /* does not return */
2584 }
2585
2586 void
2587 rf_RewriteParityThread(raidPtr)
2588 RF_Raid_t *raidPtr;
2589 {
2590 int retcode;
2591 int s;
2592
2593 raidPtr->parity_rewrite_in_progress = 1;
2594 s = splbio();
2595 retcode = rf_RewriteParity(raidPtr);
2596 splx(s);
2597 if (retcode) {
2598 printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2599 } else {
2600 /* set the clean bit! If we shutdown correctly,
2601 the clean bit on each component label will get
2602 set */
2603 raidPtr->parity_good = RF_RAID_CLEAN;
2604 }
2605 raidPtr->parity_rewrite_in_progress = 0;
2606
2607 /* Anyone waiting for us to stop? If so, inform them... */
2608 if (raidPtr->waitShutdown) {
2609 wakeup(&raidPtr->parity_rewrite_in_progress);
2610 }
2611
2612 /* That's all... */
2613 kthread_exit(0); /* does not return */
2614 }
2615
2616
2617 void
2618 rf_CopybackThread(raidPtr)
2619 RF_Raid_t *raidPtr;
2620 {
2621 int s;
2622
2623 raidPtr->copyback_in_progress = 1;
2624 s = splbio();
2625 rf_CopybackReconstructedData(raidPtr);
2626 splx(s);
2627 raidPtr->copyback_in_progress = 0;
2628
2629 /* That's all... */
2630 kthread_exit(0); /* does not return */
2631 }
2632
2633
2634 void
2635 rf_ReconstructInPlaceThread(req)
2636 struct rf_recon_req *req;
2637 {
2638 int retcode;
2639 int s;
2640 RF_Raid_t *raidPtr;
2641
2642 s = splbio();
2643 raidPtr = req->raidPtr;
2644 raidPtr->recon_in_progress = 1;
2645 retcode = rf_ReconstructInPlace(raidPtr, req->row, req->col);
2646 RF_Free(req, sizeof(*req));
2647 raidPtr->recon_in_progress = 0;
2648 splx(s);
2649
2650 /* That's all... */
2651 kthread_exit(0); /* does not return */
2652 }
2653
2654 void
2655 rf_mountroot_hook(dev)
2656 struct device *dev;
2657 {
2658
2659 }
2660
2661
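/*
 * Autoconfiguration support.  rf_find_raid_components() walks every
 * disk-class device in the system, reads its disklabel, and collects an
 * RF_AutoConfig_t for every FS_RAID partition with a plausible component
 * label.  rf_create_auto_sets() then groups those components into
 * configuration sets, rf_have_enough_components() decides whether a set
 * is complete enough to bring up, and rf_auto_config_set() actually
 * configures it.
 */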
2662 RF_AutoConfig_t *
2663 rf_find_raid_components()
2664 {
2665 struct devnametobdevmaj *dtobdm;
2666 struct vnode *vp;
2667 struct disklabel label;
2668 struct device *dv;
2669 char *cd_name;
2670 dev_t dev;
2671 int error;
2672 int i;
2673 int good_one;
2674 RF_ComponentLabel_t *clabel;
2675 RF_AutoConfig_t *ac_list;
2676 RF_AutoConfig_t *ac;
2677
2678
2679 /* initialize the AutoConfig list */
2680 ac_list = NULL;
2681
2682 /* we begin by trolling through *all* the devices on the system */
2683
2684 for (dv = alldevs.tqh_first; dv != NULL;
2685 dv = dv->dv_list.tqe_next) {
2686
2687 /* we are only interested in disks... */
2688 if (dv->dv_class != DV_DISK)
2689 continue;
2690
2691 /* we don't care about floppies... */
2692 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"fd")) {
2693 continue;
2694 }
2695 /* hdfd is the Atari/Hades floppy driver */
2696 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"hdfd")) {
2697 continue;
2698 }
2699 /* fdisa is the Atari/Milan floppy driver */
2700 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"fdisa")) {
2701 continue;
2702 }
2703
2704 /* need to find the device_name_to_block_device_major stuff */
2705 cd_name = dv->dv_cfdata->cf_driver->cd_name;
2706 dtobdm = dev_name2blk;
2707 while (dtobdm->d_name && strcmp(dtobdm->d_name, cd_name)) {
2708 dtobdm++;
2709 }
2710
2711 /* get a vnode for the raw partition of this disk */
2712
2713 dev = MAKEDISKDEV(dtobdm->d_maj, dv->dv_unit, RAW_PART);
2714 if (bdevvp(dev, &vp))
2715 panic("RAID can't alloc vnode");
2716
2717 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2718
2719 if (error) {
2720 /* "Who cares." Continue looking
2721 for something that exists*/
2722 vput(vp);
2723 continue;
2724 }
2725
2726 /* Ok, the disk exists. Go get the disklabel. */
2727 error = VOP_IOCTL(vp, DIOCGDINFO, (caddr_t)&label,
2728 FREAD, NOCRED, 0);
2729 if (error) {
2730 /*
2731 * XXX can't happen - open() would
2732 * have errored out (or faked up one)
2733 */
2734 printf("can't get label for dev %s%c (%d)!?!?\n",
2735 dv->dv_xname, 'a' + RAW_PART, error);
2736 }
2737
2738 /* don't need this any more. We'll allocate it again
2739 a little later if we really do... */
2740 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2741 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2742 vput(vp);
2743
2744 for (i=0; i < label.d_npartitions; i++) {
2745 /* We only support partitions marked as RAID */
2746 if (label.d_partitions[i].p_fstype != FS_RAID)
2747 continue;
2748
2749 dev = MAKEDISKDEV(dtobdm->d_maj, dv->dv_unit, i);
2750 if (bdevvp(dev, &vp))
2751 panic("RAID can't alloc vnode");
2752
2753 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2754 if (error) {
2755 /* Whatever... */
2756 vput(vp);
2757 continue;
2758 }
2759
2760 good_one = 0;
2761
2762 clabel = (RF_ComponentLabel_t *)
2763 malloc(sizeof(RF_ComponentLabel_t),
2764 M_RAIDFRAME, M_NOWAIT);
2765 if (clabel == NULL) {
2766 /* XXX CLEANUP HERE */
2767 printf("RAID auto config: out of memory!\n");
2768 return(NULL); /* XXX probably should panic? */
2769 }
2770
2771 if (!raidread_component_label(dev, vp, clabel)) {
2772 /* Got the label. Does it look reasonable? */
2773 if (rf_reasonable_label(clabel) &&
2774 (clabel->partitionSize <=
2775 label.d_partitions[i].p_size)) {
2776 #if DEBUG
2777 printf("Component on: %s%c: %d\n",
2778 dv->dv_xname, 'a'+i,
2779 label.d_partitions[i].p_size);
2780 rf_print_component_label(clabel);
2781 #endif
2782 /* if it's reasonable, add it,
2783 else ignore it. */
2784 ac = (RF_AutoConfig_t *)
2785 malloc(sizeof(RF_AutoConfig_t),
2786 M_RAIDFRAME,
2787 M_NOWAIT);
2788 if (ac == NULL) {
2789 /* XXX should panic?? */
2790 return(NULL);
2791 }
2792
2793 sprintf(ac->devname, "%s%c",
2794 dv->dv_xname, 'a'+i);
2795 ac->dev = dev;
2796 ac->vp = vp;
2797 ac->clabel = clabel;
2798 ac->next = ac_list;
2799 ac_list = ac;
2800 good_one = 1;
2801 }
2802 }
2803 if (!good_one) {
2804 /* cleanup */
2805 free(clabel, M_RAIDFRAME);
2806 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2807 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2808 vput(vp);
2809 }
2810 }
2811 }
2812 return(ac_list);
2813 }
2814
2815 static int
2816 rf_reasonable_label(clabel)
2817 RF_ComponentLabel_t *clabel;
2818 {
2819
2820 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2821 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2822 ((clabel->clean == RF_RAID_CLEAN) ||
2823 (clabel->clean == RF_RAID_DIRTY)) &&
2824 clabel->row >=0 &&
2825 clabel->column >= 0 &&
2826 clabel->num_rows > 0 &&
2827 clabel->num_columns > 0 &&
2828 clabel->row < clabel->num_rows &&
2829 clabel->column < clabel->num_columns &&
2830 clabel->blockSize > 0 &&
2831 clabel->numBlocks > 0) {
2832 /* label looks reasonable enough... */
2833 return(1);
2834 }
2835 return(0);
2836 }
2837
2838
2839 void
2840 rf_print_component_label(clabel)
2841 RF_ComponentLabel_t *clabel;
2842 {
2843 printf(" Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
2844 clabel->row, clabel->column,
2845 clabel->num_rows, clabel->num_columns);
2846 printf(" Version: %d Serial Number: %d Mod Counter: %d\n",
2847 clabel->version, clabel->serial_number,
2848 clabel->mod_counter);
2849 printf(" Clean: %s Status: %d\n",
2850 clabel->clean ? "Yes" : "No", clabel->status );
2851 printf(" sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
2852 clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
2853 printf(" RAID Level: %c blocksize: %d numBlocks: %d\n",
2854 (char) clabel->parityConfig, clabel->blockSize,
2855 clabel->numBlocks);
2856 printf(" Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
2857 printf(" Contains root partition: %s\n",
2858 clabel->root_partition ? "Yes" : "No" );
2859 printf(" Last configured as: raid%d\n", clabel->last_unit );
2860 #if 0
2861 printf(" Config order: %d\n", clabel->config_order);
2862 #endif
2863
2864 }
2865
2866 RF_ConfigSet_t *
2867 rf_create_auto_sets(ac_list)
2868 RF_AutoConfig_t *ac_list;
2869 {
2870 RF_AutoConfig_t *ac;
2871 RF_ConfigSet_t *config_sets;
2872 RF_ConfigSet_t *cset;
2873 RF_AutoConfig_t *ac_next;
2874
2875
2876 config_sets = NULL;
2877
2878 /* Go through the AutoConfig list, and figure out which components
2879 belong to what sets. */
2880 ac = ac_list;
2881 while(ac!=NULL) {
2882 /* we're going to putz with ac->next, so save it here
2883 for use at the end of the loop */
2884 ac_next = ac->next;
2885
2886 if (config_sets == NULL) {
2887 /* will need at least this one... */
2888 config_sets = (RF_ConfigSet_t *)
2889 malloc(sizeof(RF_ConfigSet_t),
2890 M_RAIDFRAME, M_NOWAIT);
2891 if (config_sets == NULL) {
2892 panic("rf_create_auto_sets: No memory!\n");
2893 }
2894 /* this one is easy :) */
2895 config_sets->ac = ac;
2896 config_sets->next = NULL;
2897 config_sets->rootable = 0;
2898 ac->next = NULL;
2899 } else {
2900 /* which set does this component fit into? */
2901 cset = config_sets;
2902 while(cset!=NULL) {
2903 if (rf_does_it_fit(cset, ac)) {
2904 /* looks like it matches... */
2905 ac->next = cset->ac;
2906 cset->ac = ac;
2907 break;
2908 }
2909 cset = cset->next;
2910 }
2911 if (cset==NULL) {
2912 /* didn't find a match above... new set..*/
2913 cset = (RF_ConfigSet_t *)
2914 malloc(sizeof(RF_ConfigSet_t),
2915 M_RAIDFRAME, M_NOWAIT);
2916 if (cset == NULL) {
2917 panic("rf_create_auto_sets: No memory!\n");
2918 }
2919 cset->ac = ac;
2920 ac->next = NULL;
2921 cset->next = config_sets;
2922 cset->rootable = 0;
2923 config_sets = cset;
2924 }
2925 }
2926 ac = ac_next;
2927 }
2928
2929
2930 return(config_sets);
2931 }
2932
2933 static int
2934 rf_does_it_fit(cset, ac)
2935 RF_ConfigSet_t *cset;
2936 RF_AutoConfig_t *ac;
2937 {
2938 RF_ComponentLabel_t *clabel1, *clabel2;
2939
2940 /* If this one matches the *first* one in the set, that's good
2941 enough, since the other members of the set would have been
2942 through here too... */
2943 /* note that we are not checking partitionSize here..
2944
2945 Note that we are also not checking the mod_counters here.
2946 	   If everything else matches except the mod_counter, that's
2947 good enough for this test. We will deal with the mod_counters
2948 a little later in the autoconfiguration process.
2949
2950 (clabel1->mod_counter == clabel2->mod_counter) &&
2951
2952 The reason we don't check for this is that failed disks
2953 will have lower modification counts. If those disks are
2954 not added to the set they used to belong to, then they will
2955 form their own set, which may result in 2 different sets,
2956 for example, competing to be configured at raid0, and
2957 perhaps competing to be the root filesystem set. If the
2958 wrong ones get configured, or both attempt to become /,
2959 	   weird behaviour and/or serious lossage will occur. Thus we
2960 need to bring them into the fold here, and kick them out at
2961 a later point.
2962
2963 */
2964
2965 clabel1 = cset->ac->clabel;
2966 clabel2 = ac->clabel;
2967 if ((clabel1->version == clabel2->version) &&
2968 (clabel1->serial_number == clabel2->serial_number) &&
2969 (clabel1->num_rows == clabel2->num_rows) &&
2970 (clabel1->num_columns == clabel2->num_columns) &&
2971 (clabel1->sectPerSU == clabel2->sectPerSU) &&
2972 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
2973 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
2974 (clabel1->parityConfig == clabel2->parityConfig) &&
2975 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
2976 (clabel1->blockSize == clabel2->blockSize) &&
2977 (clabel1->numBlocks == clabel2->numBlocks) &&
2978 (clabel1->autoconfigure == clabel2->autoconfigure) &&
2979 (clabel1->root_partition == clabel2->root_partition) &&
2980 (clabel1->last_unit == clabel2->last_unit) &&
2981 (clabel1->config_order == clabel2->config_order)) {
2982 		/* if it gets here, it almost *has* to be a match */
2983 } else {
2984 /* it's not consistent with somebody in the set..
2985 punt */
2986 return(0);
2987 }
2988 /* all was fine.. it must fit... */
2989 return(1);
2990 }
2991
2992 int
2993 rf_have_enough_components(cset)
2994 RF_ConfigSet_t *cset;
2995 {
2996 RF_AutoConfig_t *ac;
2997 RF_AutoConfig_t *auto_config;
2998 RF_ComponentLabel_t *clabel;
2999 int r,c;
3000 int num_rows;
3001 int num_cols;
3002 int num_missing;
3003 int mod_counter;
3004 int mod_counter_found;
3005 int even_pair_failed;
3006 char parity_type;
3007
3008
3009 /* check to see that we have enough 'live' components
3010 of this set. If so, we can configure it if necessary */
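	/*
	 * "Enough" is judged against the newest mod_counter seen in the
	 * set: only components carrying that counter are treated as
	 * present.  RAID 1 is special-cased below (a mirror pair may lose
	 * one of its two halves); for the other levels the count of
	 * missing components is compared against what the parity
	 * configuration can tolerate.
	 */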
3011
3012 num_rows = cset->ac->clabel->num_rows;
3013 num_cols = cset->ac->clabel->num_columns;
3014 parity_type = cset->ac->clabel->parityConfig;
3015
3016 /* XXX Check for duplicate components!?!?!? */
3017
3018 /* Determine what the mod_counter is supposed to be for this set. */
3019
3020 mod_counter_found = 0;
3021 mod_counter = 0;
3022 ac = cset->ac;
3023 while(ac!=NULL) {
3024 if (mod_counter_found==0) {
3025 mod_counter = ac->clabel->mod_counter;
3026 mod_counter_found = 1;
3027 } else {
3028 if (ac->clabel->mod_counter > mod_counter) {
3029 mod_counter = ac->clabel->mod_counter;
3030 }
3031 }
3032 ac = ac->next;
3033 }
3034
3035 num_missing = 0;
3036 auto_config = cset->ac;
3037
3038 for(r=0; r<num_rows; r++) {
3039 even_pair_failed = 0;
3040 for(c=0; c<num_cols; c++) {
3041 ac = auto_config;
3042 while(ac!=NULL) {
3043 if ((ac->clabel->row == r) &&
3044 (ac->clabel->column == c) &&
3045 (ac->clabel->mod_counter == mod_counter)) {
3046 /* it's this one... */
3047 #if DEBUG
3048 printf("Found: %s at %d,%d\n",
3049 ac->devname,r,c);
3050 #endif
3051 break;
3052 }
3053 ac=ac->next;
3054 }
3055 if (ac==NULL) {
3056 /* Didn't find one here! */
3057 /* special case for RAID 1, especially
3058 where there are more than 2
3059 components (where RAIDframe treats
3060 things a little differently :( ) */
3061 if (parity_type == '1') {
3062 if (c%2 == 0) { /* even component */
3063 even_pair_failed = 1;
3064 } else { /* odd component. If
3065 we're failed, and
3066 so is the even
3067 component, it's
3068 "Good Night, Charlie" */
3069 if (even_pair_failed == 1) {
3070 return(0);
3071 }
3072 }
3073 } else {
3074 /* normal accounting */
3075 num_missing++;
3076 }
3077 }
3078 if ((parity_type == '1') && (c%2 == 1)) {
3079 				/* Just finished the odd half of a
3080 				   mirror pair without bailing; reset
3081 				   even_pair_failed before the next pair. */
3082 even_pair_failed = 0;
3083 }
3084 }
3085 }
3086
3087 clabel = cset->ac->clabel;
3088
3089 if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
3090 ((clabel->parityConfig == '4') && (num_missing > 1)) ||
3091 ((clabel->parityConfig == '5') && (num_missing > 1))) {
3092 /* XXX this needs to be made *much* more general */
3093 /* Too many failures */
3094 return(0);
3095 }
3096 /* otherwise, all is well, and we've got enough to take a kick
3097 at autoconfiguring this set */
3098 return(1);
3099 }
3100
3101 void
3102 rf_create_configuration(ac,config,raidPtr)
3103 RF_AutoConfig_t *ac;
3104 RF_Config_t *config;
3105 RF_Raid_t *raidPtr;
3106 {
3107 RF_ComponentLabel_t *clabel;
3108 int i;
3109
3110 clabel = ac->clabel;
3111
3112 /* 1. Fill in the common stuff */
3113 config->numRow = clabel->num_rows;
3114 config->numCol = clabel->num_columns;
3115 config->numSpare = 0; /* XXX should this be set here? */
3116 config->sectPerSU = clabel->sectPerSU;
3117 config->SUsPerPU = clabel->SUsPerPU;
3118 config->SUsPerRU = clabel->SUsPerRU;
3119 config->parityConfig = clabel->parityConfig;
3120 /* XXX... */
3121 strcpy(config->diskQueueType,"fifo");
3122 config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3123 config->layoutSpecificSize = 0; /* XXX ?? */
3124
3125 while(ac!=NULL) {
3126 /* row/col values will be in range due to the checks
3127 		   in rf_reasonable_label() */
3128 strcpy(config->devnames[ac->clabel->row][ac->clabel->column],
3129 ac->devname);
3130 ac = ac->next;
3131 }
3132
3133 for(i=0;i<RF_MAXDBGV;i++) {
3134 		config->debugVars[i][0] = '\0';
3135 }
3136 }
3137
3138 int
3139 rf_set_autoconfig(raidPtr, new_value)
3140 RF_Raid_t *raidPtr;
3141 int new_value;
3142 {
3143 RF_ComponentLabel_t clabel;
3144 struct vnode *vp;
3145 dev_t dev;
3146 int row, column;
3147
3148 raidPtr->autoconfigure = new_value;
3149 for(row=0; row<raidPtr->numRow; row++) {
3150 for(column=0; column<raidPtr->numCol; column++) {
3151 if (raidPtr->Disks[row][column].status ==
3152 rf_ds_optimal) {
3153 dev = raidPtr->Disks[row][column].dev;
3154 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3155 raidread_component_label(dev, vp, &clabel);
3156 clabel.autoconfigure = new_value;
3157 raidwrite_component_label(dev, vp, &clabel);
3158 }
3159 }
3160 }
3161 return(new_value);
3162 }
3163
3164 int
3165 rf_set_rootpartition(raidPtr, new_value)
3166 RF_Raid_t *raidPtr;
3167 int new_value;
3168 {
3169 RF_ComponentLabel_t clabel;
3170 struct vnode *vp;
3171 dev_t dev;
3172 int row, column;
3173
3174 raidPtr->root_partition = new_value;
3175 for(row=0; row<raidPtr->numRow; row++) {
3176 for(column=0; column<raidPtr->numCol; column++) {
3177 if (raidPtr->Disks[row][column].status ==
3178 rf_ds_optimal) {
3179 dev = raidPtr->Disks[row][column].dev;
3180 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3181 raidread_component_label(dev, vp, &clabel);
3182 clabel.root_partition = new_value;
3183 raidwrite_component_label(dev, vp, &clabel);
3184 }
3185 }
3186 }
3187 return(new_value);
3188 }
3189
3190 void
3191 rf_release_all_vps(cset)
3192 RF_ConfigSet_t *cset;
3193 {
3194 RF_AutoConfig_t *ac;
3195
3196 ac = cset->ac;
3197 while(ac!=NULL) {
3198 /* Close the vp, and give it back */
3199 if (ac->vp) {
3200 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3201 VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3202 vput(ac->vp);
3203 ac->vp = NULL;
3204 }
3205 ac = ac->next;
3206 }
3207 }
3208
3209
3210 void
3211 rf_cleanup_config_set(cset)
3212 RF_ConfigSet_t *cset;
3213 {
3214 RF_AutoConfig_t *ac;
3215 RF_AutoConfig_t *next_ac;
3216
3217 ac = cset->ac;
3218 while(ac!=NULL) {
3219 next_ac = ac->next;
3220 /* nuke the label */
3221 free(ac->clabel, M_RAIDFRAME);
3222 /* cleanup the config structure */
3223 free(ac, M_RAIDFRAME);
3224 /* "next.." */
3225 ac = next_ac;
3226 }
3227 /* and, finally, nuke the config set */
3228 free(cset, M_RAIDFRAME);
3229 }
3230
3231
3232 void
3233 raid_init_component_label(raidPtr, clabel)
3234 RF_Raid_t *raidPtr;
3235 RF_ComponentLabel_t *clabel;
3236 {
3237 /* current version number */
3238 clabel->version = RF_COMPONENT_LABEL_VERSION;
3239 clabel->serial_number = raidPtr->serial_number;
3240 clabel->mod_counter = raidPtr->mod_counter;
3241 clabel->num_rows = raidPtr->numRow;
3242 clabel->num_columns = raidPtr->numCol;
3243 clabel->clean = RF_RAID_DIRTY; /* not clean */
3244 clabel->status = rf_ds_optimal; /* "It's good!" */
3245
3246 clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3247 clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3248 clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3249
3250 clabel->blockSize = raidPtr->bytesPerSector;
3251 clabel->numBlocks = raidPtr->sectorsPerDisk;
3252
3253 /* XXX not portable */
3254 clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3255 clabel->maxOutstanding = raidPtr->maxOutstanding;
3256 clabel->autoconfigure = raidPtr->autoconfigure;
3257 clabel->root_partition = raidPtr->root_partition;
3258 clabel->last_unit = raidPtr->raidid;
3259 clabel->config_order = raidPtr->config_order;
3260 }
3261
3262 int
3263 rf_auto_config_set(cset,unit)
3264 RF_ConfigSet_t *cset;
3265 int *unit;
3266 {
3267 RF_Raid_t *raidPtr;
3268 RF_Config_t *config;
3269 int raidID;
3270 int retcode;
3271
3272 printf("RAID autoconfigure\n");
3273
3274 retcode = 0;
3275 *unit = -1;
3276
3277 /* 1. Create a config structure */
3278
3279 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3280 M_RAIDFRAME,
3281 M_NOWAIT);
3282 if (config==NULL) {
3283 printf("Out of mem!?!?\n");
3284 /* XXX do something more intelligent here. */
3285 return(1);
3286 }
3287
3288 memset(config, 0, sizeof(RF_Config_t));
3289
3290 /* XXX raidID needs to be set correctly.. */
3291
3292 /*
3293 	   2. Figure out what RAID ID this one is supposed to live at.
3294 	   See if we can get the same RAID dev that it was configured
3295 	   on last time..
3296 */
3297
3298 raidID = cset->ac->clabel->last_unit;
3299 if ((raidID < 0) || (raidID >= numraid)) {
3300 /* let's not wander off into lala land. */
3301 raidID = numraid - 1;
3302 }
3303 if (raidPtrs[raidID]->valid != 0) {
3304
3305 /*
3306 Nope... Go looking for an alternative...
3307 Start high so we don't immediately use raid0 if that's
3308 not taken.
3309 */
3310
3311 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3312 if (raidPtrs[raidID]->valid == 0) {
3313 /* can use this one! */
3314 break;
3315 }
3316 }
3317 }
3318
3319 	if (raidID < 0) {
3320 		/* punt... */
3321 		printf("Unable to auto configure this set!\n");
3322 		printf("(Out of RAID devs!)\n");
		free(config, M_RAIDFRAME);
3323 		return(1);
3324 	}
3325 printf("Configuring raid%d:\n",raidID);
3326 raidPtr = raidPtrs[raidID];
3327
3328 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3329 raidPtr->raidid = raidID;
3330 raidPtr->openings = RAIDOUTSTANDING;
3331
3332 /* 3. Build the configuration structure */
3333 rf_create_configuration(cset->ac, config, raidPtr);
3334
3335 /* 4. Do the configuration */
3336 retcode = rf_Configure(raidPtr, config, cset->ac);
3337
3338 if (retcode == 0) {
3339
3340 raidinit(raidPtrs[raidID]);
3341
3342 rf_markalldirty(raidPtrs[raidID]);
3343 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3344 if (cset->ac->clabel->root_partition==1) {
3345 /* everything configured just fine. Make a note
3346 that this set is eligible to be root. */
3347 cset->rootable = 1;
3348 /* XXX do this here? */
3349 raidPtrs[raidID]->root_partition = 1;
3350 }
3351 }
3352
3353 /* 5. Cleanup */
3354 free(config, M_RAIDFRAME);
3355
3356 *unit = raidID;
3357 return(retcode);
3358 }
3359
3360 void
3361 rf_disk_unbusy(desc)
3362 RF_RaidAccessDesc_t *desc;
3363 {
3364 struct buf *bp;
3365
3366 bp = (struct buf *)desc->bp;
3367 disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3368 (bp->b_bcount - bp->b_resid));
3369 }
3370