/*	$NetBSD: rf_netbsdkintf.c,v 1.104.2.15 2002/09/17 21:20:52 nathanw Exp $	*/
2 /*-
3 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright (c) 1988 University of Utah.
40 * Copyright (c) 1990, 1993
41 * The Regents of the University of California. All rights reserved.
42 *
43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 * notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 * notice, this list of conditions and the following disclaimer in the
54 * documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 * must display the following acknowledgement:
57 * This product includes software developed by the University of
58 * California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * from: Utah $Hdr: cd.c 1.6 90/11/28$
76 *
77 * @(#)cd.c 8.2 (Berkeley) 11/16/93
78 */
79
80
81
82
83 /*
84 * Copyright (c) 1995 Carnegie-Mellon University.
85 * All rights reserved.
86 *
87 * Authors: Mark Holland, Jim Zelenka
88 *
89 * Permission to use, copy, modify and distribute this software and
90 * its documentation is hereby granted, provided that both the copyright
91 * notice and this permission notice appear in all copies of the
92 * software, derivative works or modified versions, and any portions
93 * thereof, and that both notices appear in supporting documentation.
94 *
95 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
96 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
97 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
98 *
99 * Carnegie Mellon requests users of this software to return to
100 *
101 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
102 * School of Computer Science
103 * Carnegie Mellon University
104 * Pittsburgh PA 15213-3890
105 *
106 * any improvements or extensions that they make and grant Carnegie the
107 * rights to redistribute these changes.
108 */
109
110 /***********************************************************
111 *
112 * rf_kintf.c -- the kernel interface routines for RAIDframe
113 *
114 ***********************************************************/
115
116 #include <sys/cdefs.h>
117 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.104.2.15 2002/09/17 21:20:52 nathanw Exp $");
118
119 #include <sys/param.h>
120 #include <sys/errno.h>
121 #include <sys/pool.h>
122 #include <sys/proc.h>
123 #include <sys/queue.h>
124 #include <sys/disk.h>
125 #include <sys/device.h>
126 #include <sys/stat.h>
127 #include <sys/ioctl.h>
128 #include <sys/fcntl.h>
129 #include <sys/systm.h>
130 #include <sys/namei.h>
131 #include <sys/vnode.h>
132 #include <sys/disklabel.h>
133 #include <sys/conf.h>
134 #include <sys/lock.h>
135 #include <sys/buf.h>
136 #include <sys/user.h>
137 #include <sys/reboot.h>
138
139 #include <dev/raidframe/raidframevar.h>
140 #include <dev/raidframe/raidframeio.h>
141 #include "raid.h"
142 #include "opt_raid_autoconfig.h"
143 #include "rf_raid.h"
144 #include "rf_copyback.h"
145 #include "rf_dag.h"
146 #include "rf_dagflags.h"
147 #include "rf_desc.h"
148 #include "rf_diskqueue.h"
149 #include "rf_etimer.h"
150 #include "rf_general.h"
151 #include "rf_kintf.h"
152 #include "rf_options.h"
153 #include "rf_driver.h"
154 #include "rf_parityscan.h"
155 #include "rf_threadstuff.h"
156
/* Debug verbosity knob: db1_printf() output appears only when > 0. */
int     rf_kdebug_level = 0;

#ifdef DEBUG
#define db1_printf(a) if (rf_kdebug_level > 0) printf a
#else				/* DEBUG */
#define db1_printf(a) { }
#endif				/* DEBUG */

static RF_Raid_t **raidPtrs;	/* global raid device descriptors */

/* Protects the two spare-table request/response queues below. */
RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)

static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
						 * spare table */
static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
						 * installation process */
173
/* prototypes */
static void KernelWakeupFunc(struct buf * bp);
static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
		   dev_t dev, RF_SectorNum_t startSect,
		   RF_SectorCount_t numSect, caddr_t buf,
		   void (*cbFunc) (struct buf *), void *cbArg,
		   int logBytesPerSector, struct proc * b_proc);
static void raidinit(RF_Raid_t *);

void raidattach(int);

dev_type_open(raidopen);
dev_type_close(raidclose);
dev_type_read(raidread);
dev_type_write(raidwrite);
dev_type_ioctl(raidioctl);
dev_type_strategy(raidstrategy);
dev_type_dump(raiddump);
dev_type_size(raidsize);

/* block-device switch entries for raid units */
const struct bdevsw raid_bdevsw = {
	raidopen, raidclose, raidstrategy, raidioctl,
	raiddump, raidsize, D_DISK
};

/* character-device switch entries for raid units */
const struct cdevsw raid_cdevsw = {
	raidopen, raidclose, raidread, raidwrite, raidioctl,
	nostop, notty, nopoll, nommap, D_DISK
};
203
/*
 * Pilfered from ccd.c
 */

struct raidbuf {
	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! */
	struct buf *rf_obp;	/* ptr. to original I/O buf */
	int     rf_flags;	/* misc. flags */
	RF_DiskQueueData_t *req;/* the request that this was part of.. */
};

/* component buffer pool */
struct pool raidframe_cbufpool;

/* Allocate/release a struct raidbuf from the component buffer pool. */
#define	RAIDGETBUF(rs) pool_get(&raidframe_cbufpool, PR_NOWAIT)
#define	RAIDPUTBUF(rs, cbp) pool_put(&raidframe_cbufpool, cbp)

/* XXX Not sure if the following should be replacing the raidPtrs above,
   or if it should be used in conjunction with that...
*/

struct raid_softc {
	int     sc_flags;	/* flags */
	int     sc_cflags;	/* configuration flags */
	size_t  sc_size;	/* size of the raid device */
	char    sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct bufq_state buf_queue;	/* used for the device queue */
};
/* sc_flags */
#define RAIDF_INITED	0x01	/* unit has been initialized */
#define RAIDF_WLABEL	0x02	/* label area is writable */
#define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
#define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
#define RAIDF_LOCKED	0x80	/* unit is locked */

/* Extract the RAID unit number from a dev_t. */
#define	raidunit(x)	DISKUNIT(x)
int numraid = 0;		/* number of units set up by raidattach() */
242
243 /*
244 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
245 * Be aware that large numbers can allow the driver to consume a lot of
246 * kernel memory, especially on writes, and in degraded mode reads.
247 *
248 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
249 * a single 64K write will typically require 64K for the old data,
250 * 64K for the old parity, and 64K for the new parity, for a total
251 * of 192K (if the parity buffer is not re-used immediately).
 * Even if it is used immediately, that's still 128K, which when multiplied
253 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
254 *
255 * Now in degraded mode, for example, a 64K read on the above setup may
256 * require data reconstruction, which will require *all* of the 4 remaining
257 * disks to participate -- 4 * 32K/disk == 128K again.
258 */
259
#ifndef RAIDOUTSTANDING
#define RAIDOUTSTANDING   6
#endif

/* dev_t of the raw partition used when reading/writing a unit's label */
#define RAIDLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))

/* declared here, and made public, for the benefit of KVM stuff.. */
struct raid_softc *raid_softc;

static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
				     struct disklabel *);
static void raidgetdisklabel(dev_t);
static void raidmakedisklabel(struct raid_softc *);

static int raidlock(struct raid_softc *);
static void raidunlock(struct raid_softc *);

static void rf_markalldirty(RF_Raid_t *);
void rf_mountroot_hook(struct device *);

/* fake struct device array used to nominate a raid unit as root device */
struct device *raidrootdev;

void rf_ReconThread(struct rf_recon_req *);
/* XXX what I want is: */
/*void rf_ReconThread(RF_Raid_t *raidPtr);  */
void rf_RewriteParityThread(RF_Raid_t *raidPtr);
void rf_CopybackThread(RF_Raid_t *raidPtr);
void rf_ReconstructInPlaceThread(struct rf_recon_req *);
void rf_buildroothack(void *);

/* autoconfiguration support */
RF_AutoConfig_t *rf_find_raid_components(void);
RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
static int rf_reasonable_label(RF_ComponentLabel_t *);
void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
int rf_set_autoconfig(RF_Raid_t *, int);
int rf_set_rootpartition(RF_Raid_t *, int);
void rf_release_all_vps(RF_ConfigSet_t *);
void rf_cleanup_config_set(RF_ConfigSet_t *);
int rf_have_enough_components(RF_ConfigSet_t *);
int rf_auto_config_set(RF_ConfigSet_t *, int *);

static int raidautoconfig = 0;	/* Debugging, mostly.  Set to 0 to not
				   allow autoconfig to take place.
				   Note that this is overridden by having
				   RAID_AUTOCONFIG as an option in the
				   kernel config file. */
308
/*
 * raidattach: pseudo-device attach routine, called once at boot with
 * the number of raid units to support.  Allocates and zeroes the
 * per-unit descriptor (raidPtrs[]), softc (raid_softc[]) and fake
 * root-device (raidrootdev[]) arrays, initializes the component
 * buffer pool and the RAIDframe engine, and — when autoconfiguration
 * is enabled — scans for RAID components and schedules a kernel
 * thread (rf_buildroothack) to configure the discovered sets.
 */
void
raidattach(num)
	int num;
{
	int raidID;
	int i, rc;
	RF_AutoConfig_t *ac_list;	/* autoconfig list */
	RF_ConfigSet_t *config_sets;

#ifdef DEBUG
	printf("raidattach: Asked for %d units\n", num);
#endif

	if (num <= 0) {
#ifdef DIAGNOSTIC
		panic("raidattach: count <= 0");
#endif
		return;
	}
	/* This is where all the initialization stuff gets done. */

	numraid = num;

	/* Make some space for requested number of units... */

	RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
	if (raidPtrs == NULL) {
		panic("raidPtrs is NULL!!\n");
	}

	/* Initialize the component buffer pool. */
	pool_init(&raidframe_cbufpool, sizeof(struct raidbuf), 0,
	    0, 0, "raidpl", NULL);

	rc = rf_mutex_init(&rf_sparet_wait_mutex);
	if (rc) {
		RF_PANIC();
	}

	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;

	for (i = 0; i < num; i++)
		raidPtrs[i] = NULL;
	rc = rf_BootRaidframe();
	if (rc == 0)
		printf("Kernelized RAIDframe activated\n");
	else
		panic("Serious error booting RAID!!\n");

	/* put together some datastructures like the CCD device does.. This
	 * lets us lock the device and what-not when it gets opened. */

	raid_softc = (struct raid_softc *)
	    malloc(num * sizeof(struct raid_softc),
	    M_RAIDFRAME, M_NOWAIT);
	if (raid_softc == NULL) {
		printf("WARNING: no memory for RAIDframe driver\n");
		return;
	}

	memset(raid_softc, 0, num * sizeof(struct raid_softc));

	raidrootdev = (struct device *)malloc(num * sizeof(struct device),
	    M_RAIDFRAME, M_NOWAIT);
	if (raidrootdev == NULL) {
		panic("No memory for RAIDframe driver!!?!?!\n");
	}

	for (raidID = 0; raidID < num; raidID++) {
		/* per-unit FCFS queue of pending bufs (see raidstrategy) */
		bufq_alloc(&raid_softc[raidID].buf_queue, BUFQ_FCFS);

		/* hand-rolled struct device so a unit can be booted_device */
		raidrootdev[raidID].dv_class = DV_DISK;
		raidrootdev[raidID].dv_cfdata = NULL;
		raidrootdev[raidID].dv_unit = raidID;
		raidrootdev[raidID].dv_parent = NULL;
		raidrootdev[raidID].dv_flags = 0;
		/* NOTE(review): sprintf into fixed-size dv_xname; fits for
		   realistic unit counts, but snprintf would be safer. */
		sprintf(raidrootdev[raidID].dv_xname,"raid%d",raidID);

		RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
		    (RF_Raid_t *));
		if (raidPtrs[raidID] == NULL) {
			/* shrink numraid so later code never touches the
			   units we failed to allocate */
			printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
			numraid = raidID;
			return;
		}
	}

#ifdef RAID_AUTOCONFIG
	raidautoconfig = 1;
#endif

	if (raidautoconfig) {
		/* 1. locate all RAID components on the system */

#if DEBUG
		printf("Searching for raid components...\n");
#endif
		ac_list = rf_find_raid_components();

		/* 2. sort them into their respective sets */

		config_sets = rf_create_auto_sets(ac_list);

		/* 3. evaluate each set and configure the valid ones
		   This gets done in rf_buildroothack() */

		/* schedule the creation of the thread to do the
		   "/ on RAID" stuff */

		kthread_create(rf_buildroothack,config_sets);

#if 0
		mountroothook_establish(rf_mountroot_hook, &raidrootdev[0]);
#endif
	}

}
426
/*
 * rf_buildroothack: kernel-thread body scheduled from raidattach().
 * Walks the list of component sets found by autoconfiguration and
 * configures each complete set whose label requests autoconfigure.
 * If exactly one configured set is marked rootable, nominate it as
 * the boot device; with more than one we cannot guess, so ask the
 * user at mountroot time (RB_ASKNAME).  Resources of sets that are
 * not configured are released, and every set is cleaned up.
 */
void
rf_buildroothack(arg)
	void *arg;
{
	RF_ConfigSet_t *config_sets = arg;
	RF_ConfigSet_t *cset;
	RF_ConfigSet_t *next_cset;
	int retcode;
	int raidID;
	int rootID;
	int num_root;

	rootID = 0;
	num_root = 0;
	cset = config_sets;
	while(cset != NULL ) {
		/* rf_cleanup_config_set() frees cset, so grab next first */
		next_cset = cset->next;
		if (rf_have_enough_components(cset) &&
		    cset->ac->clabel->autoconfigure==1) {
			retcode = rf_auto_config_set(cset,&raidID);
			if (!retcode) {
				if (cset->rootable) {
					rootID = raidID;
					num_root++;
				}
			} else {
				/* The autoconfig didn't work :( */
#if DEBUG
				printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
#endif
				rf_release_all_vps(cset);
			}
		} else {
			/* we're not autoconfiguring this set...
			   release the associated resources */
			rf_release_all_vps(cset);
		}
		/* cleanup */
		rf_cleanup_config_set(cset);
		cset = next_cset;
	}

	/* we found something bootable... */

	if (num_root == 1) {
		booted_device = &raidrootdev[rootID];
	} else if (num_root > 1) {
		/* we can't guess.. require the user to answer... */
		boothowto |= RB_ASKNAME;
	}
}
478
479
480 int
481 raidsize(dev)
482 dev_t dev;
483 {
484 struct raid_softc *rs;
485 struct disklabel *lp;
486 int part, unit, omask, size;
487
488 unit = raidunit(dev);
489 if (unit >= numraid)
490 return (-1);
491 rs = &raid_softc[unit];
492
493 if ((rs->sc_flags & RAIDF_INITED) == 0)
494 return (-1);
495
496 part = DISKPART(dev);
497 omask = rs->sc_dkdev.dk_openmask & (1 << part);
498 lp = rs->sc_dkdev.dk_label;
499
500 if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
501 return (-1);
502
503 if (lp->d_partitions[part].p_fstype != FS_SWAP)
504 size = -1;
505 else
506 size = lp->d_partitions[part].p_size *
507 (lp->d_secsize / DEV_BSIZE);
508
509 if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
510 return (-1);
511
512 return (size);
513
514 }
515
516 int
517 raiddump(dev, blkno, va, size)
518 dev_t dev;
519 daddr_t blkno;
520 caddr_t va;
521 size_t size;
522 {
523 /* Not implemented. */
524 return ENXIO;
525 }
526 /* ARGSUSED */
/*
 * raidopen: open entry point (block and character).  Takes the unit
 * lock, re-reads the disklabel on the first open of a configured
 * unit, verifies the requested partition exists, records the open in
 * the per-format open masks, and on the very first open marks the
 * component labels dirty so an unclean shutdown can be detected.
 */
/* ARGSUSED */
int
raidopen(dev, flags, fmt, p)
	dev_t dev;
	int flags, fmt;
	struct proc *p;
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	struct disklabel *lp;
	int part, pmask;
	int error = 0;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	/* Serialize against concurrent (un)configuration of this unit. */
	if ((error = raidlock(rs)) != 0)
		return (error);
	lp = rs->sc_dkdev.dk_label;

	part = DISKPART(dev);
	pmask = (1 << part);

	db1_printf(("Opening raid device number: %d partition: %d\n",
		unit, part));


	/* First open of a configured unit: refresh the disklabel. */
	if ((rs->sc_flags & RAIDF_INITED) &&
	    (rs->sc_dkdev.dk_openmask == 0))
		raidgetdisklabel(dev);

	/* make sure that this partition exists */

	if (part != RAW_PART) {
		db1_printf(("Not a raw partition..\n"));
		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			raidunlock(rs);
			db1_printf(("Bailing out...\n"));
			return (error);
		}
	}
	/* Prevent this unit from being unconfigured while open. */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* First one... mark things as dirty... Note that we *MUST*
		   have done a configure before this.  I DO NOT WANT TO BE
		   SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
		   THAT THEY BELONG TOGETHER!!!!! */
		/* XXX should check to see if we're only open for reading
		   here... If so, we needn't do this, but then need some
		   other way of keeping track of what's happened.. */

		rf_markalldirty( raidPtrs[unit] );
	}


	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	raidunlock(rs);

	return (error);


}
605 /* ARGSUSED */
/*
 * raidclose: close entry point.  Clears the caller's bit in the
 * per-format open masks; on the last close of a configured unit,
 * writes final ("clean") component labels, and if the system is
 * shutting down also shuts the RAID set itself down and detaches
 * the disk.
 */
/* ARGSUSED */
int
raidclose(dev, flags, fmt, p)
	dev_t dev;
	int flags, fmt;
	struct proc *p;
{
	int unit = raidunit(dev);
	struct raid_softc *rs;
	int error = 0;
	int part;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	/* Serialize against concurrent (un)configuration of this unit. */
	if ((error = raidlock(rs)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* Last one... device is not unconfigured yet.
		   Device shutdown has taken care of setting the
		   clean bits if RAIDF_INITED is not set
		   mark things as clean... */
#if 0
		printf("Last one on raid%d.  Updating status.\n",unit);
#endif
		rf_update_component_labels(raidPtrs[unit],
					   RF_FINAL_COMPONENT_UPDATE);
		if (doing_shutdown) {
			/* last one, and we're going down, so
			   lights out for this RAID set too. */
			error = rf_Shutdown(raidPtrs[unit]);

			/* It's no longer initialized... */
			rs->sc_flags &= ~RAIDF_INITED;

			/* Detach the disk. */
			disk_detach(&rs->sc_dkdev);
		}
	}

	/* NOTE(review): any rf_Shutdown() error stored in `error' is
	   discarded here; this function always returns 0 past this point. */
	raidunlock(rs);
	return (0);

}
667
668 void
669 raidstrategy(bp)
670 struct buf *bp;
671 {
672 int s;
673
674 unsigned int raidID = raidunit(bp->b_dev);
675 RF_Raid_t *raidPtr;
676 struct raid_softc *rs = &raid_softc[raidID];
677 struct disklabel *lp;
678 int wlabel;
679
680 if ((rs->sc_flags & RAIDF_INITED) ==0) {
681 bp->b_error = ENXIO;
682 bp->b_flags |= B_ERROR;
683 bp->b_resid = bp->b_bcount;
684 biodone(bp);
685 return;
686 }
687 if (raidID >= numraid || !raidPtrs[raidID]) {
688 bp->b_error = ENODEV;
689 bp->b_flags |= B_ERROR;
690 bp->b_resid = bp->b_bcount;
691 biodone(bp);
692 return;
693 }
694 raidPtr = raidPtrs[raidID];
695 if (!raidPtr->valid) {
696 bp->b_error = ENODEV;
697 bp->b_flags |= B_ERROR;
698 bp->b_resid = bp->b_bcount;
699 biodone(bp);
700 return;
701 }
702 if (bp->b_bcount == 0) {
703 db1_printf(("b_bcount is zero..\n"));
704 biodone(bp);
705 return;
706 }
707 lp = rs->sc_dkdev.dk_label;
708
709 /*
710 * Do bounds checking and adjust transfer. If there's an
711 * error, the bounds check will flag that for us.
712 */
713
714 wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
715 if (DISKPART(bp->b_dev) != RAW_PART)
716 if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
717 db1_printf(("Bounds check failed!!:%d %d\n",
718 (int) bp->b_blkno, (int) wlabel));
719 biodone(bp);
720 return;
721 }
722 s = splbio();
723
724 bp->b_resid = 0;
725
726 /* stuff it onto our queue */
727 BUFQ_PUT(&rs->buf_queue, bp);
728
729 raidstart(raidPtrs[raidID]);
730
731 splx(s);
732 }
733 /* ARGSUSED */
734 int
735 raidread(dev, uio, flags)
736 dev_t dev;
737 struct uio *uio;
738 int flags;
739 {
740 int unit = raidunit(dev);
741 struct raid_softc *rs;
742 int part;
743
744 if (unit >= numraid)
745 return (ENXIO);
746 rs = &raid_softc[unit];
747
748 if ((rs->sc_flags & RAIDF_INITED) == 0)
749 return (ENXIO);
750 part = DISKPART(dev);
751
752 db1_printf(("raidread: unit: %d partition: %d\n", unit, part));
753
754 return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
755
756 }
757 /* ARGSUSED */
758 int
759 raidwrite(dev, uio, flags)
760 dev_t dev;
761 struct uio *uio;
762 int flags;
763 {
764 int unit = raidunit(dev);
765 struct raid_softc *rs;
766
767 if (unit >= numraid)
768 return (ENXIO);
769 rs = &raid_softc[unit];
770
771 if ((rs->sc_flags & RAIDF_INITED) == 0)
772 return (ENXIO);
773 db1_printf(("raidwrite\n"));
774 return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
775
776 }
777
778 int
779 raidioctl(dev, cmd, data, flag, p)
780 dev_t dev;
781 u_long cmd;
782 caddr_t data;
783 int flag;
784 struct proc *p;
785 {
786 int unit = raidunit(dev);
787 int error = 0;
788 int part, pmask;
789 struct raid_softc *rs;
790 RF_Config_t *k_cfg, *u_cfg;
791 RF_Raid_t *raidPtr;
792 RF_RaidDisk_t *diskPtr;
793 RF_AccTotals_t *totals;
794 RF_DeviceConfig_t *d_cfg, **ucfgp;
795 u_char *specific_buf;
796 int retcode = 0;
797 int row;
798 int column;
799 int raidid;
800 struct rf_recon_req *rrcopy, *rr;
801 RF_ComponentLabel_t *clabel;
802 RF_ComponentLabel_t ci_label;
803 RF_ComponentLabel_t **clabel_ptr;
804 RF_SingleComponent_t *sparePtr,*componentPtr;
805 RF_SingleComponent_t hot_spare;
806 RF_SingleComponent_t component;
807 RF_ProgressInfo_t progressInfo, **progressInfoPtr;
808 int i, j, d;
809 #ifdef __HAVE_OLD_DISKLABEL
810 struct disklabel newlabel;
811 #endif
812
813 if (unit >= numraid)
814 return (ENXIO);
815 rs = &raid_softc[unit];
816 raidPtr = raidPtrs[unit];
817
818 db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
819 (int) DISKPART(dev), (int) unit, (int) cmd));
820
821 /* Must be open for writes for these commands... */
822 switch (cmd) {
823 case DIOCSDINFO:
824 case DIOCWDINFO:
825 #ifdef __HAVE_OLD_DISKLABEL
826 case ODIOCWDINFO:
827 case ODIOCSDINFO:
828 #endif
829 case DIOCWLABEL:
830 if ((flag & FWRITE) == 0)
831 return (EBADF);
832 }
833
834 /* Must be initialized for these... */
835 switch (cmd) {
836 case DIOCGDINFO:
837 case DIOCSDINFO:
838 case DIOCWDINFO:
839 #ifdef __HAVE_OLD_DISKLABEL
840 case ODIOCGDINFO:
841 case ODIOCWDINFO:
842 case ODIOCSDINFO:
843 case ODIOCGDEFLABEL:
844 #endif
845 case DIOCGPART:
846 case DIOCWLABEL:
847 case DIOCGDEFLABEL:
848 case RAIDFRAME_SHUTDOWN:
849 case RAIDFRAME_REWRITEPARITY:
850 case RAIDFRAME_GET_INFO:
851 case RAIDFRAME_RESET_ACCTOTALS:
852 case RAIDFRAME_GET_ACCTOTALS:
853 case RAIDFRAME_KEEP_ACCTOTALS:
854 case RAIDFRAME_GET_SIZE:
855 case RAIDFRAME_FAIL_DISK:
856 case RAIDFRAME_COPYBACK:
857 case RAIDFRAME_CHECK_RECON_STATUS:
858 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
859 case RAIDFRAME_GET_COMPONENT_LABEL:
860 case RAIDFRAME_SET_COMPONENT_LABEL:
861 case RAIDFRAME_ADD_HOT_SPARE:
862 case RAIDFRAME_REMOVE_HOT_SPARE:
863 case RAIDFRAME_INIT_LABELS:
864 case RAIDFRAME_REBUILD_IN_PLACE:
865 case RAIDFRAME_CHECK_PARITY:
866 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
867 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
868 case RAIDFRAME_CHECK_COPYBACK_STATUS:
869 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
870 case RAIDFRAME_SET_AUTOCONFIG:
871 case RAIDFRAME_SET_ROOT:
872 case RAIDFRAME_DELETE_COMPONENT:
873 case RAIDFRAME_INCORPORATE_HOT_SPARE:
874 if ((rs->sc_flags & RAIDF_INITED) == 0)
875 return (ENXIO);
876 }
877
878 switch (cmd) {
879
880 /* configure the system */
881 case RAIDFRAME_CONFIGURE:
882
883 if (raidPtr->valid) {
884 /* There is a valid RAID set running on this unit! */
885 printf("raid%d: Device already configured!\n",unit);
886 return(EINVAL);
887 }
888
889 /* copy-in the configuration information */
890 /* data points to a pointer to the configuration structure */
891
892 u_cfg = *((RF_Config_t **) data);
893 RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
894 if (k_cfg == NULL) {
895 return (ENOMEM);
896 }
897 retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
898 sizeof(RF_Config_t));
899 if (retcode) {
900 RF_Free(k_cfg, sizeof(RF_Config_t));
901 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
902 retcode));
903 return (retcode);
904 }
905 /* allocate a buffer for the layout-specific data, and copy it
906 * in */
907 if (k_cfg->layoutSpecificSize) {
908 if (k_cfg->layoutSpecificSize > 10000) {
909 /* sanity check */
910 RF_Free(k_cfg, sizeof(RF_Config_t));
911 return (EINVAL);
912 }
913 RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
914 (u_char *));
915 if (specific_buf == NULL) {
916 RF_Free(k_cfg, sizeof(RF_Config_t));
917 return (ENOMEM);
918 }
919 retcode = copyin(k_cfg->layoutSpecific,
920 (caddr_t) specific_buf,
921 k_cfg->layoutSpecificSize);
922 if (retcode) {
923 RF_Free(k_cfg, sizeof(RF_Config_t));
924 RF_Free(specific_buf,
925 k_cfg->layoutSpecificSize);
926 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
927 retcode));
928 return (retcode);
929 }
930 } else
931 specific_buf = NULL;
932 k_cfg->layoutSpecific = specific_buf;
933
934 /* should do some kind of sanity check on the configuration.
935 * Store the sum of all the bytes in the last byte? */
936
937 /* configure the system */
938
939 /*
940 * Clear the entire RAID descriptor, just to make sure
941 * there is no stale data left in the case of a
942 * reconfiguration
943 */
944 memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
945 raidPtr->raidid = unit;
946
947 retcode = rf_Configure(raidPtr, k_cfg, NULL);
948
949 if (retcode == 0) {
950
951 /* allow this many simultaneous IO's to
952 this RAID device */
953 raidPtr->openings = RAIDOUTSTANDING;
954
955 raidinit(raidPtr);
956 rf_markalldirty(raidPtr);
957 }
958 /* free the buffers. No return code here. */
959 if (k_cfg->layoutSpecificSize) {
960 RF_Free(specific_buf, k_cfg->layoutSpecificSize);
961 }
962 RF_Free(k_cfg, sizeof(RF_Config_t));
963
964 return (retcode);
965
966 /* shutdown the system */
967 case RAIDFRAME_SHUTDOWN:
968
969 if ((error = raidlock(rs)) != 0)
970 return (error);
971
972 /*
973 * If somebody has a partition mounted, we shouldn't
974 * shutdown.
975 */
976
977 part = DISKPART(dev);
978 pmask = (1 << part);
979 if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
980 ((rs->sc_dkdev.dk_bopenmask & pmask) &&
981 (rs->sc_dkdev.dk_copenmask & pmask))) {
982 raidunlock(rs);
983 return (EBUSY);
984 }
985
986 retcode = rf_Shutdown(raidPtr);
987
988 /* It's no longer initialized... */
989 rs->sc_flags &= ~RAIDF_INITED;
990
991 /* Detach the disk. */
992 disk_detach(&rs->sc_dkdev);
993
994 raidunlock(rs);
995
996 return (retcode);
997 case RAIDFRAME_GET_COMPONENT_LABEL:
998 clabel_ptr = (RF_ComponentLabel_t **) data;
999 /* need to read the component label for the disk indicated
1000 by row,column in clabel */
1001
1002 /* For practice, let's get it directly fromdisk, rather
1003 than from the in-core copy */
1004 RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
1005 (RF_ComponentLabel_t *));
1006 if (clabel == NULL)
1007 return (ENOMEM);
1008
1009 memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
1010
1011 retcode = copyin( *clabel_ptr, clabel,
1012 sizeof(RF_ComponentLabel_t));
1013
1014 if (retcode) {
1015 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1016 return(retcode);
1017 }
1018
1019 row = clabel->row;
1020 column = clabel->column;
1021
1022 if ((row < 0) || (row >= raidPtr->numRow) ||
1023 (column < 0) || (column >= raidPtr->numCol +
1024 raidPtr->numSpare)) {
1025 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1026 return(EINVAL);
1027 }
1028
1029 raidread_component_label(raidPtr->Disks[row][column].dev,
1030 raidPtr->raid_cinfo[row][column].ci_vp,
1031 clabel );
1032
1033 retcode = copyout((caddr_t) clabel,
1034 (caddr_t) *clabel_ptr,
1035 sizeof(RF_ComponentLabel_t));
1036 RF_Free( clabel, sizeof(RF_ComponentLabel_t));
1037 return (retcode);
1038
1039 case RAIDFRAME_SET_COMPONENT_LABEL:
1040 clabel = (RF_ComponentLabel_t *) data;
1041
1042 /* XXX check the label for valid stuff... */
1043 /* Note that some things *should not* get modified --
1044 the user should be re-initing the labels instead of
1045 trying to patch things.
1046 */
1047
1048 raidid = raidPtr->raidid;
1049 printf("raid%d: Got component label:\n", raidid);
1050 printf("raid%d: Version: %d\n", raidid, clabel->version);
1051 printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
1052 printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
1053 printf("raid%d: Row: %d\n", raidid, clabel->row);
1054 printf("raid%d: Column: %d\n", raidid, clabel->column);
1055 printf("raid%d: Num Rows: %d\n", raidid, clabel->num_rows);
1056 printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
1057 printf("raid%d: Clean: %d\n", raidid, clabel->clean);
1058 printf("raid%d: Status: %d\n", raidid, clabel->status);
1059
1060 row = clabel->row;
1061 column = clabel->column;
1062
1063 if ((row < 0) || (row >= raidPtr->numRow) ||
1064 (column < 0) || (column >= raidPtr->numCol)) {
1065 return(EINVAL);
1066 }
1067
1068 /* XXX this isn't allowed to do anything for now :-) */
1069
1070 /* XXX and before it is, we need to fill in the rest
1071 of the fields!?!?!?! */
1072 #if 0
1073 raidwrite_component_label(
1074 raidPtr->Disks[row][column].dev,
1075 raidPtr->raid_cinfo[row][column].ci_vp,
1076 clabel );
1077 #endif
1078 return (0);
1079
1080 case RAIDFRAME_INIT_LABELS:
1081 clabel = (RF_ComponentLabel_t *) data;
1082 /*
1083 we only want the serial number from
1084 the above. We get all the rest of the information
1085 from the config that was used to create this RAID
1086 set.
1087 */
1088
1089 raidPtr->serial_number = clabel->serial_number;
1090
1091 raid_init_component_label(raidPtr, &ci_label);
1092 ci_label.serial_number = clabel->serial_number;
1093
1094 for(row=0;row<raidPtr->numRow;row++) {
1095 ci_label.row = row;
1096 for(column=0;column<raidPtr->numCol;column++) {
1097 diskPtr = &raidPtr->Disks[row][column];
1098 if (!RF_DEAD_DISK(diskPtr->status)) {
1099 ci_label.partitionSize = diskPtr->partitionSize;
1100 ci_label.column = column;
1101 raidwrite_component_label(
1102 raidPtr->Disks[row][column].dev,
1103 raidPtr->raid_cinfo[row][column].ci_vp,
1104 &ci_label );
1105 }
1106 }
1107 }
1108
1109 return (retcode);
1110 case RAIDFRAME_SET_AUTOCONFIG:
1111 d = rf_set_autoconfig(raidPtr, *(int *) data);
1112 printf("raid%d: New autoconfig value is: %d\n",
1113 raidPtr->raidid, d);
1114 *(int *) data = d;
1115 return (retcode);
1116
1117 case RAIDFRAME_SET_ROOT:
1118 d = rf_set_rootpartition(raidPtr, *(int *) data);
1119 printf("raid%d: New rootpartition value is: %d\n",
1120 raidPtr->raidid, d);
1121 *(int *) data = d;
1122 return (retcode);
1123
1124 /* initialize all parity */
1125 case RAIDFRAME_REWRITEPARITY:
1126
1127 if (raidPtr->Layout.map->faultsTolerated == 0) {
1128 /* Parity for RAID 0 is trivially correct */
1129 raidPtr->parity_good = RF_RAID_CLEAN;
1130 return(0);
1131 }
1132
1133 if (raidPtr->parity_rewrite_in_progress == 1) {
1134 /* Re-write is already in progress! */
1135 return(EINVAL);
1136 }
1137
1138 retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1139 rf_RewriteParityThread,
1140 raidPtr,"raid_parity");
1141 return (retcode);
1142
1143
1144 case RAIDFRAME_ADD_HOT_SPARE:
1145 sparePtr = (RF_SingleComponent_t *) data;
1146 memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
1147 retcode = rf_add_hot_spare(raidPtr, &hot_spare);
1148 return(retcode);
1149
1150 case RAIDFRAME_REMOVE_HOT_SPARE:
1151 return(retcode);
1152
1153 case RAIDFRAME_DELETE_COMPONENT:
1154 componentPtr = (RF_SingleComponent_t *)data;
1155 memcpy( &component, componentPtr,
1156 sizeof(RF_SingleComponent_t));
1157 retcode = rf_delete_component(raidPtr, &component);
1158 return(retcode);
1159
1160 case RAIDFRAME_INCORPORATE_HOT_SPARE:
1161 componentPtr = (RF_SingleComponent_t *)data;
1162 memcpy( &component, componentPtr,
1163 sizeof(RF_SingleComponent_t));
1164 retcode = rf_incorporate_hot_spare(raidPtr, &component);
1165 return(retcode);
1166
1167 case RAIDFRAME_REBUILD_IN_PLACE:
1168
1169 if (raidPtr->Layout.map->faultsTolerated == 0) {
1170 /* Can't do this on a RAID 0!! */
1171 return(EINVAL);
1172 }
1173
1174 if (raidPtr->recon_in_progress == 1) {
1175 /* a reconstruct is already in progress! */
1176 return(EINVAL);
1177 }
1178
1179 componentPtr = (RF_SingleComponent_t *) data;
1180 memcpy( &component, componentPtr,
1181 sizeof(RF_SingleComponent_t));
1182 row = component.row;
1183 column = component.column;
1184 printf("raid%d: Rebuild: %d %d\n", raidPtr->raidid,
1185 row, column);
1186 if ((row < 0) || (row >= raidPtr->numRow) ||
1187 (column < 0) || (column >= raidPtr->numCol)) {
1188 return(EINVAL);
1189 }
1190
1191 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1192 if (rrcopy == NULL)
1193 return(ENOMEM);
1194
1195 rrcopy->raidPtr = (void *) raidPtr;
1196 rrcopy->row = row;
1197 rrcopy->col = column;
1198
1199 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1200 rf_ReconstructInPlaceThread,
1201 rrcopy,"raid_reconip");
1202 return(retcode);
1203
1204 case RAIDFRAME_GET_INFO:
1205 if (!raidPtr->valid)
1206 return (ENODEV);
1207 ucfgp = (RF_DeviceConfig_t **) data;
1208 RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
1209 (RF_DeviceConfig_t *));
1210 if (d_cfg == NULL)
1211 return (ENOMEM);
1212 memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
1213 d_cfg->rows = raidPtr->numRow;
1214 d_cfg->cols = raidPtr->numCol;
1215 d_cfg->ndevs = raidPtr->numRow * raidPtr->numCol;
1216 if (d_cfg->ndevs >= RF_MAX_DISKS) {
1217 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1218 return (ENOMEM);
1219 }
1220 d_cfg->nspares = raidPtr->numSpare;
1221 if (d_cfg->nspares >= RF_MAX_DISKS) {
1222 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1223 return (ENOMEM);
1224 }
1225 d_cfg->maxqdepth = raidPtr->maxQueueDepth;
1226 d = 0;
1227 for (i = 0; i < d_cfg->rows; i++) {
1228 for (j = 0; j < d_cfg->cols; j++) {
1229 d_cfg->devs[d] = raidPtr->Disks[i][j];
1230 d++;
1231 }
1232 }
1233 for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
1234 d_cfg->spares[i] = raidPtr->Disks[0][j];
1235 }
1236 retcode = copyout((caddr_t) d_cfg, (caddr_t) * ucfgp,
1237 sizeof(RF_DeviceConfig_t));
1238 RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
1239
1240 return (retcode);
1241
1242 case RAIDFRAME_CHECK_PARITY:
1243 *(int *) data = raidPtr->parity_good;
1244 return (0);
1245
1246 case RAIDFRAME_RESET_ACCTOTALS:
1247 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1248 return (0);
1249
1250 case RAIDFRAME_GET_ACCTOTALS:
1251 totals = (RF_AccTotals_t *) data;
1252 *totals = raidPtr->acc_totals;
1253 return (0);
1254
1255 case RAIDFRAME_KEEP_ACCTOTALS:
1256 raidPtr->keep_acc_totals = *(int *)data;
1257 return (0);
1258
1259 case RAIDFRAME_GET_SIZE:
1260 *(int *) data = raidPtr->totalSectors;
1261 return (0);
1262
1263 /* fail a disk & optionally start reconstruction */
1264 case RAIDFRAME_FAIL_DISK:
1265
1266 if (raidPtr->Layout.map->faultsTolerated == 0) {
1267 /* Can't do this on a RAID 0!! */
1268 return(EINVAL);
1269 }
1270
1271 rr = (struct rf_recon_req *) data;
1272
1273 if (rr->row < 0 || rr->row >= raidPtr->numRow
1274 || rr->col < 0 || rr->col >= raidPtr->numCol)
1275 return (EINVAL);
1276
1277 printf("raid%d: Failing the disk: row: %d col: %d\n",
1278 unit, rr->row, rr->col);
1279
1280 /* make a copy of the recon request so that we don't rely on
1281 * the user's buffer */
1282 RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
1283 if (rrcopy == NULL)
1284 return(ENOMEM);
1285 memcpy(rrcopy, rr, sizeof(*rr));
1286 rrcopy->raidPtr = (void *) raidPtr;
1287
1288 retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
1289 rf_ReconThread,
1290 rrcopy,"raid_recon");
1291 return (0);
1292
1293 /* invoke a copyback operation after recon on whatever disk
1294 * needs it, if any */
1295 case RAIDFRAME_COPYBACK:
1296
1297 if (raidPtr->Layout.map->faultsTolerated == 0) {
1298 /* This makes no sense on a RAID 0!! */
1299 return(EINVAL);
1300 }
1301
1302 if (raidPtr->copyback_in_progress == 1) {
1303 /* Copyback is already in progress! */
1304 return(EINVAL);
1305 }
1306
1307 retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
1308 rf_CopybackThread,
1309 raidPtr,"raid_copyback");
1310 return (retcode);
1311
1312 /* return the percentage completion of reconstruction */
1313 case RAIDFRAME_CHECK_RECON_STATUS:
1314 if (raidPtr->Layout.map->faultsTolerated == 0) {
1315 /* This makes no sense on a RAID 0, so tell the
1316 user it's done. */
1317 *(int *) data = 100;
1318 return(0);
1319 }
1320 row = 0; /* XXX we only consider a single row... */
1321 if (raidPtr->status[row] != rf_rs_reconstructing)
1322 *(int *) data = 100;
1323 else
1324 *(int *) data = raidPtr->reconControl[row]->percentComplete;
1325 return (0);
1326 case RAIDFRAME_CHECK_RECON_STATUS_EXT:
1327 progressInfoPtr = (RF_ProgressInfo_t **) data;
1328 row = 0; /* XXX we only consider a single row... */
1329 if (raidPtr->status[row] != rf_rs_reconstructing) {
1330 progressInfo.remaining = 0;
1331 progressInfo.completed = 100;
1332 progressInfo.total = 100;
1333 } else {
1334 progressInfo.total =
1335 raidPtr->reconControl[row]->numRUsTotal;
1336 progressInfo.completed =
1337 raidPtr->reconControl[row]->numRUsComplete;
1338 progressInfo.remaining = progressInfo.total -
1339 progressInfo.completed;
1340 }
1341 retcode = copyout((caddr_t) &progressInfo,
1342 (caddr_t) *progressInfoPtr,
1343 sizeof(RF_ProgressInfo_t));
1344 return (retcode);
1345
1346 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
1347 if (raidPtr->Layout.map->faultsTolerated == 0) {
1348 /* This makes no sense on a RAID 0, so tell the
1349 user it's done. */
1350 *(int *) data = 100;
1351 return(0);
1352 }
1353 if (raidPtr->parity_rewrite_in_progress == 1) {
1354 *(int *) data = 100 *
1355 raidPtr->parity_rewrite_stripes_done /
1356 raidPtr->Layout.numStripe;
1357 } else {
1358 *(int *) data = 100;
1359 }
1360 return (0);
1361
1362 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
1363 progressInfoPtr = (RF_ProgressInfo_t **) data;
1364 if (raidPtr->parity_rewrite_in_progress == 1) {
1365 progressInfo.total = raidPtr->Layout.numStripe;
1366 progressInfo.completed =
1367 raidPtr->parity_rewrite_stripes_done;
1368 progressInfo.remaining = progressInfo.total -
1369 progressInfo.completed;
1370 } else {
1371 progressInfo.remaining = 0;
1372 progressInfo.completed = 100;
1373 progressInfo.total = 100;
1374 }
1375 retcode = copyout((caddr_t) &progressInfo,
1376 (caddr_t) *progressInfoPtr,
1377 sizeof(RF_ProgressInfo_t));
1378 return (retcode);
1379
1380 case RAIDFRAME_CHECK_COPYBACK_STATUS:
1381 if (raidPtr->Layout.map->faultsTolerated == 0) {
1382 /* This makes no sense on a RAID 0 */
1383 *(int *) data = 100;
1384 return(0);
1385 }
1386 if (raidPtr->copyback_in_progress == 1) {
1387 *(int *) data = 100 * raidPtr->copyback_stripes_done /
1388 raidPtr->Layout.numStripe;
1389 } else {
1390 *(int *) data = 100;
1391 }
1392 return (0);
1393
1394 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
1395 progressInfoPtr = (RF_ProgressInfo_t **) data;
1396 if (raidPtr->copyback_in_progress == 1) {
1397 progressInfo.total = raidPtr->Layout.numStripe;
1398 progressInfo.completed =
1399 raidPtr->copyback_stripes_done;
1400 progressInfo.remaining = progressInfo.total -
1401 progressInfo.completed;
1402 } else {
1403 progressInfo.remaining = 0;
1404 progressInfo.completed = 100;
1405 progressInfo.total = 100;
1406 }
1407 retcode = copyout((caddr_t) &progressInfo,
1408 (caddr_t) *progressInfoPtr,
1409 sizeof(RF_ProgressInfo_t));
1410 return (retcode);
1411
1412 /* the sparetable daemon calls this to wait for the kernel to
1413 * need a spare table. this ioctl does not return until a
1414 * spare table is needed. XXX -- calling mpsleep here in the
1415 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1416 * -- I should either compute the spare table in the kernel,
1417 * or have a different -- XXX XXX -- interface (a different
1418 * character device) for delivering the table -- XXX */
1419 #if 0
1420 case RAIDFRAME_SPARET_WAIT:
1421 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1422 while (!rf_sparet_wait_queue)
1423 mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
1424 waitreq = rf_sparet_wait_queue;
1425 rf_sparet_wait_queue = rf_sparet_wait_queue->next;
1426 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1427
1428 /* structure assignment */
1429 *((RF_SparetWait_t *) data) = *waitreq;
1430
1431 RF_Free(waitreq, sizeof(*waitreq));
1432 return (0);
1433
1434 /* wakes up a process waiting on SPARET_WAIT and puts an error
1435 * code in it that will cause the dameon to exit */
1436 case RAIDFRAME_ABORT_SPARET_WAIT:
1437 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1438 waitreq->fcol = -1;
1439 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1440 waitreq->next = rf_sparet_wait_queue;
1441 rf_sparet_wait_queue = waitreq;
1442 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1443 wakeup(&rf_sparet_wait_queue);
1444 return (0);
1445
1446 /* used by the spare table daemon to deliver a spare table
1447 * into the kernel */
1448 case RAIDFRAME_SEND_SPARET:
1449
1450 /* install the spare table */
1451 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1452
1453 /* respond to the requestor. the return status of the spare
1454 * table installation is passed in the "fcol" field */
1455 RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
1456 waitreq->fcol = retcode;
1457 RF_LOCK_MUTEX(rf_sparet_wait_mutex);
1458 waitreq->next = rf_sparet_resp_queue;
1459 rf_sparet_resp_queue = waitreq;
1460 wakeup(&rf_sparet_resp_queue);
1461 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
1462
1463 return (retcode);
1464 #endif
1465
1466 default:
1467 break; /* fall through to the os-specific code below */
1468
1469 }
1470
1471 if (!raidPtr->valid)
1472 return (EINVAL);
1473
1474 /*
1475 * Add support for "regular" device ioctls here.
1476 */
1477
1478 switch (cmd) {
1479 case DIOCGDINFO:
1480 *(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
1481 break;
1482 #ifdef __HAVE_OLD_DISKLABEL
1483 case ODIOCGDINFO:
1484 newlabel = *(rs->sc_dkdev.dk_label);
1485 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1486 return ENOTTY;
1487 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1488 break;
1489 #endif
1490
1491 case DIOCGPART:
1492 ((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
1493 ((struct partinfo *) data)->part =
1494 &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
1495 break;
1496
1497 case DIOCWDINFO:
1498 case DIOCSDINFO:
1499 #ifdef __HAVE_OLD_DISKLABEL
1500 case ODIOCWDINFO:
1501 case ODIOCSDINFO:
1502 #endif
1503 {
1504 struct disklabel *lp;
1505 #ifdef __HAVE_OLD_DISKLABEL
1506 if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
1507 memset(&newlabel, 0, sizeof newlabel);
1508 memcpy(&newlabel, data, sizeof (struct olddisklabel));
1509 lp = &newlabel;
1510 } else
1511 #endif
1512 lp = (struct disklabel *)data;
1513
1514 if ((error = raidlock(rs)) != 0)
1515 return (error);
1516
1517 rs->sc_flags |= RAIDF_LABELLING;
1518
1519 error = setdisklabel(rs->sc_dkdev.dk_label,
1520 lp, 0, rs->sc_dkdev.dk_cpulabel);
1521 if (error == 0) {
1522 if (cmd == DIOCWDINFO
1523 #ifdef __HAVE_OLD_DISKLABEL
1524 || cmd == ODIOCWDINFO
1525 #endif
1526 )
1527 error = writedisklabel(RAIDLABELDEV(dev),
1528 raidstrategy, rs->sc_dkdev.dk_label,
1529 rs->sc_dkdev.dk_cpulabel);
1530 }
1531 rs->sc_flags &= ~RAIDF_LABELLING;
1532
1533 raidunlock(rs);
1534
1535 if (error)
1536 return (error);
1537 break;
1538 }
1539
1540 case DIOCWLABEL:
1541 if (*(int *) data != 0)
1542 rs->sc_flags |= RAIDF_WLABEL;
1543 else
1544 rs->sc_flags &= ~RAIDF_WLABEL;
1545 break;
1546
1547 case DIOCGDEFLABEL:
1548 raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
1549 break;
1550
1551 #ifdef __HAVE_OLD_DISKLABEL
1552 case ODIOCGDEFLABEL:
1553 raidgetdefaultlabel(raidPtr, rs, &newlabel);
1554 if (newlabel.d_npartitions > OLDMAXPARTITIONS)
1555 return ENOTTY;
1556 memcpy(data, &newlabel, sizeof (struct olddisklabel));
1557 break;
1558 #endif
1559
1560 default:
1561 retcode = ENOTTY;
1562 }
1563 return (retcode);
1564
1565 }
1566
1567
1568 /* raidinit -- complete the rest of the initialization for the
1569 RAIDframe device. */
1570
1571
1572 static void
1573 raidinit(raidPtr)
1574 RF_Raid_t *raidPtr;
1575 {
1576 struct raid_softc *rs;
1577 int unit;
1578
1579 unit = raidPtr->raidid;
1580
1581 rs = &raid_softc[unit];
1582
1583 /* XXX should check return code first... */
1584 rs->sc_flags |= RAIDF_INITED;
1585
1586 sprintf(rs->sc_xname, "raid%d", unit); /* XXX doesn't check bounds. */
1587
1588 rs->sc_dkdev.dk_name = rs->sc_xname;
1589
1590 /* disk_attach actually creates space for the CPU disklabel, among
1591 * other things, so it's critical to call this *BEFORE* we try putzing
1592 * with disklabels. */
1593
1594 disk_attach(&rs->sc_dkdev);
1595
1596 /* XXX There may be a weird interaction here between this, and
1597 * protectedSectors, as used in RAIDframe. */
1598
1599 rs->sc_size = raidPtr->totalSectors;
1600
1601 }
1602
1603 /* wake up the daemon & tell it to get us a spare table
1604 * XXX
1605 * the entries in the queues should be tagged with the raidPtr
1606 * so that in the extremely rare case that two recons happen at once,
1607 * we know for which device were requesting a spare table
1608 * XXX
1609 *
1610 * XXX This code is not currently used. GO
1611 */
/*
 * Queue "req" for the user-level sparetable daemon, wake the daemon,
 * then sleep until it posts a response on rf_sparet_resp_queue.
 * Returns the "fcol" status field from the daemon's response and
 * frees the response structure (which is a different object than
 * the request we enqueued).
 */
int
rf_GetSpareTableFromDaemon(req)
	RF_SparetWait_t *req;
{
	int retcode;

	/* push the request onto the wait queue and poke the daemon */
	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	wakeup(&rf_sparet_wait_queue);

	/* NOTE(review): this comment predates the switch to tsleep() --
	 * tsleep() does not drop rf_sparet_wait_mutex, so it looks like
	 * the mutex is held across the sleep here.  Confirm that the
	 * RF_LOCK_MUTEX implementation tolerates this. */
	/* mpsleep unlocks the mutex */
	while (!rf_sparet_resp_queue) {
		tsleep(&rf_sparet_resp_queue, PRIBIO,
		       "raidframe getsparetable", 0);
	}
	/* dequeue the daemon's response */
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

	/* the daemon passes its status back in fcol */
	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we
					 * alloc'd */
	return (retcode);
}
1637
1638 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1639 * bp & passes it down.
1640 * any calls originating in the kernel must use non-blocking I/O
1641 * do some extra sanity checking to return "appropriate" error values for
1642 * certain conditions (to make some standard utilities work)
1643 *
1644 * Formerly known as: rf_DoAccessKernel
1645 */
void
raidstart(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_SectorCount_t num_blocks, pb, sum;
	RF_RaidAddr_t raid_addr;
	int retcode;
	struct partition *pp;
	daddr_t blocknum;
	int unit;
	struct raid_softc *rs;
	int do_async;
	struct buf *bp;

	unit = raidPtr->raidid;
	rs = &raid_softc[unit];

	/* quick check to see if anything has died recently */
	RF_LOCK_MUTEX(raidPtr->mutex);
	if (raidPtr->numNewFailures > 0) {
		rf_update_component_labels(raidPtr,
					   RF_NORMAL_COMPONENT_UPDATE);
		raidPtr->numNewFailures--;
	}

	/* Check to see if we're at the limit... */
	/* Loop invariant: raidPtr->mutex is held at the top of each
	 * iteration and released while we pull a buf off the queue and
	 * hand it to RAIDframe. */
	while (raidPtr->openings > 0) {
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/* get the next item, if any, from the queue */
		if ((bp = BUFQ_GET(&rs->buf_queue)) == NULL) {
			/* nothing more to do */
			/* NOTE: we return with the mutex *not* held here;
			 * the final unlock below is only reached when the
			 * openings test fails. */
			return;
		}

		/* Ok, for the bp we have here, bp->b_blkno is relative to the
		 * partition.. Need to make it absolute to the underlying
		 * device.. */

		blocknum = bp->b_blkno;
		if (DISKPART(bp->b_dev) != RAW_PART) {
			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
			blocknum += pp->p_offset;
		}

		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
			    (int) blocknum));

		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));

		/* *THIS* is where we adjust what block we're going to...
		 * but DO NOT TOUCH bp->b_blkno!!! */
		raid_addr = blocknum;

		/* num_blocks: whole sectors in the request; pb: 1 if there
		 * is a partial trailing sector. */
		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
		sum = raid_addr + num_blocks + pb;
		if (1 || rf_debugKernelAccess) {
			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
				    (int) raid_addr, (int) sum, (int) num_blocks,
				    (int) pb, (int) bp->b_resid));
		}
		/* reject requests that run past the end of the array;
		 * the "sum < x" tests catch arithmetic wraparound. */
		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
		    || (sum < num_blocks) || (sum < pb)) {
			bp->b_error = ENOSPC;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			RF_LOCK_MUTEX(raidPtr->mutex);
			continue;
		}
		/*
		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
		 */

		/* reject requests that aren't a multiple of the sector size */
		if (bp->b_bcount & raidPtr->sectorMask) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			RF_LOCK_MUTEX(raidPtr->mutex);
			continue;

		}
		db1_printf(("Calling DoAccess..\n"));


		/* consume one opening for this request */
		RF_LOCK_MUTEX(raidPtr->mutex);
		raidPtr->openings--;
		RF_UNLOCK_MUTEX(raidPtr->mutex);

		/*
		 * Everything is async.
		 */
		do_async = 1;

		disk_busy(&rs->sc_dkdev);

		/* XXX we're still at splbio() here... do we *really*
		   need to be? */

		/* don't ever condition on bp->b_flags & B_WRITE.
		 * always condition on B_READ instead */

		/* NOTE(review): retcode is not examined -- presumably
		 * errors are reported through the async completion path;
		 * confirm rf_DoAccess's nonblocking contract. */
		retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
				      RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
				      do_async, raid_addr, num_blocks,
				      bp->b_data, bp, RF_DAG_NONBLOCKING_IO);

		RF_LOCK_MUTEX(raidPtr->mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->mutex);
}
1760
1761
1762
1763
1764 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
1765
int
rf_DispatchKernelIO(queue, req)
	RF_DiskQueue_t *queue;
	RF_DiskQueueData_t *req;
{
	/* map the RAIDframe I/O type onto a buf-layer op */
	int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
	struct buf *bp;
	struct raidbuf *raidbp = NULL;
	struct raid_softc *rs;
	int unit;
	int s;

	s=0;
	/* s = splbio();*/ /* want to test this */
	/* XXX along with the vnode, we also need the softc associated with
	 * this device.. */

	/* remember which queue this request came from; KernelWakeupFunc
	 * needs it to do the completion accounting */
	req->queue = queue;

	unit = queue->raidPtr->raidid;

	db1_printf(("DispatchKernelIO unit: %d\n", unit));

	if (unit >= numraid) {
		printf("Invalid unit number: %d %d\n", unit, numraid);
		panic("Invalid Unit number in rf_DispatchKernelIO\n");
	}
	rs = &raid_softc[unit];

	bp = req->bp;
#if 1
	/* XXX when there is a physical disk failure, someone is passing us a
	 * buffer that contains old stuff!! Attempt to deal with this problem
	 * without taking a performance hit... (not sure where the real bug
	 * is. It's buried in RAIDframe somewhere) :-( GO ) */

	/* clear any stale error state left over from a previous use of
	 * this buf */
	if (bp->b_flags & B_ERROR) {
		bp->b_flags &= ~B_ERROR;
	}
	if (bp->b_error != 0) {
		bp->b_error = 0;
	}
#endif
	/* grab a per-unit raidbuf to carry this request down to the
	 * component device */
	raidbp = RAIDGETBUF(rs);

	raidbp->rf_flags = 0;	/* XXX not really used anywhere... */

	/*
	 * context for raidiodone
	 */
	raidbp->rf_obp = bp;
	raidbp->req = req;

	LIST_INIT(&raidbp->rf_buf.b_dep);

	switch (req->type) {
	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
		/* XXX need to do something extra here.. */
		/* I'm leaving this in, as I've never actually seen it used,
		 * and I'd like folks to report it... GO */
		printf(("WAKEUP CALLED\n"));
		queue->numOutstanding++;

		/* XXX need to glue the original buffer into this?? */

		/* complete the NOP immediately -- no real I/O is issued */
		KernelWakeupFunc(&raidbp->rf_buf);
		break;

	case RF_IO_TYPE_READ:
	case RF_IO_TYPE_WRITE:

		if (req->tracerec) {
			RF_ETIMER_START(req->tracerec->timer);
		}
		/* set up the component-level buf; KernelWakeupFunc fires
		 * on completion */
		InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
		    op | bp->b_flags, queue->rf_cinfo->ci_dev,
		    req->sectorOffset, req->numSector,
		    req->buf, KernelWakeupFunc, (void *) req,
		    queue->raidPtr->logBytesPerSector, req->b_proc);

		if (rf_debugKernelAccess) {
			db1_printf(("dispatch: bp->b_blkno = %ld\n",
				(long) bp->b_blkno));
		}
		queue->numOutstanding++;
		queue->last_deq_sector = req->sectorOffset;
		/* acc wouldn't have been let in if there were any pending
		 * reqs at any other priority */
		queue->curPriority = req->priority;

		db1_printf(("Going for %c to unit %d row %d col %d\n",
			req->type, unit, queue->row, queue->col));
		db1_printf(("sector %d count %d (%d bytes) %d\n",
			(int) req->sectorOffset, (int) req->numSector,
			(int) (req->numSector <<
			    queue->raidPtr->logBytesPerSector),
			(int) queue->raidPtr->logBytesPerSector));
		/* writes must bump the vnode's output counter */
		if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
			raidbp->rf_buf.b_vp->v_numoutput++;
		}
		VOP_STRATEGY(&raidbp->rf_buf);

		break;

	default:
		panic("bad req->type in rf_DispatchKernelIO");
	}
	db1_printf(("Exiting from DispatchKernelIO\n"));
	/* splx(s); */ /* want to test this */
	return (0);
}
/* this is the callback function associated with an I/O invoked from
1878 kernel code.
1879 */
static void
KernelWakeupFunc(vbp)
	struct buf *vbp;
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;
	/* vbp is really the raidbuf we embedded the buf in */
	struct raidbuf *raidbp = (struct raidbuf *) vbp;
	struct buf *bp;
	struct raid_softc *rs;
	int unit;
	int s;

	s = splbio();
	db1_printf(("recovering the request queue:\n"));
	req = raidbp->req;

	/* the original (upper-level) buf this component I/O was part of */
	bp = raidbp->rf_obp;

	queue = (RF_DiskQueue_t *) req->queue;

	/* propagate any component-level error up to the original buf */
	if (raidbp->rf_buf.b_flags & B_ERROR) {
		bp->b_flags |= B_ERROR;
		bp->b_error = raidbp->rf_buf.b_error ?
		    raidbp->rf_buf.b_error : EIO;
	}

	/* XXX methinks this could be wrong... */
#if 1
	bp->b_resid = raidbp->rf_buf.b_resid;
#endif

	/* account the elapsed disk time in the trace record, if any */
	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		RF_LOCK_MUTEX(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		RF_UNLOCK_MUTEX(rf_tracing_mutex);
	}
	bp->b_bcount = raidbp->rf_buf.b_bcount;	/* XXXX ?? */

	unit = queue->raidPtr->raidid;	/* *Much* simpler :-> */


	/* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
	 * ballistic, and mark the component as hosed... */

	if (bp->b_flags & B_ERROR) {
		/* Mark the disk as dead */
		/* but only mark it once... */
		if (queue->raidPtr->Disks[queue->row][queue->col].status ==
		    rf_ds_optimal) {
			printf("raid%d: IO Error. Marking %s as failed.\n",
			       unit, queue->raidPtr->Disks[queue->row][queue->col].devname);
			queue->raidPtr->Disks[queue->row][queue->col].status =
			    rf_ds_failed;
			queue->raidPtr->status[queue->row] = rf_rs_degraded;
			queue->raidPtr->numFailures++;
			queue->raidPtr->numNewFailures++;
		} else {	/* Disk is already dead... */
			/* printf("Disk already marked as dead!\n"); */
		}

	}

	/* return the raidbuf to the per-unit pool before completing,
	 * so it is available for re-use */
	rs = &raid_softc[unit];
	RAIDPUTBUF(rs, raidbp);

	rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
	(req->CompleteFunc) (req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);

	splx(s);
}
1954
1955
1956
1957 /*
1958 * initialize a buf structure for doing an I/O in the kernel.
1959 */
1960 static void
1961 InitBP(bp, b_vp, rw_flag, dev, startSect, numSect, buf, cbFunc, cbArg,
1962 logBytesPerSector, b_proc)
1963 struct buf *bp;
1964 struct vnode *b_vp;
1965 unsigned rw_flag;
1966 dev_t dev;
1967 RF_SectorNum_t startSect;
1968 RF_SectorCount_t numSect;
1969 caddr_t buf;
1970 void (*cbFunc) (struct buf *);
1971 void *cbArg;
1972 int logBytesPerSector;
1973 struct proc *b_proc;
1974 {
1975 /* bp->b_flags = B_PHYS | rw_flag; */
1976 bp->b_flags = B_CALL | rw_flag; /* XXX need B_PHYS here too??? */
1977 bp->b_bcount = numSect << logBytesPerSector;
1978 bp->b_bufsize = bp->b_bcount;
1979 bp->b_error = 0;
1980 bp->b_dev = dev;
1981 bp->b_data = buf;
1982 bp->b_blkno = startSect;
1983 bp->b_resid = bp->b_bcount; /* XXX is this right!??!?!! */
1984 if (bp->b_bcount == 0) {
1985 panic("bp->b_bcount is zero in InitBP!!\n");
1986 }
1987 bp->b_proc = b_proc;
1988 bp->b_iodone = cbFunc;
1989 bp->b_vp = b_vp;
1990
1991 }
1992
1993 static void
1994 raidgetdefaultlabel(raidPtr, rs, lp)
1995 RF_Raid_t *raidPtr;
1996 struct raid_softc *rs;
1997 struct disklabel *lp;
1998 {
1999 db1_printf(("Building a default label...\n"));
2000 memset(lp, 0, sizeof(*lp));
2001
2002 /* fabricate a label... */
2003 lp->d_secperunit = raidPtr->totalSectors;
2004 lp->d_secsize = raidPtr->bytesPerSector;
2005 lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
2006 lp->d_ntracks = 4 * raidPtr->numCol;
2007 lp->d_ncylinders = raidPtr->totalSectors /
2008 (lp->d_nsectors * lp->d_ntracks);
2009 lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
2010
2011 strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
2012 lp->d_type = DTYPE_RAID;
2013 strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
2014 lp->d_rpm = 3600;
2015 lp->d_interleave = 1;
2016 lp->d_flags = 0;
2017
2018 lp->d_partitions[RAW_PART].p_offset = 0;
2019 lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
2020 lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
2021 lp->d_npartitions = RAW_PART + 1;
2022
2023 lp->d_magic = DISKMAGIC;
2024 lp->d_magic2 = DISKMAGIC;
2025 lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
2026
2027 }
2028 /*
2029 * Read the disklabel from the raid device. If one is not present, fake one
2030 * up.
2031 */
static void
raidgetdisklabel(dev)
	dev_t dev;
{
	int unit = raidunit(dev);
	struct raid_softc *rs = &raid_softc[unit];
	char *errstring;
	struct disklabel *lp = rs->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
	RF_Raid_t *raidPtr;

	db1_printf(("Getting the disklabel...\n"));

	memset(clp, 0, sizeof(*clp));

	raidPtr = raidPtrs[unit];

	/* start from a fabricated default; readdisklabel() overwrites it
	 * if a real label is found on the device */
	raidgetdefaultlabel(raidPtr, rs, lp);

	/*
	 * Call the generic disklabel extraction routine.
	 */
	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
	if (errstring)
		raidmakedisklabel(rs);
	else {
		int i;
		struct partition *pp;

		/*
		 * Sanity check whether the found disklabel is valid.
		 *
		 * This is necessary since total size of the raid device
		 * may vary when an interleave is changed even though exactly
		 * same components are used, and an old disklabel may be
		 * used if that is found.
		 */
		if (lp->d_secperunit != rs->sc_size)
			printf("raid%d: WARNING: %s: "
			    "total sector size in disklabel (%d) != "
			    "the size of raid (%ld)\n", unit, rs->sc_xname,
			    lp->d_secperunit, (long) rs->sc_size);
		/* warn about any partition extending past the device end */
		for (i = 0; i < lp->d_npartitions; i++) {
			pp = &lp->d_partitions[i];
			if (pp->p_offset + pp->p_size > rs->sc_size)
				printf("raid%d: WARNING: %s: end of partition `%c' "
				    "exceeds the size of raid (%ld)\n",
				    unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
		}
	}

}
2085 /*
2086 * Take care of things one might want to take care of in the event
2087 * that a disklabel isn't present.
2088 */
2089 static void
2090 raidmakedisklabel(rs)
2091 struct raid_softc *rs;
2092 {
2093 struct disklabel *lp = rs->sc_dkdev.dk_label;
2094 db1_printf(("Making a label..\n"));
2095
2096 /*
2097 * For historical reasons, if there's no disklabel present
2098 * the raw partition must be marked FS_BSDFFS.
2099 */
2100
2101 lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
2102
2103 strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
2104
2105 lp->d_checksum = dkcksum(lp);
2106 }
2107 /*
2108 * Lookup the provided name in the filesystem. If the file exists,
2109 * is a valid block device, and isn't being used by anyone else,
2110 * set *vpp to the file's vnode.
2111 * You'll find the original of this in ccd.c
2112 */
int
raidlookup(path, p, vpp)
	char *path;
	struct proc *p;
	struct vnode **vpp;	/* result */
{
	struct nameidata nd;
	struct vnode *vp;
	struct vattr va;
	int error;

	/* open the pathname for read/write; on success the vnode comes
	 * back locked and referenced */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
#if 0
		printf("RAIDframe: vn_open returned %d\n", error);
#endif
		return (error);
	}
	vp = nd.ni_vp;
	/* refuse a device someone else already has open; every error
	 * path below must unlock and close what vn_open gave us */
	if (vp->v_usecount > 1) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
		return (EBUSY);
	}
	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
		return (error);
	}
	/* XXX: eventually we should handle VREG, too. */
	if (va.va_type != VBLK) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
		return (ENOTBLK);
	}
	/* success: hand back the vnode unlocked but still open/referenced;
	 * the caller owns the eventual vn_close() */
	VOP_UNLOCK(vp, 0);
	*vpp = vp;
	return (0);
}
2152 /*
2153 * Wait interruptibly for an exclusive lock.
2154 *
2155 * XXX
2156 * Several drivers do this; it should be abstracted and made MP-safe.
2157 * (Hmm... where have we seen this warning before :-> GO )
2158 */
2159 static int
2160 raidlock(rs)
2161 struct raid_softc *rs;
2162 {
2163 int error;
2164
2165 while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
2166 rs->sc_flags |= RAIDF_WANTED;
2167 if ((error =
2168 tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
2169 return (error);
2170 }
2171 rs->sc_flags |= RAIDF_LOCKED;
2172 return (0);
2173 }
2174 /*
2175 * Unlock and wake up any waiters.
2176 */
2177 static void
2178 raidunlock(rs)
2179 struct raid_softc *rs;
2180 {
2181
2182 rs->sc_flags &= ~RAIDF_LOCKED;
2183 if ((rs->sc_flags & RAIDF_WANTED) != 0) {
2184 rs->sc_flags &= ~RAIDF_WANTED;
2185 wakeup(rs);
2186 }
2187 }
2188
2189
2190 #define RF_COMPONENT_INFO_OFFSET 16384 /* bytes */
2191 #define RF_COMPONENT_INFO_SIZE 1024 /* bytes */
2192
2193 int
2194 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
2195 {
2196 RF_ComponentLabel_t clabel;
2197 raidread_component_label(dev, b_vp, &clabel);
2198 clabel.mod_counter = mod_counter;
2199 clabel.clean = RF_RAID_CLEAN;
2200 raidwrite_component_label(dev, b_vp, &clabel);
2201 return(0);
2202 }
2203
2204
2205 int
2206 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
2207 {
2208 RF_ComponentLabel_t clabel;
2209 raidread_component_label(dev, b_vp, &clabel);
2210 clabel.mod_counter = mod_counter;
2211 clabel.clean = RF_RAID_DIRTY;
2212 raidwrite_component_label(dev, b_vp, &clabel);
2213 return(0);
2214 }
2215
2216 /* ARGSUSED */
2217 int
2218 raidread_component_label(dev, b_vp, clabel)
2219 dev_t dev;
2220 struct vnode *b_vp;
2221 RF_ComponentLabel_t *clabel;
2222 {
2223 struct buf *bp;
2224 const struct bdevsw *bdev;
2225 int error;
2226
2227 /* XXX should probably ensure that we don't try to do this if
2228 someone has changed rf_protected_sectors. */
2229
2230 if (b_vp == NULL) {
2231 /* For whatever reason, this component is not valid.
2232 Don't try to read a component label from it. */
2233 return(EINVAL);
2234 }
2235
2236 /* get a block of the appropriate size... */
2237 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2238 bp->b_dev = dev;
2239
2240 /* get our ducks in a row for the read */
2241 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2242 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2243 bp->b_flags |= B_READ;
2244 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2245
2246 bdev = bdevsw_lookup(bp->b_dev);
2247 if (bdev == NULL)
2248 return (ENXIO);
2249 (*bdev->d_strategy)(bp);
2250
2251 error = biowait(bp);
2252
2253 if (!error) {
2254 memcpy(clabel, bp->b_data,
2255 sizeof(RF_ComponentLabel_t));
2256 #if 0
2257 rf_print_component_label( clabel );
2258 #endif
2259 } else {
2260 #if 0
2261 printf("Failed to read RAID component label!\n");
2262 #endif
2263 }
2264
2265 brelse(bp);
2266 return(error);
2267 }
2268 /* ARGSUSED */
2269 int
2270 raidwrite_component_label(dev, b_vp, clabel)
2271 dev_t dev;
2272 struct vnode *b_vp;
2273 RF_ComponentLabel_t *clabel;
2274 {
2275 struct buf *bp;
2276 const struct bdevsw *bdev;
2277 int error;
2278
2279 /* get a block of the appropriate size... */
2280 bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
2281 bp->b_dev = dev;
2282
2283 /* get our ducks in a row for the write */
2284 bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
2285 bp->b_bcount = RF_COMPONENT_INFO_SIZE;
2286 bp->b_flags |= B_WRITE;
2287 bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
2288
2289 memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
2290
2291 memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
2292
2293 bdev = bdevsw_lookup(bp->b_dev);
2294 if (bdev == NULL)
2295 return (ENXIO);
2296 (*bdev->d_strategy)(bp);
2297 error = biowait(bp);
2298 brelse(bp);
2299 if (error) {
2300 #if 1
2301 printf("Failed to write RAID component info!\n");
2302 #endif
2303 }
2304
2305 return(error);
2306 }
2307
/*
 * Bump the array's mod_counter and mark the component label of every
 * live (non-failed) component dirty, so that an unclean shutdown can
 * be detected later.  Components in rf_ds_spared state are skipped
 * entirely.  Note: errors from the label read/write helpers are
 * ignored here.
 */
void
rf_markalldirty(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_ComponentLabel_t clabel;
	int r,c;

	raidPtr->mod_counter++;
	for (r = 0; r < raidPtr->numRow; r++) {
		for (c = 0; c < raidPtr->numCol; c++) {
			/* we don't want to touch (at all) a disk that has
			   failed */
			if (!RF_DEAD_DISK(raidPtr->Disks[r][c].status)) {
				raidread_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&clabel);
				if (clabel.status == rf_ds_spared) {
					/* XXX do something special...
					 but whatever you do, don't
					 try to access it!! */
				} else {
#if 0
				clabel.status =
					raidPtr->Disks[r][c].status;
				raidwrite_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&clabel);
#endif
				/* stamp the label with the new
				   mod_counter and the dirty bit */
				raidmarkdirty(
				       raidPtr->Disks[r][c].dev,
				       raidPtr->raid_cinfo[r][c].ci_vp,
				       raidPtr->mod_counter);
				}
			}
		}
	}
	/* printf("Component labels marked dirty.\n"); */
#if 0
	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[r][sparecol].status == rf_ds_used_spare) {
			/*

			   XXX this is where we get fancy and map this spare
			   into it's correct spot in the array.

			 */
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			for(i=0;i<raidPtr->numRow;i++) {
				for(j=0;j<raidPtr->numCol;j++) {
					if ((raidPtr->Disks[i][j].spareRow ==
					     r) &&
					    (raidPtr->Disks[i][j].spareCol ==
					     sparecol)) {
						srow = r;
						scol = sparecol;
						break;
					}
				}
			}

			raidread_component_label(
				      raidPtr->Disks[r][sparecol].dev,
				      raidPtr->raid_cinfo[r][sparecol].ci_vp,
				      &clabel);
			/* make sure status is noted */
			clabel.version = RF_COMPONENT_LABEL_VERSION;
			clabel.mod_counter = raidPtr->mod_counter;
			clabel.serial_number = raidPtr->serial_number;
			clabel.row = srow;
			clabel.column = scol;
			clabel.num_rows = raidPtr->numRow;
			clabel.num_columns = raidPtr->numCol;
			clabel.clean = RF_RAID_DIRTY; /* changed in a bit*/
			clabel.status = rf_ds_optimal;
			raidwrite_component_label(
				      raidPtr->Disks[r][sparecol].dev,
				      raidPtr->raid_cinfo[r][sparecol].ci_vp,
				      &clabel);
			raidmarkclean( raidPtr->Disks[r][sparecol].dev,
				       raidPtr->raid_cinfo[r][sparecol].ci_vp);
		}
	}

#endif
}
2404
2405
/*
 * Bump mod_counter and rewrite the component label on every optimal
 * component and every in-use spare so all labels agree on the current
 * state of the array.  If `final' is RF_FINAL_COMPONENT_UPDATE and
 * parity is known good, components are additionally marked clean
 * (the shutdown path).
 */
void
rf_update_component_labels(raidPtr, final)
	RF_Raid_t *raidPtr;
	int final;
{
	RF_ComponentLabel_t clabel;
	int sparecol;
	int r,c;
	int i,j;
	int srow, scol;

	srow = -1;
	scol = -1;

	/* XXX should do extra checks to make sure things really are clean,
	   rather than blindly setting the clean bit... */

	raidPtr->mod_counter++;

	for (r = 0; r < raidPtr->numRow; r++) {
		for (c = 0; c < raidPtr->numCol; c++) {
			if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
				raidread_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&clabel);
				/* make sure status is noted */
				clabel.status = rf_ds_optimal;
				/* bump the counter */
				clabel.mod_counter = raidPtr->mod_counter;

				raidwrite_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&clabel);
				if (final == RF_FINAL_COMPONENT_UPDATE) {
					if (raidPtr->parity_good == RF_RAID_CLEAN) {
						raidmarkclean(
							raidPtr->Disks[r][c].dev,
							raidPtr->raid_cinfo[r][c].ci_vp,
							raidPtr->mod_counter);
					}
				}
			}
			/* else we don't touch it.. */
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		/* Need to ensure that the reconstruct actually completed! */
		if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* find which array position this spare is
			   standing in for, so the label can record it */
			for(i=0;i<raidPtr->numRow;i++) {
				for(j=0;j<raidPtr->numCol;j++) {
					if ((raidPtr->Disks[i][j].spareRow ==
					     0) &&
					    (raidPtr->Disks[i][j].spareCol ==
					     sparecol)) {
						srow = i;
						scol = j;
						break;
					}
				}
			}

			/* XXX shouldn't *really* need this... */
			raidread_component_label(
				      raidPtr->Disks[0][sparecol].dev,
				      raidPtr->raid_cinfo[0][sparecol].ci_vp,
				      &clabel);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, &clabel);

			clabel.mod_counter = raidPtr->mod_counter;
			clabel.row = srow;
			clabel.column = scol;
			clabel.status = rf_ds_optimal;

			raidwrite_component_label(
				      raidPtr->Disks[0][sparecol].dev,
				      raidPtr->raid_cinfo[0][sparecol].ci_vp,
				      &clabel);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean( raidPtr->Disks[0][sparecol].dev,
						       raidPtr->raid_cinfo[0][sparecol].ci_vp,
						       raidPtr->mod_counter);
				}
			}
		}
	}
	/* printf("Component labels updated\n"); */
}
2509
2510 void
2511 rf_close_component(raidPtr, vp, auto_configured)
2512 RF_Raid_t *raidPtr;
2513 struct vnode *vp;
2514 int auto_configured;
2515 {
2516 struct proc *p;
2517
2518 p = raidPtr->engine_thread;
2519
2520 if (vp != NULL) {
2521 if (auto_configured == 1) {
2522 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2523 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2524 vput(vp);
2525
2526 } else {
2527 (void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
2528 }
2529 } else {
2530 #if 0
2531 printf("vnode was NULL\n");
2532 #endif
2533 }
2534 }
2535
2536
2537 void
2538 rf_UnconfigureVnodes(raidPtr)
2539 RF_Raid_t *raidPtr;
2540 {
2541 int r,c;
2542 struct proc *p;
2543 struct vnode *vp;
2544 int acd;
2545
2546
2547 /* We take this opportunity to close the vnodes like we should.. */
2548
2549 p = raidPtr->engine_thread;
2550
2551 for (r = 0; r < raidPtr->numRow; r++) {
2552 for (c = 0; c < raidPtr->numCol; c++) {
2553 #if 0
2554 printf("raid%d: Closing vnode for row: %d col: %d\n",
2555 raidPtr->raidid, r, c);
2556 #endif
2557 vp = raidPtr->raid_cinfo[r][c].ci_vp;
2558 acd = raidPtr->Disks[r][c].auto_configured;
2559 rf_close_component(raidPtr, vp, acd);
2560 raidPtr->raid_cinfo[r][c].ci_vp = NULL;
2561 raidPtr->Disks[r][c].auto_configured = 0;
2562 }
2563 }
2564 for (r = 0; r < raidPtr->numSpare; r++) {
2565 #if 0
2566 printf("raid%d: Closing vnode for spare: %d\n",
2567 raidPtr->raidid, r);
2568 #endif
2569 vp = raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp;
2570 acd = raidPtr->Disks[0][raidPtr->numCol + r].auto_configured;
2571 rf_close_component(raidPtr, vp, acd);
2572 raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp = NULL;
2573 raidPtr->Disks[0][raidPtr->numCol + r].auto_configured = 0;
2574 }
2575 }
2576
2577
2578 void
2579 rf_ReconThread(req)
2580 struct rf_recon_req *req;
2581 {
2582 int s;
2583 RF_Raid_t *raidPtr;
2584
2585 s = splbio();
2586 raidPtr = (RF_Raid_t *) req->raidPtr;
2587 raidPtr->recon_in_progress = 1;
2588
2589 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
2590 ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
2591
2592 /* XXX get rid of this! we don't need it at all.. */
2593 RF_Free(req, sizeof(*req));
2594
2595 raidPtr->recon_in_progress = 0;
2596 splx(s);
2597
2598 /* That's all... */
2599 kthread_exit(0); /* does not return */
2600 }
2601
2602 void
2603 rf_RewriteParityThread(raidPtr)
2604 RF_Raid_t *raidPtr;
2605 {
2606 int retcode;
2607 int s;
2608
2609 raidPtr->parity_rewrite_in_progress = 1;
2610 s = splbio();
2611 retcode = rf_RewriteParity(raidPtr);
2612 splx(s);
2613 if (retcode) {
2614 printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
2615 } else {
2616 /* set the clean bit! If we shutdown correctly,
2617 the clean bit on each component label will get
2618 set */
2619 raidPtr->parity_good = RF_RAID_CLEAN;
2620 }
2621 raidPtr->parity_rewrite_in_progress = 0;
2622
2623 /* Anyone waiting for us to stop? If so, inform them... */
2624 if (raidPtr->waitShutdown) {
2625 wakeup(&raidPtr->parity_rewrite_in_progress);
2626 }
2627
2628 /* That's all... */
2629 kthread_exit(0); /* does not return */
2630 }
2631
2632
2633 void
2634 rf_CopybackThread(raidPtr)
2635 RF_Raid_t *raidPtr;
2636 {
2637 int s;
2638
2639 raidPtr->copyback_in_progress = 1;
2640 s = splbio();
2641 rf_CopybackReconstructedData(raidPtr);
2642 splx(s);
2643 raidPtr->copyback_in_progress = 0;
2644
2645 /* That's all... */
2646 kthread_exit(0); /* does not return */
2647 }
2648
2649
2650 void
2651 rf_ReconstructInPlaceThread(req)
2652 struct rf_recon_req *req;
2653 {
2654 int retcode;
2655 int s;
2656 RF_Raid_t *raidPtr;
2657
2658 s = splbio();
2659 raidPtr = req->raidPtr;
2660 raidPtr->recon_in_progress = 1;
2661 retcode = rf_ReconstructInPlace(raidPtr, req->row, req->col);
2662 RF_Free(req, sizeof(*req));
2663 raidPtr->recon_in_progress = 0;
2664 splx(s);
2665
2666 /* That's all... */
2667 kthread_exit(0); /* does not return */
2668 }
2669
/*
 * Hook invoked when the root filesystem is mounted on the given
 * device.  Currently a no-op placeholder.
 */
void
rf_mountroot_hook(dev)
	struct device *dev;
{

}
2676
2677
2678 RF_AutoConfig_t *
2679 rf_find_raid_components()
2680 {
2681 struct vnode *vp;
2682 struct disklabel label;
2683 struct device *dv;
2684 dev_t dev;
2685 int bmajor;
2686 int error;
2687 int i;
2688 int good_one;
2689 RF_ComponentLabel_t *clabel;
2690 RF_AutoConfig_t *ac_list;
2691 RF_AutoConfig_t *ac;
2692
2693
2694 /* initialize the AutoConfig list */
2695 ac_list = NULL;
2696
2697 /* we begin by trolling through *all* the devices on the system */
2698
2699 for (dv = alldevs.tqh_first; dv != NULL;
2700 dv = dv->dv_list.tqe_next) {
2701
2702 /* we are only interested in disks... */
2703 if (dv->dv_class != DV_DISK)
2704 continue;
2705
2706 /* we don't care about floppies... */
2707 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"fd")) {
2708 continue;
2709 }
2710
2711 /* we don't care about CD's... */
2712 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"cd")) {
2713 continue;
2714 }
2715
2716 /* hdfd is the Atari/Hades floppy driver */
2717 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"hdfd")) {
2718 continue;
2719 }
2720 /* fdisa is the Atari/Milan floppy driver */
2721 if (!strcmp(dv->dv_cfdata->cf_driver->cd_name,"fdisa")) {
2722 continue;
2723 }
2724
2725 /* need to find the device_name_to_block_device_major stuff */
2726 bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
2727
2728 /* get a vnode for the raw partition of this disk */
2729
2730 dev = MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART);
2731 if (bdevvp(dev, &vp))
2732 panic("RAID can't alloc vnode");
2733
2734 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2735
2736 if (error) {
2737 /* "Who cares." Continue looking
2738 for something that exists*/
2739 vput(vp);
2740 continue;
2741 }
2742
2743 /* Ok, the disk exists. Go get the disklabel. */
2744 error = VOP_IOCTL(vp, DIOCGDINFO, (caddr_t)&label,
2745 FREAD, NOCRED, 0);
2746 if (error) {
2747 /*
2748 * XXX can't happen - open() would
2749 * have errored out (or faked up one)
2750 */
2751 printf("can't get label for dev %s%c (%d)!?!?\n",
2752 dv->dv_xname, 'a' + RAW_PART, error);
2753 }
2754
2755 /* don't need this any more. We'll allocate it again
2756 a little later if we really do... */
2757 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2758 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2759 vput(vp);
2760
2761 for (i=0; i < label.d_npartitions; i++) {
2762 /* We only support partitions marked as RAID */
2763 if (label.d_partitions[i].p_fstype != FS_RAID)
2764 continue;
2765
2766 dev = MAKEDISKDEV(bmajor, dv->dv_unit, i);
2767 if (bdevvp(dev, &vp))
2768 panic("RAID can't alloc vnode");
2769
2770 error = VOP_OPEN(vp, FREAD, NOCRED, 0);
2771 if (error) {
2772 /* Whatever... */
2773 vput(vp);
2774 continue;
2775 }
2776
2777 good_one = 0;
2778
2779 clabel = (RF_ComponentLabel_t *)
2780 malloc(sizeof(RF_ComponentLabel_t),
2781 M_RAIDFRAME, M_NOWAIT);
2782 if (clabel == NULL) {
2783 /* XXX CLEANUP HERE */
2784 printf("RAID auto config: out of memory!\n");
2785 return(NULL); /* XXX probably should panic? */
2786 }
2787
2788 if (!raidread_component_label(dev, vp, clabel)) {
2789 /* Got the label. Does it look reasonable? */
2790 if (rf_reasonable_label(clabel) &&
2791 (clabel->partitionSize <=
2792 label.d_partitions[i].p_size)) {
2793 #if DEBUG
2794 printf("Component on: %s%c: %d\n",
2795 dv->dv_xname, 'a'+i,
2796 label.d_partitions[i].p_size);
2797 rf_print_component_label(clabel);
2798 #endif
2799 /* if it's reasonable, add it,
2800 else ignore it. */
2801 ac = (RF_AutoConfig_t *)
2802 malloc(sizeof(RF_AutoConfig_t),
2803 M_RAIDFRAME,
2804 M_NOWAIT);
2805 if (ac == NULL) {
2806 /* XXX should panic?? */
2807 return(NULL);
2808 }
2809
2810 sprintf(ac->devname, "%s%c",
2811 dv->dv_xname, 'a'+i);
2812 ac->dev = dev;
2813 ac->vp = vp;
2814 ac->clabel = clabel;
2815 ac->next = ac_list;
2816 ac_list = ac;
2817 good_one = 1;
2818 }
2819 }
2820 if (!good_one) {
2821 /* cleanup */
2822 free(clabel, M_RAIDFRAME);
2823 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2824 VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
2825 vput(vp);
2826 }
2827 }
2828 }
2829 return(ac_list);
2830 }
2831
2832 static int
2833 rf_reasonable_label(clabel)
2834 RF_ComponentLabel_t *clabel;
2835 {
2836
2837 if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
2838 (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
2839 ((clabel->clean == RF_RAID_CLEAN) ||
2840 (clabel->clean == RF_RAID_DIRTY)) &&
2841 clabel->row >=0 &&
2842 clabel->column >= 0 &&
2843 clabel->num_rows > 0 &&
2844 clabel->num_columns > 0 &&
2845 clabel->row < clabel->num_rows &&
2846 clabel->column < clabel->num_columns &&
2847 clabel->blockSize > 0 &&
2848 clabel->numBlocks > 0) {
2849 /* label looks reasonable enough... */
2850 return(1);
2851 }
2852 return(0);
2853 }
2854
2855
/*
 * Pretty-print the interesting fields of a component label to the
 * console (debug aid used by the autoconfig path).
 */
void
rf_print_component_label(clabel)
	RF_ComponentLabel_t *clabel;
{
	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
	       clabel->row, clabel->column,
	       clabel->num_rows, clabel->num_columns);
	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
	       clabel->version, clabel->serial_number,
	       clabel->mod_counter);
	printf("   Clean: %s Status: %d\n",
	       clabel->clean ? "Yes" : "No", clabel->status );
	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
	       (char) clabel->parityConfig, clabel->blockSize,
	       clabel->numBlocks);
	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
	printf("   Contains root partition: %s\n",
	       clabel->root_partition ? "Yes" : "No" );
	printf("   Last configured as: raid%d\n", clabel->last_unit );
#if 0
	printf("   Config order: %d\n", clabel->config_order);
#endif

}
2882
2883 RF_ConfigSet_t *
2884 rf_create_auto_sets(ac_list)
2885 RF_AutoConfig_t *ac_list;
2886 {
2887 RF_AutoConfig_t *ac;
2888 RF_ConfigSet_t *config_sets;
2889 RF_ConfigSet_t *cset;
2890 RF_AutoConfig_t *ac_next;
2891
2892
2893 config_sets = NULL;
2894
2895 /* Go through the AutoConfig list, and figure out which components
2896 belong to what sets. */
2897 ac = ac_list;
2898 while(ac!=NULL) {
2899 /* we're going to putz with ac->next, so save it here
2900 for use at the end of the loop */
2901 ac_next = ac->next;
2902
2903 if (config_sets == NULL) {
2904 /* will need at least this one... */
2905 config_sets = (RF_ConfigSet_t *)
2906 malloc(sizeof(RF_ConfigSet_t),
2907 M_RAIDFRAME, M_NOWAIT);
2908 if (config_sets == NULL) {
2909 panic("rf_create_auto_sets: No memory!\n");
2910 }
2911 /* this one is easy :) */
2912 config_sets->ac = ac;
2913 config_sets->next = NULL;
2914 config_sets->rootable = 0;
2915 ac->next = NULL;
2916 } else {
2917 /* which set does this component fit into? */
2918 cset = config_sets;
2919 while(cset!=NULL) {
2920 if (rf_does_it_fit(cset, ac)) {
2921 /* looks like it matches... */
2922 ac->next = cset->ac;
2923 cset->ac = ac;
2924 break;
2925 }
2926 cset = cset->next;
2927 }
2928 if (cset==NULL) {
2929 /* didn't find a match above... new set..*/
2930 cset = (RF_ConfigSet_t *)
2931 malloc(sizeof(RF_ConfigSet_t),
2932 M_RAIDFRAME, M_NOWAIT);
2933 if (cset == NULL) {
2934 panic("rf_create_auto_sets: No memory!\n");
2935 }
2936 cset->ac = ac;
2937 ac->next = NULL;
2938 cset->next = config_sets;
2939 cset->rootable = 0;
2940 config_sets = cset;
2941 }
2942 }
2943 ac = ac_next;
2944 }
2945
2946
2947 return(config_sets);
2948 }
2949
2950 static int
2951 rf_does_it_fit(cset, ac)
2952 RF_ConfigSet_t *cset;
2953 RF_AutoConfig_t *ac;
2954 {
2955 RF_ComponentLabel_t *clabel1, *clabel2;
2956
2957 /* If this one matches the *first* one in the set, that's good
2958 enough, since the other members of the set would have been
2959 through here too... */
2960 /* note that we are not checking partitionSize here..
2961
2962 Note that we are also not checking the mod_counters here.
2963 If everything else matches execpt the mod_counter, that's
2964 good enough for this test. We will deal with the mod_counters
2965 a little later in the autoconfiguration process.
2966
2967 (clabel1->mod_counter == clabel2->mod_counter) &&
2968
2969 The reason we don't check for this is that failed disks
2970 will have lower modification counts. If those disks are
2971 not added to the set they used to belong to, then they will
2972 form their own set, which may result in 2 different sets,
2973 for example, competing to be configured at raid0, and
2974 perhaps competing to be the root filesystem set. If the
2975 wrong ones get configured, or both attempt to become /,
2976 weird behaviour and or serious lossage will occur. Thus we
2977 need to bring them into the fold here, and kick them out at
2978 a later point.
2979
2980 */
2981
2982 clabel1 = cset->ac->clabel;
2983 clabel2 = ac->clabel;
2984 if ((clabel1->version == clabel2->version) &&
2985 (clabel1->serial_number == clabel2->serial_number) &&
2986 (clabel1->num_rows == clabel2->num_rows) &&
2987 (clabel1->num_columns == clabel2->num_columns) &&
2988 (clabel1->sectPerSU == clabel2->sectPerSU) &&
2989 (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
2990 (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
2991 (clabel1->parityConfig == clabel2->parityConfig) &&
2992 (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
2993 (clabel1->blockSize == clabel2->blockSize) &&
2994 (clabel1->numBlocks == clabel2->numBlocks) &&
2995 (clabel1->autoconfigure == clabel2->autoconfigure) &&
2996 (clabel1->root_partition == clabel2->root_partition) &&
2997 (clabel1->last_unit == clabel2->last_unit) &&
2998 (clabel1->config_order == clabel2->config_order)) {
2999 /* if it get's here, it almost *has* to be a match */
3000 } else {
3001 /* it's not consistent with somebody in the set..
3002 punt */
3003 return(0);
3004 }
3005 /* all was fine.. it must fit... */
3006 return(1);
3007 }
3008
/*
 * Decide whether config set `cset' has enough live components to be
 * configured.  The reference mod_counter is the highest one present in
 * the set; a component with a stale mod_counter counts as missing.
 * RAID 1 gets special treatment: components are paired (even, odd),
 * and the set is viable as long as no pair has lost both members.
 * Returns 1 if the set can be configured, 0 otherwise.
 */
int
rf_have_enough_components(cset)
	RF_ConfigSet_t *cset;
{
	RF_AutoConfig_t *ac;
	RF_AutoConfig_t *auto_config;
	RF_ComponentLabel_t *clabel;
	int r,c;
	int num_rows;
	int num_cols;
	int num_missing;
	int mod_counter;
	int mod_counter_found;
	int even_pair_failed;
	char parity_type;


	/* check to see that we have enough 'live' components
	   of this set.  If so, we can configure it if necessary */

	num_rows = cset->ac->clabel->num_rows;
	num_cols = cset->ac->clabel->num_columns;
	parity_type = cset->ac->clabel->parityConfig;

	/* XXX Check for duplicate components!?!?!? */

	/* Determine what the mod_counter is supposed to be for this set. */

	mod_counter_found = 0;
	mod_counter = 0;
	ac = cset->ac;
	while(ac!=NULL) {
		if (mod_counter_found==0) {
			mod_counter = ac->clabel->mod_counter;
			mod_counter_found = 1;
		} else {
			if (ac->clabel->mod_counter > mod_counter) {
				mod_counter = ac->clabel->mod_counter;
			}
		}
		ac = ac->next;
	}

	num_missing = 0;
	auto_config = cset->ac;

	for(r=0; r<num_rows; r++) {
		even_pair_failed = 0;
		for(c=0; c<num_cols; c++) {
			/* is there a component with the current
			   mod_counter at position (r,c)? */
			ac = auto_config;
			while(ac!=NULL) {
				if ((ac->clabel->row == r) &&
				    (ac->clabel->column == c) &&
				    (ac->clabel->mod_counter == mod_counter)) {
					/* it's this one... */
#if DEBUG
					printf("Found: %s at %d,%d\n",
					       ac->devname,r,c);
#endif
					break;
				}
				ac=ac->next;
			}
			if (ac==NULL) {
				/* Didn't find one here! */
				/* special case for RAID 1, especially
				   where there are more than 2
				   components (where RAIDframe treats
				   things a little differently :( ) */
				if (parity_type == '1') {
					if (c%2 == 0) { /* even component */
						even_pair_failed = 1;
					} else { /* odd component.  If
						    we're failed, and
						    so is the even
						    component, it's
						    "Good Night, Charlie" */
						if (even_pair_failed == 1) {
							return(0);
						}
					}
				} else {
					/* normal accounting */
					num_missing++;
				}
			}
			if ((parity_type == '1') && (c%2 == 1)) {
				/* Just did an even component, and we didn't
				   bail.. reset the even_pair_failed flag,
				   and go on to the next component.... */
				even_pair_failed = 0;
			}
		}
	}

	clabel = cset->ac->clabel;

	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
		/* XXX this needs to be made *much* more general */
		/* Too many failures */
		return(0);
	}
	/* otherwise, all is well, and we've got enough to take a kick
	   at autoconfiguring this set */
	return(1);
}
3117
3118 void
3119 rf_create_configuration(ac,config,raidPtr)
3120 RF_AutoConfig_t *ac;
3121 RF_Config_t *config;
3122 RF_Raid_t *raidPtr;
3123 {
3124 RF_ComponentLabel_t *clabel;
3125 int i;
3126
3127 clabel = ac->clabel;
3128
3129 /* 1. Fill in the common stuff */
3130 config->numRow = clabel->num_rows;
3131 config->numCol = clabel->num_columns;
3132 config->numSpare = 0; /* XXX should this be set here? */
3133 config->sectPerSU = clabel->sectPerSU;
3134 config->SUsPerPU = clabel->SUsPerPU;
3135 config->SUsPerRU = clabel->SUsPerRU;
3136 config->parityConfig = clabel->parityConfig;
3137 /* XXX... */
3138 strcpy(config->diskQueueType,"fifo");
3139 config->maxOutstandingDiskReqs = clabel->maxOutstanding;
3140 config->layoutSpecificSize = 0; /* XXX ?? */
3141
3142 while(ac!=NULL) {
3143 /* row/col values will be in range due to the checks
3144 in reasonable_label() */
3145 strcpy(config->devnames[ac->clabel->row][ac->clabel->column],
3146 ac->devname);
3147 ac = ac->next;
3148 }
3149
3150 for(i=0;i<RF_MAXDBGV;i++) {
3151 config->debugVars[i][0] = NULL;
3152 }
3153 }
3154
3155 int
3156 rf_set_autoconfig(raidPtr, new_value)
3157 RF_Raid_t *raidPtr;
3158 int new_value;
3159 {
3160 RF_ComponentLabel_t clabel;
3161 struct vnode *vp;
3162 dev_t dev;
3163 int row, column;
3164
3165 raidPtr->autoconfigure = new_value;
3166 for(row=0; row<raidPtr->numRow; row++) {
3167 for(column=0; column<raidPtr->numCol; column++) {
3168 if (raidPtr->Disks[row][column].status ==
3169 rf_ds_optimal) {
3170 dev = raidPtr->Disks[row][column].dev;
3171 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3172 raidread_component_label(dev, vp, &clabel);
3173 clabel.autoconfigure = new_value;
3174 raidwrite_component_label(dev, vp, &clabel);
3175 }
3176 }
3177 }
3178 return(new_value);
3179 }
3180
3181 int
3182 rf_set_rootpartition(raidPtr, new_value)
3183 RF_Raid_t *raidPtr;
3184 int new_value;
3185 {
3186 RF_ComponentLabel_t clabel;
3187 struct vnode *vp;
3188 dev_t dev;
3189 int row, column;
3190
3191 raidPtr->root_partition = new_value;
3192 for(row=0; row<raidPtr->numRow; row++) {
3193 for(column=0; column<raidPtr->numCol; column++) {
3194 if (raidPtr->Disks[row][column].status ==
3195 rf_ds_optimal) {
3196 dev = raidPtr->Disks[row][column].dev;
3197 vp = raidPtr->raid_cinfo[row][column].ci_vp;
3198 raidread_component_label(dev, vp, &clabel);
3199 clabel.root_partition = new_value;
3200 raidwrite_component_label(dev, vp, &clabel);
3201 }
3202 }
3203 }
3204 return(new_value);
3205 }
3206
3207 void
3208 rf_release_all_vps(cset)
3209 RF_ConfigSet_t *cset;
3210 {
3211 RF_AutoConfig_t *ac;
3212
3213 ac = cset->ac;
3214 while(ac!=NULL) {
3215 /* Close the vp, and give it back */
3216 if (ac->vp) {
3217 vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
3218 VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
3219 vput(ac->vp);
3220 ac->vp = NULL;
3221 }
3222 ac = ac->next;
3223 }
3224 }
3225
3226
3227 void
3228 rf_cleanup_config_set(cset)
3229 RF_ConfigSet_t *cset;
3230 {
3231 RF_AutoConfig_t *ac;
3232 RF_AutoConfig_t *next_ac;
3233
3234 ac = cset->ac;
3235 while(ac!=NULL) {
3236 next_ac = ac->next;
3237 /* nuke the label */
3238 free(ac->clabel, M_RAIDFRAME);
3239 /* cleanup the config structure */
3240 free(ac, M_RAIDFRAME);
3241 /* "next.." */
3242 ac = next_ac;
3243 }
3244 /* and, finally, nuke the config set */
3245 free(cset, M_RAIDFRAME);
3246 }
3247
3248
/*
 * Initialize *clabel from the current in-core state of *raidPtr.
 * Fills in every field except the per-component row/column, which the
 * caller is expected to set afterwards.
 */
void
raid_init_component_label(raidPtr, clabel)
	RF_Raid_t *raidPtr;
	RF_ComponentLabel_t *clabel;
{
	/* current version number */
	clabel->version = RF_COMPONENT_LABEL_VERSION;
	clabel->serial_number = raidPtr->serial_number;
	clabel->mod_counter = raidPtr->mod_counter;
	clabel->num_rows = raidPtr->numRow;
	clabel->num_columns = raidPtr->numCol;
	clabel->clean = RF_RAID_DIRTY; /* not clean */
	clabel->status = rf_ds_optimal; /* "It's good!" */

	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;

	clabel->blockSize = raidPtr->bytesPerSector;
	clabel->numBlocks = raidPtr->sectorsPerDisk;

	/* XXX not portable */
	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
	clabel->maxOutstanding = raidPtr->maxOutstanding;
	clabel->autoconfigure = raidPtr->autoconfigure;
	clabel->root_partition = raidPtr->root_partition;
	clabel->last_unit = raidPtr->raidid;
	clabel->config_order = raidPtr->config_order;
}
3278
3279 int
3280 rf_auto_config_set(cset,unit)
3281 RF_ConfigSet_t *cset;
3282 int *unit;
3283 {
3284 RF_Raid_t *raidPtr;
3285 RF_Config_t *config;
3286 int raidID;
3287 int retcode;
3288
3289 #if DEBUG
3290 printf("RAID autoconfigure\n");
3291 #endif
3292
3293 retcode = 0;
3294 *unit = -1;
3295
3296 /* 1. Create a config structure */
3297
3298 config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
3299 M_RAIDFRAME,
3300 M_NOWAIT);
3301 if (config==NULL) {
3302 printf("Out of mem!?!?\n");
3303 /* XXX do something more intelligent here. */
3304 return(1);
3305 }
3306
3307 memset(config, 0, sizeof(RF_Config_t));
3308
3309 /* XXX raidID needs to be set correctly.. */
3310
3311 /*
3312 2. Figure out what RAID ID this one is supposed to live at
3313 See if we can get the same RAID dev that it was configured
3314 on last time..
3315 */
3316
3317 raidID = cset->ac->clabel->last_unit;
3318 if ((raidID < 0) || (raidID >= numraid)) {
3319 /* let's not wander off into lala land. */
3320 raidID = numraid - 1;
3321 }
3322 if (raidPtrs[raidID]->valid != 0) {
3323
3324 /*
3325 Nope... Go looking for an alternative...
3326 Start high so we don't immediately use raid0 if that's
3327 not taken.
3328 */
3329
3330 for(raidID = numraid - 1; raidID >= 0; raidID--) {
3331 if (raidPtrs[raidID]->valid == 0) {
3332 /* can use this one! */
3333 break;
3334 }
3335 }
3336 }
3337
3338 if (raidID < 0) {
3339 /* punt... */
3340 printf("Unable to auto configure this set!\n");
3341 printf("(Out of RAID devs!)\n");
3342 return(1);
3343 }
3344
3345 #if DEBUG
3346 printf("Configuring raid%d:\n",raidID);
3347 #endif
3348
3349 raidPtr = raidPtrs[raidID];
3350
3351 /* XXX all this stuff should be done SOMEWHERE ELSE! */
3352 raidPtr->raidid = raidID;
3353 raidPtr->openings = RAIDOUTSTANDING;
3354
3355 /* 3. Build the configuration structure */
3356 rf_create_configuration(cset->ac, config, raidPtr);
3357
3358 /* 4. Do the configuration */
3359 retcode = rf_Configure(raidPtr, config, cset->ac);
3360
3361 if (retcode == 0) {
3362
3363 raidinit(raidPtrs[raidID]);
3364
3365 rf_markalldirty(raidPtrs[raidID]);
3366 raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
3367 if (cset->ac->clabel->root_partition==1) {
3368 /* everything configured just fine. Make a note
3369 that this set is eligible to be root. */
3370 cset->rootable = 1;
3371 /* XXX do this here? */
3372 raidPtrs[raidID]->root_partition = 1;
3373 }
3374 }
3375
3376 /* 5. Cleanup */
3377 free(config, M_RAIDFRAME);
3378
3379 *unit = raidID;
3380 return(retcode);
3381 }
3382
3383 void
3384 rf_disk_unbusy(desc)
3385 RF_RaidAccessDesc_t *desc;
3386 {
3387 struct buf *bp;
3388
3389 bp = (struct buf *)desc->bp;
3390 disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
3391 (bp->b_bcount - bp->b_resid));
3392 }
3393