      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.263 2009/06/05 21:52:32 haad Exp $	*/
      2 /*-
      3  * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster; Jason R. Thorpe.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     28  * POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 /*
     32  * Copyright (c) 1990, 1993
     33  *      The Regents of the University of California.  All rights reserved.
     34  *
     35  * This code is derived from software contributed to Berkeley by
     36  * the Systems Programming Group of the University of Utah Computer
     37  * Science Department.
     38  *
     39  * Redistribution and use in source and binary forms, with or without
     40  * modification, are permitted provided that the following conditions
     41  * are met:
     42  * 1. Redistributions of source code must retain the above copyright
     43  *    notice, this list of conditions and the following disclaimer.
     44  * 2. Redistributions in binary form must reproduce the above copyright
     45  *    notice, this list of conditions and the following disclaimer in the
     46  *    documentation and/or other materials provided with the distribution.
     47  * 3. Neither the name of the University nor the names of its contributors
     48  *    may be used to endorse or promote products derived from this software
     49  *    without specific prior written permission.
     50  *
     51  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     52  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     53  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     54  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     55  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     56  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     57  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     59  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     60  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     61  * SUCH DAMAGE.
     62  *
     63  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     64  *
     65  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     66  */
     67 
     68 /*
     69  * Copyright (c) 1988 University of Utah.
     70  *
     71  * This code is derived from software contributed to Berkeley by
     72  * the Systems Programming Group of the University of Utah Computer
     73  * Science Department.
     74  *
     75  * Redistribution and use in source and binary forms, with or without
     76  * modification, are permitted provided that the following conditions
     77  * are met:
     78  * 1. Redistributions of source code must retain the above copyright
     79  *    notice, this list of conditions and the following disclaimer.
     80  * 2. Redistributions in binary form must reproduce the above copyright
     81  *    notice, this list of conditions and the following disclaimer in the
     82  *    documentation and/or other materials provided with the distribution.
     83  * 3. All advertising materials mentioning features or use of this software
     84  *    must display the following acknowledgement:
     85  *      This product includes software developed by the University of
     86  *      California, Berkeley and its contributors.
     87  * 4. Neither the name of the University nor the names of its contributors
     88  *    may be used to endorse or promote products derived from this software
     89  *    without specific prior written permission.
     90  *
     91  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     92  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     93  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     94  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     95  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     96  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     97  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     98  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     99  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    100  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    101  * SUCH DAMAGE.
    102  *
    103  * from: Utah $Hdr: cd.c 1.6 90/11/28$
    104  *
    105  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
    106  */
    107 
    108 /*
    109  * Copyright (c) 1995 Carnegie-Mellon University.
    110  * All rights reserved.
    111  *
    112  * Authors: Mark Holland, Jim Zelenka
    113  *
    114  * Permission to use, copy, modify and distribute this software and
    115  * its documentation is hereby granted, provided that both the copyright
    116  * notice and this permission notice appear in all copies of the
    117  * software, derivative works or modified versions, and any portions
    118  * thereof, and that both notices appear in supporting documentation.
    119  *
    120  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
    121  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
    122  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
    123  *
    124  * Carnegie Mellon requests users of this software to return to
    125  *
    126  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
    127  *  School of Computer Science
    128  *  Carnegie Mellon University
    129  *  Pittsburgh PA 15213-3890
    130  *
    131  * any improvements or extensions that they make and grant Carnegie the
    132  * rights to redistribute these changes.
    133  */
    134 
    135 /***********************************************************
    136  *
    137  * rf_kintf.c -- the kernel interface routines for RAIDframe
    138  *
    139  ***********************************************************/
    140 
    141 #include <sys/cdefs.h>
    142 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.263 2009/06/05 21:52:32 haad Exp $");
    143 
    144 #ifdef _KERNEL_OPT
    145 #include "opt_compat_netbsd.h"
    146 #include "opt_raid_autoconfig.h"
    147 #include "raid.h"
    148 #endif
    149 
    150 #include <sys/param.h>
    151 #include <sys/errno.h>
    152 #include <sys/pool.h>
    153 #include <sys/proc.h>
    154 #include <sys/queue.h>
    155 #include <sys/disk.h>
    156 #include <sys/device.h>
    157 #include <sys/stat.h>
    158 #include <sys/ioctl.h>
    159 #include <sys/fcntl.h>
    160 #include <sys/systm.h>
    161 #include <sys/vnode.h>
    162 #include <sys/disklabel.h>
    163 #include <sys/conf.h>
    164 #include <sys/buf.h>
    165 #include <sys/bufq.h>
    166 #include <sys/user.h>
    167 #include <sys/reboot.h>
    168 #include <sys/kauth.h>
    169 
    170 #include <prop/proplib.h>
    171 
    172 #include <dev/raidframe/raidframevar.h>
    173 #include <dev/raidframe/raidframeio.h>
    174 
    175 #include "rf_raid.h"
    176 #include "rf_copyback.h"
    177 #include "rf_dag.h"
    178 #include "rf_dagflags.h"
    179 #include "rf_desc.h"
    180 #include "rf_diskqueue.h"
    181 #include "rf_etimer.h"
    182 #include "rf_general.h"
    183 #include "rf_kintf.h"
    184 #include "rf_options.h"
    185 #include "rf_driver.h"
    186 #include "rf_parityscan.h"
    187 #include "rf_threadstuff.h"
    188 
    189 #ifdef COMPAT_50
    190 #include "rf_compat50.h"
    191 #endif
    192 
    193 #ifdef DEBUG
    194 int     rf_kdebug_level = 0;
    195 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    196 #else				/* DEBUG */
    197 #define db1_printf(a) { }
    198 #endif				/* DEBUG */
    199 
    200 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
    201 
    202 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    203 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
    204 
    205 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    206 						 * spare table */
    207 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    208 						 * installation process */
    209 #endif
    210 
    211 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    212 
    213 /* prototypes */
    214 static void KernelWakeupFunc(struct buf *);
    215 static void InitBP(struct buf *, struct vnode *, unsigned,
    216     dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
    217     void *, int, struct proc *);
    218 static void raidinit(RF_Raid_t *);
    219 
    220 void raidattach(int);
    221 static int raid_match(device_t, cfdata_t, void *);
    222 static void raid_attach(device_t, device_t, void *);
    223 static int raid_detach(device_t, int);
    224 
    225 dev_type_open(raidopen);
    226 dev_type_close(raidclose);
    227 dev_type_read(raidread);
    228 dev_type_write(raidwrite);
    229 dev_type_ioctl(raidioctl);
    230 dev_type_strategy(raidstrategy);
    231 dev_type_dump(raiddump);
    232 dev_type_size(raidsize);
    233 
    234 const struct bdevsw raid_bdevsw = {
    235 	raidopen, raidclose, raidstrategy, raidioctl,
    236 	raiddump, raidsize, D_DISK
    237 };
    238 
    239 const struct cdevsw raid_cdevsw = {
    240 	raidopen, raidclose, raidread, raidwrite, raidioctl,
    241 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    242 };
    243 
    244 static struct dkdriver rf_dkdriver = { raidstrategy, minphys };
    245 
    246 /* XXX Not sure if the following should be replacing the raidPtrs above,
    247    or if it should be used in conjunction with that...
    248 */
    249 
    250 struct raid_softc {
    251 	device_t sc_dev;
    252 	int     sc_flags;	/* flags */
    253 	int     sc_cflags;	/* configuration flags */
    254 	uint64_t sc_size;	/* size of the raid device */
    255 	char    sc_xname[20];	/* XXX external name */
    256 	struct disk sc_dkdev;	/* generic disk device info */
    257 	struct bufq_state *buf_queue;	/* used for the device queue */
    258 };
    259 /* sc_flags */
    260 #define RAIDF_INITED	0x01	/* unit has been initialized */
    261 #define RAIDF_WLABEL	0x02	/* label area is writable */
    262 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    263 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    264 #define RAIDF_LOCKED	0x80	/* unit is locked */
    265 
    266 #define	raidunit(x)	DISKUNIT(x)
    267 int numraid = 0;
    268 
    269 extern struct cfdriver raid_cd;
    270 CFATTACH_DECL_NEW(raid, sizeof(struct raid_softc),
    271     raid_match, raid_attach, raid_detach, NULL);
    272 
    273 /*
    274  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    275  * Be aware that large numbers can allow the driver to consume a lot of
    276  * kernel memory, especially on writes, and in degraded mode reads.
    277  *
    278  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    279  * a single 64K write will typically require 64K for the old data,
    280  * 64K for the old parity, and 64K for the new parity, for a total
    281  * of 192K (if the parity buffer is not re-used immediately).
    282  * Even if it is used immediately, that's still 128K, which when multiplied
    283  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    284  *
    285  * Now in degraded mode, for example, a 64K read on the above setup may
    286  * require data reconstruction, which will require *all* of the 4 remaining
    287  * disks to participate -- 4 * 32K/disk == 128K again.
    288  */
    289 
    290 #ifndef RAIDOUTSTANDING
    291 #define RAIDOUTSTANDING   6
    292 #endif
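        /*
         * A back-of-the-envelope sketch of the arithmetic above
         * (illustrative only; "nreq", "io_bytes" and "stripe_unit_bytes"
         * are not RAIDframe fields):
         *
         *	write case:    extra ~= nreq * 3 * io_bytes
         *	                        (old data + old parity + new parity)
         *	degraded read: extra ~= nreq * (numCol - 1) * stripe_unit_bytes
         *
         * e.g. io_bytes = 64K with the default RAIDOUTSTANDING of 6 gives
         * roughly 6 * 192K = 1152K of buffers, on top of the 6 * 64K of
         * incoming write data.
         */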
    293 
    294 #define RAIDLABELDEV(dev)	\
    295 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
    296 
    297 /* declared here, and made public, for the benefit of KVM stuff.. */
    298 struct raid_softc *raid_softc;
    299 
    300 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
    301 				     struct disklabel *);
    302 static void raidgetdisklabel(dev_t);
    303 static void raidmakedisklabel(struct raid_softc *);
    304 
    305 static int raidlock(struct raid_softc *);
    306 static void raidunlock(struct raid_softc *);
    307 
    308 static void rf_markalldirty(RF_Raid_t *);
    309 static void rf_set_properties(struct raid_softc *, RF_Raid_t *);
    310 
    311 void rf_ReconThread(struct rf_recon_req *);
    312 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    313 void rf_CopybackThread(RF_Raid_t *raidPtr);
    314 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    315 int rf_autoconfig(device_t);
    316 void rf_buildroothack(RF_ConfigSet_t *);
    317 
    318 RF_AutoConfig_t *rf_find_raid_components(void);
    319 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    320 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    321 static int rf_reasonable_label(RF_ComponentLabel_t *);
    322 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    323 int rf_set_autoconfig(RF_Raid_t *, int);
    324 int rf_set_rootpartition(RF_Raid_t *, int);
    325 void rf_release_all_vps(RF_ConfigSet_t *);
    326 void rf_cleanup_config_set(RF_ConfigSet_t *);
    327 int rf_have_enough_components(RF_ConfigSet_t *);
    328 int rf_auto_config_set(RF_ConfigSet_t *, int *);
    329 static int rf_sync_component_caches(RF_Raid_t *raidPtr);
    330 
    331 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
    332 				  allow autoconfig to take place.
    333 				  Note that this is overridden by having
    334 				  RAID_AUTOCONFIG as an option in the
    335 				  kernel config file.  */
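        /*
         * For instance, a kernel configuration that wants RAID
         * autoconfiguration would carry something like the following
         * (a sketch; the unit count of 8 is illustrative):
         *
         *	pseudo-device	raid	8	# RAIDframe disk driver
         *	options 	RAID_AUTOCONFIG	# auto-configure RAID sets
         */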
    336 
    337 struct RF_Pools_s rf_pools;
    338 
    339 void
    340 raidattach(int num)
    341 {
    342 	int raidID;
    343 	int i, rc;
    344 
    345 	aprint_debug("raidattach: Asked for %d units\n", num);
    346 
    347 	if (num <= 0) {
    348 #ifdef DIAGNOSTIC
    349 		panic("raidattach: count <= 0");
    350 #endif
    351 		return;
    352 	}
    353 	/* This is where all the initialization stuff gets done. */
    354 
    355 	numraid = num;
    356 
    357 	/* Make some space for requested number of units... */
    358 
    359 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
    360 	if (raidPtrs == NULL) {
    361 		panic("raidPtrs is NULL!!");
    362 	}
    363 
    364 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    365 	rf_mutex_init(&rf_sparet_wait_mutex);
    366 
    367 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
    368 #endif
    369 
    370 	for (i = 0; i < num; i++)
    371 		raidPtrs[i] = NULL;
    372 	rc = rf_BootRaidframe();
    373 	if (rc == 0)
    374 		aprint_normal("Kernelized RAIDframe activated\n");
    375 	else
    376 		panic("Serious error booting RAID!!");
    377 
    378 	/* put together some data structures like the CCD device does. This
    379 	 * lets us lock the device and what-not when it gets opened. */
    380 
    381 	raid_softc = (struct raid_softc *)
    382 		malloc(num * sizeof(struct raid_softc),
    383 		       M_RAIDFRAME, M_NOWAIT);
    384 	if (raid_softc == NULL) {
    385 		aprint_error("WARNING: no memory for RAIDframe driver\n");
    386 		return;
    387 	}
    388 
    389 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
    390 
    391 	for (raidID = 0; raidID < num; raidID++) {
    392 		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);
    393 
    394 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
    395 			  (RF_Raid_t *));
    396 		if (raidPtrs[raidID] == NULL) {
    397 			aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID);
    398 			numraid = raidID;
    399 			return;
    400 		}
    401 	}
    402 
    403 	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
    404 		aprint_error("raidattach: config_cfattach_attach failed?\n");
    405 	}
    406 
    407 #ifdef RAID_AUTOCONFIG
    408 	raidautoconfig = 1;
    409 #endif
    410 
    411 	/*
    412 	 * Register a finalizer which will be used to auto-config RAID
    413 	 * sets once all real hardware devices have been found.
    414 	 */
    415 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
    416 		aprint_error("WARNING: unable to register RAIDframe finalizer\n");
    417 }
    418 
    419 int
    420 rf_autoconfig(device_t self)
    421 {
    422 	RF_AutoConfig_t *ac_list;
    423 	RF_ConfigSet_t *config_sets;
    424 
    425 	if (raidautoconfig == 0)
    426 		return (0);
    427 
    428 	/* XXX This code can only be run once. */
    429 	raidautoconfig = 0;
    430 
    431 	/* 1. locate all RAID components on the system */
    432 	aprint_debug("Searching for RAID components...\n");
    433 	ac_list = rf_find_raid_components();
    434 
    435 	/* 2. Sort them into their respective sets. */
    436 	config_sets = rf_create_auto_sets(ac_list);
    437 
    438 	/*
    439 	 * 3. Evaluate each set and configure the valid ones.
    440 	 * This gets done in rf_buildroothack().
    441 	 */
    442 	rf_buildroothack(config_sets);
    443 
    444 	return 1;
    445 }
    446 
    447 void
    448 rf_buildroothack(RF_ConfigSet_t *config_sets)
    449 {
    450 	RF_ConfigSet_t *cset;
    451 	RF_ConfigSet_t *next_cset;
    452 	int retcode;
    453 	int raidID;
    454 	int rootID;
    455 	int col;
    456 	int num_root;
    457 	char *devname;
    458 
    459 	rootID = 0;
    460 	num_root = 0;
    461 	cset = config_sets;
    462 	while(cset != NULL ) {
    463 		next_cset = cset->next;
    464 		if (rf_have_enough_components(cset) &&
    465 		    cset->ac->clabel->autoconfigure==1) {
    466 			retcode = rf_auto_config_set(cset,&raidID);
    467 			if (!retcode) {
    468 				aprint_debug("raid%d: configured ok\n", raidID);
    469 				if (cset->rootable) {
    470 					rootID = raidID;
    471 					num_root++;
    472 				}
    473 			} else {
    474 				/* The autoconfig didn't work :( */
    475 				aprint_debug("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
    476 				rf_release_all_vps(cset);
    477 			}
    478 		} else {
    479 			/* we're not autoconfiguring this set...
    480 			   release the associated resources */
    481 			rf_release_all_vps(cset);
    482 		}
    483 		/* cleanup */
    484 		rf_cleanup_config_set(cset);
    485 		cset = next_cset;
    486 	}
    487 
    488 	/* if the user has specified what the root device should be
    489 	   then we don't touch booted_device or boothowto... */
    490 
    491 	if (rootspec != NULL)
    492 		return;
    493 
    494 	/* we found something bootable... */
    495 
    496 	if (num_root == 1) {
    497 		booted_device = raid_softc[rootID].sc_dev;
    498 	} else if (num_root > 1) {
    499 
    500 		/*
    501 		 * Maybe the MD code can help. If it cannot, then
    502 		 * setroot() will discover that we have no
    503 		 * booted_device and will ask the user if nothing was
    504 		 * hardwired in the kernel config file
    505 		 */
    506 
    507 		if (booted_device == NULL)
    508 			cpu_rootconf();
    509 		if (booted_device == NULL)
    510 			return;
    511 
    512 		num_root = 0;
    513 		for (raidID = 0; raidID < numraid; raidID++) {
    514 			if (raidPtrs[raidID]->valid == 0)
    515 				continue;
    516 
    517 			if (raidPtrs[raidID]->root_partition == 0)
    518 				continue;
    519 
    520 			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
    521 				devname = raidPtrs[raidID]->Disks[col].devname;
    522 				devname += sizeof("/dev/") - 1;
    523 				if (strncmp(devname, device_xname(booted_device),
    524 					    strlen(device_xname(booted_device))) != 0)
    525 					continue;
    526 				aprint_debug("raid%d includes boot device %s\n",
    527 				       raidID, devname);
    528 				num_root++;
    529 				rootID = raidID;
    530 			}
    531 		}
    532 
    533 		if (num_root == 1) {
    534 			booted_device = raid_softc[rootID].sc_dev;
    535 		} else {
    536 			/* we can't guess.. require the user to answer... */
    537 			boothowto |= RB_ASKNAME;
    538 		}
    539 	}
    540 }
    541 
    542 
    543 int
    544 raidsize(dev_t dev)
    545 {
    546 	struct raid_softc *rs;
    547 	struct disklabel *lp;
    548 	int     part, unit, omask, size;
    549 
    550 	unit = raidunit(dev);
    551 	if (unit >= numraid)
    552 		return (-1);
    553 	rs = &raid_softc[unit];
    554 
    555 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    556 		return (-1);
    557 
    558 	part = DISKPART(dev);
    559 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
    560 	lp = rs->sc_dkdev.dk_label;
    561 
    562 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
    563 		return (-1);
    564 
    565 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    566 		size = -1;
    567 	else
    568 		size = lp->d_partitions[part].p_size *
    569 		    (lp->d_secsize / DEV_BSIZE);
    570 
    571 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
    572 		return (-1);
    573 
    574 	return (size);
    575 
    576 }
    577 
    578 int
    579 raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
    580 {
    581 	int     unit = raidunit(dev);
    582 	struct raid_softc *rs;
    583 	const struct bdevsw *bdev;
    584 	struct disklabel *lp;
    585 	RF_Raid_t *raidPtr;
    586 	daddr_t offset;
    587 	int     part, c, sparecol, j, scol, dumpto;
    588 	int     error = 0;
    589 
    590 	if (unit >= numraid)
    591 		return (ENXIO);
    592 
    593 	rs = &raid_softc[unit];
    594 	raidPtr = raidPtrs[unit];
    595 
    596 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    597 		return ENXIO;
    598 
    599 	/* we only support dumping to RAID 1 sets */
    600 	if (raidPtr->Layout.numDataCol != 1 ||
    601 	    raidPtr->Layout.numParityCol != 1)
    602 		return EINVAL;
    603 
    604 
    605 	if ((error = raidlock(rs)) != 0)
    606 		return error;
    607 
    608 	if (size % DEV_BSIZE != 0) {
    609 		error = EINVAL;
    610 		goto out;
    611 	}
    612 
    613 	if (blkno + size / DEV_BSIZE > rs->sc_size) {
    614 		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
    615 		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
    616 		    size / DEV_BSIZE, rs->sc_size);
    617 		error = EINVAL;
    618 		goto out;
    619 	}
    620 
    621 	part = DISKPART(dev);
    622 	lp = rs->sc_dkdev.dk_label;
    623 	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;
    624 
    625 	/* figure out what device is alive.. */
    626 
    627 	/*
    628 	   Look for a component to dump to.  The preference for the
    629 	   component to dump to is as follows:
    630 	   1) the master
    631 	   2) a used_spare of the master
    632 	   3) the slave
    633 	   4) a used_spare of the slave
    634 	*/
    635 
    636 	dumpto = -1;
    637 	for (c = 0; c < raidPtr->numCol; c++) {
    638 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
    639 			/* this might be the one */
    640 			dumpto = c;
    641 			break;
    642 		}
    643 	}
    644 
    645 	/*
    646 	   At this point we have possibly selected a live master or a
    647 	   live slave.  We now check to see if there is a spared
    648 	   master (or a spared slave), if we didn't find a live master
    649 	   or a live slave.
    650 	*/
    651 
    652 	for (c = 0; c < raidPtr->numSpare; c++) {
    653 		sparecol = raidPtr->numCol + c;
    654 		if (raidPtr->Disks[sparecol].status ==  rf_ds_used_spare) {
    655 			/* How about this one? */
    656 			scol = -1;
    657 			for(j=0;j<raidPtr->numCol;j++) {
    658 				if (raidPtr->Disks[j].spareCol == sparecol) {
    659 					scol = j;
    660 					break;
    661 				}
    662 			}
    663 			if (scol == 0) {
    664 				/*
    665 				   We must have found a spared master!
    666 				   We'll take that over anything else
    667 				   found so far.  (We couldn't have
    668 				   found a real master before, since
    669 				   this is a used spare, and it's
    670 				   saying that it's replacing the
    671 				   master.)  On reboot (with
    672 				   autoconfiguration turned on)
    673 				   sparecol will become the 1st
    674 				   component (component0) of this set.
    675 				*/
    676 				dumpto = sparecol;
    677 				break;
    678 			} else if (scol != -1) {
    679 				/*
    680 				   Must be a spared slave.  We'll dump
    681 				   to that if we haven't found anything
    682 				   else so far.
    683 				*/
    684 				if (dumpto == -1)
    685 					dumpto = sparecol;
    686 			}
    687 		}
    688 	}
    689 
    690 	if (dumpto == -1) {
    691 		/* we couldn't find any live components to dump to!?!?
    692 		 */
    693 		error = EINVAL;
    694 		goto out;
    695 	}
    696 
    697 	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
    698 
    699 	/*
    700 	   Note that blkno is relative to this particular partition.
    701 	   By adding the offset of this partition in the RAID
    702 	   set, and also adding RF_PROTECTED_SECTORS, we get a
    703 	   value that is relative to the partition used for the
    704 	   underlying component.
    705 	*/
    706 
    707 	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
    708 				blkno + offset, va, size);
    709 
    710 out:
    711 	raidunlock(rs);
    712 
    713 	return error;
    714 }
    715 /* ARGSUSED */
    716 int
    717 raidopen(dev_t dev, int flags, int fmt,
    718     struct lwp *l)
    719 {
    720 	int     unit = raidunit(dev);
    721 	struct raid_softc *rs;
    722 	struct disklabel *lp;
    723 	int     part, pmask;
    724 	int     error = 0;
    725 
    726 	if (unit >= numraid)
    727 		return (ENXIO);
    728 	rs = &raid_softc[unit];
    729 
    730 	if ((error = raidlock(rs)) != 0)
    731 		return (error);
    732 	lp = rs->sc_dkdev.dk_label;
    733 
    734 	part = DISKPART(dev);
    735 
    736 	/*
    737 	 * If there are wedges, and this is not RAW_PART, then we
    738 	 * need to fail.
    739 	 */
    740 	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
    741 		error = EBUSY;
    742 		goto bad;
    743 	}
    744 	pmask = (1 << part);
    745 
    746 	if ((rs->sc_flags & RAIDF_INITED) &&
    747 	    (rs->sc_dkdev.dk_openmask == 0))
    748 		raidgetdisklabel(dev);
    749 
    750 	/* make sure that this partition exists */
    751 
    752 	if (part != RAW_PART) {
    753 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
    754 		    ((part >= lp->d_npartitions) ||
    755 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    756 			error = ENXIO;
    757 			goto bad;
    758 		}
    759 	}
    760 	/* Prevent this unit from being unconfigured while open. */
    761 	switch (fmt) {
    762 	case S_IFCHR:
    763 		rs->sc_dkdev.dk_copenmask |= pmask;
    764 		break;
    765 
    766 	case S_IFBLK:
    767 		rs->sc_dkdev.dk_bopenmask |= pmask;
    768 		break;
    769 	}
    770 
    771 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    772 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    773 		/* First one... mark things as dirty... Note that we *MUST*
    774 		 have done a configure before this.  I DO NOT WANT TO BE
    775 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    776 		 THAT THEY BELONG TOGETHER!!!!! */
    777 		/* XXX should check to see if we're only open for reading
    778 		   here... If so, we needn't do this, but then need some
    779 		   other way of keeping track of what's happened.. */
    780 
    781 		rf_markalldirty( raidPtrs[unit] );
    782 	}
    783 
    784 
    785 	rs->sc_dkdev.dk_openmask =
    786 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    787 
    788 bad:
    789 	raidunlock(rs);
    790 
    791 	return (error);
    792 
    793 
    794 }
    795 /* ARGSUSED */
    796 int
    797 raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
    798 {
    799 	int     unit = raidunit(dev);
    800 	cfdata_t cf;
    801 	struct raid_softc *rs;
    802 	int     error = 0;
    803 	int     part;
    804 
    805 	if (unit >= numraid)
    806 		return (ENXIO);
    807 	rs = &raid_softc[unit];
    808 
    809 	if ((error = raidlock(rs)) != 0)
    810 		return (error);
    811 
    812 	part = DISKPART(dev);
    813 
    814 	/* ...that much closer to allowing unconfiguration... */
    815 	switch (fmt) {
    816 	case S_IFCHR:
    817 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
    818 		break;
    819 
    820 	case S_IFBLK:
    821 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
    822 		break;
    823 	}
    824 	rs->sc_dkdev.dk_openmask =
    825 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    826 
    827 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    828 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    829 		/* Last one... The device is not unconfigured yet.
    830 		   (Device shutdown will have taken care of setting the
    831 		   clean bits if RAIDF_INITED is not set.)
    832 		   Mark things as clean... */
    833 
    834 		rf_update_component_labels(raidPtrs[unit],
    835 						 RF_FINAL_COMPONENT_UPDATE);
    836 		if (doing_shutdown) {
    837 			/* last one, and we're going down, so
    838 			   lights out for this RAID set too. */
    839 			error = rf_Shutdown(raidPtrs[unit]);
    840 
    841 			/* It's no longer initialized... */
    842 			rs->sc_flags &= ~RAIDF_INITED;
    843 
    844 			/* detach the device */
    845 
    846 			cf = device_cfdata(rs->sc_dev);
    847 			error = config_detach(rs->sc_dev, DETACH_QUIET);
    848 			free(cf, M_RAIDFRAME);
    849 
    850 			/* Detach the disk. */
    851 			disk_detach(&rs->sc_dkdev);
    852 			disk_destroy(&rs->sc_dkdev);
    853 		}
    854 	}
    855 
    856 	raidunlock(rs);
    857 	return (0);
    858 
    859 }
    860 
    861 void
    862 raidstrategy(struct buf *bp)
    863 {
    864 	int s;
    865 
    866 	unsigned int raidID = raidunit(bp->b_dev);
    867 	RF_Raid_t *raidPtr;
    868 	struct raid_softc *rs = &raid_softc[raidID];
    869 	int     wlabel;
    870 
    871 	if ((rs->sc_flags & RAIDF_INITED) ==0) {
    872 		bp->b_error = ENXIO;
    873 		goto done;
    874 	}
    875 	if (raidID >= numraid || !raidPtrs[raidID]) {
    876 		bp->b_error = ENODEV;
    877 		goto done;
    878 	}
    879 	raidPtr = raidPtrs[raidID];
    880 	if (!raidPtr->valid) {
    881 		bp->b_error = ENODEV;
    882 		goto done;
    883 	}
    884 	if (bp->b_bcount == 0) {
    885 		db1_printf(("b_bcount is zero..\n"));
    886 		goto done;
    887 	}
    888 
    889 	/*
    890 	 * Do bounds checking and adjust transfer.  If there's an
    891 	 * error, the bounds check will flag that for us.
    892 	 */
    893 
    894 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
    895 	if (DISKPART(bp->b_dev) == RAW_PART) {
    896 		uint64_t size; /* device size in DEV_BSIZE unit */
    897 
    898 		if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
    899 			size = raidPtr->totalSectors <<
    900 			    (raidPtr->logBytesPerSector - DEV_BSHIFT);
    901 		} else {
    902 			size = raidPtr->totalSectors >>
    903 			    (DEV_BSHIFT - raidPtr->logBytesPerSector);
    904 		}
    905 		if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
    906 			goto done;
    907 		}
    908 	} else {
    909 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
    910 			db1_printf(("Bounds check failed!!:%d %d\n",
    911 				(int) bp->b_blkno, (int) wlabel));
    912 			goto done;
    913 		}
    914 	}
    915 	s = splbio();
    916 
    917 	bp->b_resid = 0;
    918 
    919 	/* stuff it onto our queue */
    920 	bufq_put(rs->buf_queue, bp);
    921 
    922 	/* schedule the I/O to happen at the next convenient time */
    923 	wakeup(&(raidPtrs[raidID]->iodone));
    924 
    925 	splx(s);
    926 	return;
    927 
    928 done:
    929 	bp->b_resid = bp->b_bcount;
    930 	biodone(bp);
    931 }
    932 /* ARGSUSED */
    933 int
    934 raidread(dev_t dev, struct uio *uio, int flags)
    935 {
    936 	int     unit = raidunit(dev);
    937 	struct raid_softc *rs;
    938 
    939 	if (unit >= numraid)
    940 		return (ENXIO);
    941 	rs = &raid_softc[unit];
    942 
    943 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    944 		return (ENXIO);
    945 
    946 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    947 
    948 }
    949 /* ARGSUSED */
    950 int
    951 raidwrite(dev_t dev, struct uio *uio, int flags)
    952 {
    953 	int     unit = raidunit(dev);
    954 	struct raid_softc *rs;
    955 
    956 	if (unit >= numraid)
    957 		return (ENXIO);
    958 	rs = &raid_softc[unit];
    959 
    960 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    961 		return (ENXIO);
    962 
    963 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    964 
    965 }
    966 
    967 int
    968 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
    969 {
    970 	int     unit = raidunit(dev);
    971 	int     error = 0;
    972 	int     part, pmask;
    973 	cfdata_t cf;
    974 	struct raid_softc *rs;
    975 	RF_Config_t *k_cfg, *u_cfg;
    976 	RF_Raid_t *raidPtr;
    977 	RF_RaidDisk_t *diskPtr;
    978 	RF_AccTotals_t *totals;
    979 	RF_DeviceConfig_t *d_cfg, **ucfgp;
    980 	u_char *specific_buf;
    981 	int retcode = 0;
    982 	int column;
    983 	int raidid;
    984 	struct rf_recon_req *rrcopy, *rr;
    985 	RF_ComponentLabel_t *clabel;
    986 	RF_ComponentLabel_t *ci_label;
    987 	RF_ComponentLabel_t **clabel_ptr;
    988 	RF_SingleComponent_t *sparePtr,*componentPtr;
    989 	RF_SingleComponent_t component;
    990 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
    991 	int i, j, d;
    992 #ifdef __HAVE_OLD_DISKLABEL
    993 	struct disklabel newlabel;
    994 #endif
    995 	struct dkwedge_info *dkw;
    996 
    997 	if (unit >= numraid)
    998 		return (ENXIO);
    999 	rs = &raid_softc[unit];
   1000 	raidPtr = raidPtrs[unit];
   1001 
   1002 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
   1003 		(int) DISKPART(dev), (int) unit, (int) cmd));
   1004 
   1005 	/* Must be open for writes for these commands... */
   1006 	switch (cmd) {
   1007 #ifdef DIOCGSECTORSIZE
   1008 	case DIOCGSECTORSIZE:
   1009 		*(u_int *)data = raidPtr->bytesPerSector;
   1010 		return 0;
   1011 	case DIOCGMEDIASIZE:
   1012 		*(off_t *)data =
   1013 		    (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
   1014 		return 0;
   1015 #endif
   1016 	case DIOCSDINFO:
   1017 	case DIOCWDINFO:
   1018 #ifdef __HAVE_OLD_DISKLABEL
   1019 	case ODIOCWDINFO:
   1020 	case ODIOCSDINFO:
   1021 #endif
   1022 	case DIOCWLABEL:
   1023 	case DIOCAWEDGE:
   1024 	case DIOCDWEDGE:
   1025 		if ((flag & FWRITE) == 0)
   1026 			return (EBADF);
   1027 	}
   1028 
   1029 	/* Must be initialized for these... */
   1030 	switch (cmd) {
   1031 	case DIOCGDINFO:
   1032 	case DIOCSDINFO:
   1033 	case DIOCWDINFO:
   1034 #ifdef __HAVE_OLD_DISKLABEL
   1035 	case ODIOCGDINFO:
   1036 	case ODIOCWDINFO:
   1037 	case ODIOCSDINFO:
   1038 	case ODIOCGDEFLABEL:
   1039 #endif
   1040 	case DIOCGPART:
   1041 	case DIOCWLABEL:
   1042 	case DIOCGDEFLABEL:
   1043 	case DIOCAWEDGE:
   1044 	case DIOCDWEDGE:
   1045 	case DIOCLWEDGES:
   1046 	case DIOCCACHESYNC:
   1047 	case RAIDFRAME_SHUTDOWN:
   1048 	case RAIDFRAME_REWRITEPARITY:
   1049 	case RAIDFRAME_GET_INFO:
   1050 	case RAIDFRAME_RESET_ACCTOTALS:
   1051 	case RAIDFRAME_GET_ACCTOTALS:
   1052 	case RAIDFRAME_KEEP_ACCTOTALS:
   1053 	case RAIDFRAME_GET_SIZE:
   1054 	case RAIDFRAME_FAIL_DISK:
   1055 	case RAIDFRAME_COPYBACK:
   1056 	case RAIDFRAME_CHECK_RECON_STATUS:
   1057 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1058 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1059 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1060 	case RAIDFRAME_ADD_HOT_SPARE:
   1061 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1062 	case RAIDFRAME_INIT_LABELS:
   1063 	case RAIDFRAME_REBUILD_IN_PLACE:
   1064 	case RAIDFRAME_CHECK_PARITY:
   1065 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1066 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1067 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1068 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1069 	case RAIDFRAME_SET_AUTOCONFIG:
   1070 	case RAIDFRAME_SET_ROOT:
   1071 	case RAIDFRAME_DELETE_COMPONENT:
   1072 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1073 		if ((rs->sc_flags & RAIDF_INITED) == 0)
   1074 			return (ENXIO);
   1075 	}
   1076 
   1077 	switch (cmd) {
   1078 #ifdef COMPAT_50
   1079 	case RAIDFRAME_GET_INFO50:
   1080 		return rf_get_info50(raidPtr, data);
   1081 
   1082 	case RAIDFRAME_CONFIGURE50:
   1083 		if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
   1084 			return retcode;
   1085 		goto config;
   1086 #endif
   1087 		/* configure the system */
   1088 	case RAIDFRAME_CONFIGURE:
   1089 
   1090 		if (raidPtr->valid) {
   1091 			/* There is a valid RAID set running on this unit! */
   1092 			printf("raid%d: Device already configured!\n",unit);
   1093 			return(EINVAL);
   1094 		}
   1095 
   1096 		/* copy-in the configuration information */
   1097 		/* data points to a pointer to the configuration structure */
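        		/*
        		 * A minimal userland sketch of that convention (an
        		 * illustration only; "fd" and the filled-in "cfg" are
        		 * assumed here, not part of this driver):
        		 *
        		 *	RF_Config_t cfg, *cfgp = &cfg;
        		 *	... fill in cfg, e.g. as raidctl(8) does ...
        		 *	if (ioctl(fd, RAIDFRAME_CONFIGURE, &cfgp) == -1)
        		 *		err(1, "RAIDFRAME_CONFIGURE");
        		 *
        		 * i.e. the argument is the address of an RF_Config_t
        		 * pointer, which is why *data is dereferenced below
        		 * before the copyin() of the structure itself.
        		 */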
   1098 
   1099 		u_cfg = *((RF_Config_t **) data);
   1100 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
   1101 		if (k_cfg == NULL) {
   1102 			return (ENOMEM);
   1103 		}
   1104 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
   1105 		if (retcode) {
   1106 			RF_Free(k_cfg, sizeof(RF_Config_t));
   1107 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
   1108 				retcode));
   1109 			return (retcode);
   1110 		}
   1111 		goto config;
   1112 	config:
   1113 		/* allocate a buffer for the layout-specific data, and copy it
   1114 		 * in */
   1115 		if (k_cfg->layoutSpecificSize) {
   1116 			if (k_cfg->layoutSpecificSize > 10000) {
   1117 				/* sanity check */
   1118 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1119 				return (EINVAL);
   1120 			}
   1121 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
   1122 			    (u_char *));
   1123 			if (specific_buf == NULL) {
   1124 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1125 				return (ENOMEM);
   1126 			}
   1127 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
   1128 			    k_cfg->layoutSpecificSize);
   1129 			if (retcode) {
   1130 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1131 				RF_Free(specific_buf,
   1132 					k_cfg->layoutSpecificSize);
   1133 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
   1134 					retcode));
   1135 				return (retcode);
   1136 			}
   1137 		} else
   1138 			specific_buf = NULL;
   1139 		k_cfg->layoutSpecific = specific_buf;
   1140 
   1141 		/* should do some kind of sanity check on the configuration.
   1142 		 * Store the sum of all the bytes in the last byte? */
   1143 
   1144 		/* configure the system */
   1145 
   1146 		/*
   1147 		 * Clear the entire RAID descriptor, just to make sure
   1148 		 *  there is no stale data left in the case of a
   1149 		 *  reconfiguration
   1150 		 */
   1151 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
   1152 		raidPtr->raidid = unit;
   1153 
   1154 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
   1155 
   1156 		if (retcode == 0) {
   1157 
   1158 			/* allow this many simultaneous IO's to
   1159 			   this RAID device */
   1160 			raidPtr->openings = RAIDOUTSTANDING;
   1161 
   1162 			raidinit(raidPtr);
   1163 			rf_markalldirty(raidPtr);
   1164 		}
   1165 		/* free the buffers.  No return code here. */
   1166 		if (k_cfg->layoutSpecificSize) {
   1167 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
   1168 		}
   1169 		RF_Free(k_cfg, sizeof(RF_Config_t));
   1170 
   1171 		return (retcode);
   1172 
   1173 		/* shutdown the system */
   1174 	case RAIDFRAME_SHUTDOWN:
   1175 
   1176 		if ((error = raidlock(rs)) != 0)
   1177 			return (error);
   1178 
   1179 		/*
   1180 		 * If somebody has a partition mounted, we shouldn't
   1181 		 * shutdown.
   1182 		 */
   1183 
   1184 		part = DISKPART(dev);
   1185 		pmask = (1 << part);
   1186 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
   1187 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
   1188 			(rs->sc_dkdev.dk_copenmask & pmask))) {
   1189 			raidunlock(rs);
   1190 			return (EBUSY);
   1191 		}
   1192 
   1193 		retcode = rf_Shutdown(raidPtr);
   1194 
   1195 		/* It's no longer initialized... */
   1196 		rs->sc_flags &= ~RAIDF_INITED;
   1197 
   1198 		/* free the pseudo device attach bits */
   1199 
   1200 		cf = device_cfdata(rs->sc_dev);
   1201 		/* XXX this causes us to not return any errors
   1202 		   from the above call to rf_Shutdown() */
   1203 		retcode = config_detach(rs->sc_dev, DETACH_QUIET);
   1204 		free(cf, M_RAIDFRAME);
   1205 
   1206 		/* Detach the disk. */
   1207 		disk_detach(&rs->sc_dkdev);
   1208 		disk_destroy(&rs->sc_dkdev);
   1209 
   1210 		raidunlock(rs);
   1211 
   1212 		return (retcode);
   1213 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1214 		clabel_ptr = (RF_ComponentLabel_t **) data;
   1215 		/* need to read the component label for the disk indicated
   1216 		   by row,column in clabel */
   1217 
   1218 		/* For practice, let's get it directly from disk, rather
   1219 		   than from the in-core copy */
   1220 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
   1221 			   (RF_ComponentLabel_t *));
   1222 		if (clabel == NULL)
   1223 			return (ENOMEM);
   1224 
   1225 		retcode = copyin( *clabel_ptr, clabel,
   1226 				  sizeof(RF_ComponentLabel_t));
   1227 
   1228 		if (retcode) {
   1229 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1230 			return(retcode);
   1231 		}
   1232 
   1233 		clabel->row = 0; /* Don't allow looking at anything else.*/
   1234 
   1235 		column = clabel->column;
   1236 
   1237 		if ((column < 0) || (column >= raidPtr->numCol +
   1238 				     raidPtr->numSpare)) {
   1239 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1240 			return(EINVAL);
   1241 		}
   1242 
   1243 		retcode = raidread_component_label(raidPtr->Disks[column].dev,
   1244 				raidPtr->raid_cinfo[column].ci_vp,
   1245 				clabel );
   1246 
   1247 		if (retcode == 0) {
   1248 			retcode = copyout(clabel, *clabel_ptr,
   1249 					  sizeof(RF_ComponentLabel_t));
   1250 		}
   1251 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
   1252 		return (retcode);
   1253 
   1254 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1255 		clabel = (RF_ComponentLabel_t *) data;
   1256 
   1257 		/* XXX check the label for valid stuff... */
   1258 		/* Note that some things *should not* get modified --
   1259 		   the user should be re-initing the labels instead of
   1260 		   trying to patch things.
   1261 		   */
   1262 
   1263 		raidid = raidPtr->raidid;
   1264 #ifdef DEBUG
   1265 		printf("raid%d: Got component label:\n", raidid);
   1266 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1267 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1268 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1269 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1270 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1271 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1272 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1273 #endif
   1274 		clabel->row = 0;
   1275 		column = clabel->column;
   1276 
   1277 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1278 			return(EINVAL);
   1279 		}
   1280 
   1281 		/* XXX this isn't allowed to do anything for now :-) */
   1282 
   1283 		/* XXX and before it is, we need to fill in the rest
   1284 		   of the fields!?!?!?! */
   1285 #if 0
   1286 		raidwrite_component_label(
   1287 		     raidPtr->Disks[column].dev,
   1288 			    raidPtr->raid_cinfo[column].ci_vp,
   1289 			    clabel );
   1290 #endif
   1291 		return (0);
   1292 
   1293 	case RAIDFRAME_INIT_LABELS:
   1294 		clabel = (RF_ComponentLabel_t *) data;
   1295 		/*
   1296 		   we only want the serial number from
   1297 		   the above.  We get all the rest of the information
   1298 		   from the config that was used to create this RAID
   1299 		   set.
   1300 		   */
   1301 
   1302 		raidPtr->serial_number = clabel->serial_number;
   1303 
   1304 		RF_Malloc(ci_label, sizeof(RF_ComponentLabel_t),
   1305 			  (RF_ComponentLabel_t *));
   1306 		if (ci_label == NULL)
   1307 			return (ENOMEM);
   1308 
   1309 		raid_init_component_label(raidPtr, ci_label);
   1310 		ci_label->serial_number = clabel->serial_number;
   1311 		ci_label->row = 0; /* we don't pretend to support more */
   1312 
   1313 		for(column=0;column<raidPtr->numCol;column++) {
   1314 			diskPtr = &raidPtr->Disks[column];
   1315 			if (!RF_DEAD_DISK(diskPtr->status)) {
   1316 				ci_label->partitionSize = diskPtr->partitionSize;
   1317 				ci_label->column = column;
   1318 				raidwrite_component_label(
   1319 							  raidPtr->Disks[column].dev,
   1320 							  raidPtr->raid_cinfo[column].ci_vp,
   1321 							  ci_label );
   1322 			}
   1323 		}
   1324 		RF_Free(ci_label, sizeof(RF_ComponentLabel_t));
   1325 
   1326 		return (retcode);
   1327 	case RAIDFRAME_SET_AUTOCONFIG:
   1328 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1329 		printf("raid%d: New autoconfig value is: %d\n",
   1330 		       raidPtr->raidid, d);
   1331 		*(int *) data = d;
   1332 		return (retcode);
   1333 
   1334 	case RAIDFRAME_SET_ROOT:
   1335 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1336 		printf("raid%d: New rootpartition value is: %d\n",
   1337 		       raidPtr->raidid, d);
   1338 		*(int *) data = d;
   1339 		return (retcode);
   1340 
   1341 		/* initialize all parity */
   1342 	case RAIDFRAME_REWRITEPARITY:
   1343 
   1344 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1345 			/* Parity for RAID 0 is trivially correct */
   1346 			raidPtr->parity_good = RF_RAID_CLEAN;
   1347 			return(0);
   1348 		}
   1349 
   1350 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1351 			/* Re-write is already in progress! */
   1352 			return(EINVAL);
   1353 		}
   1354 
   1355 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1356 					   rf_RewriteParityThread,
   1357 					   raidPtr,"raid_parity");
   1358 		return (retcode);
   1359 
   1360 
   1361 	case RAIDFRAME_ADD_HOT_SPARE:
   1362 		sparePtr = (RF_SingleComponent_t *) data;
   1363 		memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
   1364 		retcode = rf_add_hot_spare(raidPtr, &component);
   1365 		return(retcode);
   1366 
   1367 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1368 		return(retcode);
   1369 
   1370 	case RAIDFRAME_DELETE_COMPONENT:
   1371 		componentPtr = (RF_SingleComponent_t *)data;
   1372 		memcpy( &component, componentPtr,
   1373 			sizeof(RF_SingleComponent_t));
   1374 		retcode = rf_delete_component(raidPtr, &component);
   1375 		return(retcode);
   1376 
   1377 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1378 		componentPtr = (RF_SingleComponent_t *)data;
   1379 		memcpy( &component, componentPtr,
   1380 			sizeof(RF_SingleComponent_t));
   1381 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1382 		return(retcode);
   1383 
   1384 	case RAIDFRAME_REBUILD_IN_PLACE:
   1385 
   1386 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1387 			/* Can't do this on a RAID 0!! */
   1388 			return(EINVAL);
   1389 		}
   1390 
   1391 		if (raidPtr->recon_in_progress == 1) {
   1392 			/* a reconstruct is already in progress! */
   1393 			return(EINVAL);
   1394 		}
   1395 
   1396 		componentPtr = (RF_SingleComponent_t *) data;
   1397 		memcpy( &component, componentPtr,
   1398 			sizeof(RF_SingleComponent_t));
   1399 		component.row = 0; /* we don't support any more */
   1400 		column = component.column;
   1401 
   1402 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1403 			return(EINVAL);
   1404 		}
   1405 
   1406 		RF_LOCK_MUTEX(raidPtr->mutex);
   1407 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
   1408 		    (raidPtr->numFailures > 0)) {
   1409 			/* XXX 0 above shouldn't be constant!!! */
   1410 			/* some component other than this has failed.
   1411 			   Let's not make things worse than they already
   1412 			   are... */
   1413 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1414 			       raidPtr->raidid);
   1415 			printf("raid%d:     Col: %d   Too many failures.\n",
   1416 			       raidPtr->raidid, column);
   1417 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1418 			return (EINVAL);
   1419 		}
   1420 		if (raidPtr->Disks[column].status ==
   1421 		    rf_ds_reconstructing) {
   1422 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1423 			       raidPtr->raidid);
   1424 			printf("raid%d:    Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
   1425 
   1426 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1427 			return (EINVAL);
   1428 		}
   1429 		if (raidPtr->Disks[column].status == rf_ds_spared) {
   1430 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1431 			return (EINVAL);
   1432 		}
   1433 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1434 
   1435 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1436 		if (rrcopy == NULL)
   1437 			return(ENOMEM);
   1438 
   1439 		rrcopy->raidPtr = (void *) raidPtr;
   1440 		rrcopy->col = column;
   1441 
   1442 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1443 					   rf_ReconstructInPlaceThread,
   1444 					   rrcopy,"raid_reconip");
   1445 		return(retcode);
   1446 
   1447 	case RAIDFRAME_GET_INFO:
   1448 		if (!raidPtr->valid)
   1449 			return (ENODEV);
   1450 		ucfgp = (RF_DeviceConfig_t **) data;
   1451 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1452 			  (RF_DeviceConfig_t *));
   1453 		if (d_cfg == NULL)
   1454 			return (ENOMEM);
   1455 		d_cfg->rows = 1; /* there is only 1 row now */
   1456 		d_cfg->cols = raidPtr->numCol;
   1457 		d_cfg->ndevs = raidPtr->numCol;
   1458 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1459 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1460 			return (ENOMEM);
   1461 		}
   1462 		d_cfg->nspares = raidPtr->numSpare;
   1463 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1464 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1465 			return (ENOMEM);
   1466 		}
   1467 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1468 		d = 0;
   1469 		for (j = 0; j < d_cfg->cols; j++) {
   1470 			d_cfg->devs[d] = raidPtr->Disks[j];
   1471 			d++;
   1472 		}
   1473 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1474 			d_cfg->spares[i] = raidPtr->Disks[j];
   1475 		}
   1476 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1477 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1478 
   1479 		return (retcode);
   1480 
   1481 	case RAIDFRAME_CHECK_PARITY:
   1482 		*(int *) data = raidPtr->parity_good;
   1483 		return (0);
   1484 
   1485 	case RAIDFRAME_RESET_ACCTOTALS:
   1486 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1487 		return (0);
   1488 
   1489 	case RAIDFRAME_GET_ACCTOTALS:
   1490 		totals = (RF_AccTotals_t *) data;
   1491 		*totals = raidPtr->acc_totals;
   1492 		return (0);
   1493 
   1494 	case RAIDFRAME_KEEP_ACCTOTALS:
   1495 		raidPtr->keep_acc_totals = *(int *)data;
   1496 		return (0);
   1497 
   1498 	case RAIDFRAME_GET_SIZE:
   1499 		*(int *) data = raidPtr->totalSectors;
   1500 		return (0);
   1501 
   1502 		/* fail a disk & optionally start reconstruction */
   1503 	case RAIDFRAME_FAIL_DISK:
   1504 
   1505 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1506 			/* Can't do this on a RAID 0!! */
   1507 			return(EINVAL);
   1508 		}
   1509 
   1510 		rr = (struct rf_recon_req *) data;
   1511 		rr->row = 0;
   1512 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
   1513 			return (EINVAL);
   1514 
   1515 
   1516 		RF_LOCK_MUTEX(raidPtr->mutex);
   1517 		if (raidPtr->status == rf_rs_reconstructing) {
   1518 			/* you can't fail a disk while we're reconstructing! */
   1519 			/* XXX wrong for RAID6 */
   1520 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1521 			return (EINVAL);
   1522 		}
   1523 		if ((raidPtr->Disks[rr->col].status ==
   1524 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1525 			/* some other component has failed.  Let's not make
   1526 			   things worse. XXX wrong for RAID6 */
   1527 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1528 			return (EINVAL);
   1529 		}
   1530 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
   1531 			/* Can't fail a spared disk! */
   1532 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1533 			return (EINVAL);
   1534 		}
   1535 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1536 
   1537 		/* make a copy of the recon request so that we don't rely on
   1538 		 * the user's buffer */
   1539 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1540 		if (rrcopy == NULL)
   1541 			return(ENOMEM);
   1542 		memcpy(rrcopy, rr, sizeof(*rr));
   1543 		rrcopy->raidPtr = (void *) raidPtr;
   1544 
   1545 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1546 					   rf_ReconThread,
   1547 					   rrcopy,"raid_recon");
   1548 		return (0);
   1549 
   1550 		/* invoke a copyback operation after recon on whatever disk
   1551 		 * needs it, if any */
   1552 	case RAIDFRAME_COPYBACK:
   1553 
   1554 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1555 			/* This makes no sense on a RAID 0!! */
   1556 			return(EINVAL);
   1557 		}
   1558 
   1559 		if (raidPtr->copyback_in_progress == 1) {
   1560 			/* Copyback is already in progress! */
   1561 			return(EINVAL);
   1562 		}
   1563 
   1564 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1565 					   rf_CopybackThread,
   1566 					   raidPtr,"raid_copyback");
   1567 		return (retcode);
   1568 
   1569 		/* return the percentage completion of reconstruction */
   1570 	case RAIDFRAME_CHECK_RECON_STATUS:
   1571 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1572 			/* This makes no sense on a RAID 0, so tell the
   1573 			   user it's done. */
   1574 			*(int *) data = 100;
   1575 			return(0);
   1576 		}
   1577 		if (raidPtr->status != rf_rs_reconstructing)
   1578 			*(int *) data = 100;
   1579 		else {
   1580 			if (raidPtr->reconControl->numRUsTotal > 0) {
   1581 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
   1582 			} else {
   1583 				*(int *) data = 0;
   1584 			}
   1585 		}
   1586 		return (0);
   1587 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1588 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1589 		if (raidPtr->status != rf_rs_reconstructing) {
   1590 			progressInfo.remaining = 0;
   1591 			progressInfo.completed = 100;
   1592 			progressInfo.total = 100;
   1593 		} else {
   1594 			progressInfo.total =
   1595 				raidPtr->reconControl->numRUsTotal;
   1596 			progressInfo.completed =
   1597 				raidPtr->reconControl->numRUsComplete;
   1598 			progressInfo.remaining = progressInfo.total -
   1599 				progressInfo.completed;
   1600 		}
   1601 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1602 				  sizeof(RF_ProgressInfo_t));
   1603 		return (retcode);
   1604 
   1605 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1606 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1607 			/* This makes no sense on a RAID 0, so tell the
   1608 			   user it's done. */
   1609 			*(int *) data = 100;
   1610 			return(0);
   1611 		}
   1612 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1613 			*(int *) data = 100 *
   1614 				raidPtr->parity_rewrite_stripes_done /
   1615 				raidPtr->Layout.numStripe;
   1616 		} else {
   1617 			*(int *) data = 100;
   1618 		}
   1619 		return (0);
   1620 
   1621 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1622 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1623 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1624 			progressInfo.total = raidPtr->Layout.numStripe;
   1625 			progressInfo.completed =
   1626 				raidPtr->parity_rewrite_stripes_done;
   1627 			progressInfo.remaining = progressInfo.total -
   1628 				progressInfo.completed;
   1629 		} else {
   1630 			progressInfo.remaining = 0;
   1631 			progressInfo.completed = 100;
   1632 			progressInfo.total = 100;
   1633 		}
   1634 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1635 				  sizeof(RF_ProgressInfo_t));
   1636 		return (retcode);
   1637 
   1638 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1639 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1640 			/* This makes no sense on a RAID 0 */
   1641 			*(int *) data = 100;
   1642 			return(0);
   1643 		}
   1644 		if (raidPtr->copyback_in_progress == 1) {
   1645 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1646 				raidPtr->Layout.numStripe;
   1647 		} else {
   1648 			*(int *) data = 100;
   1649 		}
   1650 		return (0);
   1651 
   1652 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1653 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1654 		if (raidPtr->copyback_in_progress == 1) {
   1655 			progressInfo.total = raidPtr->Layout.numStripe;
   1656 			progressInfo.completed =
   1657 				raidPtr->copyback_stripes_done;
   1658 			progressInfo.remaining = progressInfo.total -
   1659 				progressInfo.completed;
   1660 		} else {
   1661 			progressInfo.remaining = 0;
   1662 			progressInfo.completed = 100;
   1663 			progressInfo.total = 100;
   1664 		}
   1665 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1666 				  sizeof(RF_ProgressInfo_t));
   1667 		return (retcode);
   1668 
   1669 		/* the sparetable daemon calls this to wait for the kernel to
   1670 		 * need a spare table. this ioctl does not return until a
   1671 		 * spare table is needed. XXX -- calling mpsleep here in the
   1672 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1673 		 * -- I should either compute the spare table in the kernel,
   1674 		 * or have a different -- XXX XXX -- interface (a different
   1675 		 * character device) for delivering the table     -- XXX */
   1676 #if 0
   1677 	case RAIDFRAME_SPARET_WAIT:
   1678 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1679 		while (!rf_sparet_wait_queue)
   1680 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
   1681 		waitreq = rf_sparet_wait_queue;
   1682 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1683 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1684 
   1685 		/* structure assignment */
   1686 		*((RF_SparetWait_t *) data) = *waitreq;
   1687 
   1688 		RF_Free(waitreq, sizeof(*waitreq));
   1689 		return (0);
   1690 
   1691 		/* wakes up a process waiting on SPARET_WAIT and puts an error
    1692 		 * code in it that will cause the daemon to exit */
   1693 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1694 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1695 		waitreq->fcol = -1;
   1696 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1697 		waitreq->next = rf_sparet_wait_queue;
   1698 		rf_sparet_wait_queue = waitreq;
   1699 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1700 		wakeup(&rf_sparet_wait_queue);
   1701 		return (0);
   1702 
   1703 		/* used by the spare table daemon to deliver a spare table
   1704 		 * into the kernel */
   1705 	case RAIDFRAME_SEND_SPARET:
   1706 
   1707 		/* install the spare table */
   1708 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1709 
   1710 		/* respond to the requestor.  the return status of the spare
   1711 		 * table installation is passed in the "fcol" field */
   1712 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1713 		waitreq->fcol = retcode;
   1714 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1715 		waitreq->next = rf_sparet_resp_queue;
   1716 		rf_sparet_resp_queue = waitreq;
   1717 		wakeup(&rf_sparet_resp_queue);
   1718 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1719 
   1720 		return (retcode);
   1721 #endif
   1722 
   1723 	default:
   1724 		break; /* fall through to the os-specific code below */
   1725 
   1726 	}
   1727 
   1728 	if (!raidPtr->valid)
   1729 		return (EINVAL);
   1730 
   1731 	/*
   1732 	 * Add support for "regular" device ioctls here.
   1733 	 */
   1734 
   1735 	error = disk_ioctl(&rs->sc_dkdev, cmd, addr, flag, l);
   1736 	if (error != EPASSTHROUGH)
   1737 		return (error);
   1738 
   1739 	switch (cmd) {
   1740 	case DIOCGDINFO:
   1741 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
   1742 		break;
   1743 #ifdef __HAVE_OLD_DISKLABEL
   1744 	case ODIOCGDINFO:
   1745 		newlabel = *(rs->sc_dkdev.dk_label);
   1746 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1747 			return ENOTTY;
   1748 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1749 		break;
   1750 #endif
   1751 
   1752 	case DIOCGPART:
   1753 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
   1754 		((struct partinfo *) data)->part =
   1755 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1756 		break;
   1757 
   1758 	case DIOCWDINFO:
   1759 	case DIOCSDINFO:
   1760 #ifdef __HAVE_OLD_DISKLABEL
   1761 	case ODIOCWDINFO:
   1762 	case ODIOCSDINFO:
   1763 #endif
   1764 	{
   1765 		struct disklabel *lp;
   1766 #ifdef __HAVE_OLD_DISKLABEL
   1767 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1768 			memset(&newlabel, 0, sizeof newlabel);
   1769 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1770 			lp = &newlabel;
   1771 		} else
   1772 #endif
   1773 		lp = (struct disklabel *)data;
   1774 
   1775 		if ((error = raidlock(rs)) != 0)
   1776 			return (error);
   1777 
   1778 		rs->sc_flags |= RAIDF_LABELLING;
   1779 
   1780 		error = setdisklabel(rs->sc_dkdev.dk_label,
   1781 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
   1782 		if (error == 0) {
   1783 			if (cmd == DIOCWDINFO
   1784 #ifdef __HAVE_OLD_DISKLABEL
   1785 			    || cmd == ODIOCWDINFO
   1786 #endif
   1787 			   )
   1788 				error = writedisklabel(RAIDLABELDEV(dev),
   1789 				    raidstrategy, rs->sc_dkdev.dk_label,
   1790 				    rs->sc_dkdev.dk_cpulabel);
   1791 		}
   1792 		rs->sc_flags &= ~RAIDF_LABELLING;
   1793 
   1794 		raidunlock(rs);
   1795 
   1796 		if (error)
   1797 			return (error);
   1798 		break;
   1799 	}
   1800 
   1801 	case DIOCWLABEL:
   1802 		if (*(int *) data != 0)
   1803 			rs->sc_flags |= RAIDF_WLABEL;
   1804 		else
   1805 			rs->sc_flags &= ~RAIDF_WLABEL;
   1806 		break;
   1807 
   1808 	case DIOCGDEFLABEL:
   1809 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
   1810 		break;
   1811 
   1812 #ifdef __HAVE_OLD_DISKLABEL
   1813 	case ODIOCGDEFLABEL:
   1814 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
   1815 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1816 			return ENOTTY;
   1817 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1818 		break;
   1819 #endif
   1820 
   1821 	case DIOCAWEDGE:
   1822 	case DIOCDWEDGE:
   1823 	    	dkw = (void *)data;
   1824 
   1825 		/* If the ioctl happens here, the parent is us. */
   1826 		(void)strcpy(dkw->dkw_parent, rs->sc_xname);
   1827 		return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
   1828 
   1829 	case DIOCLWEDGES:
   1830 		return dkwedge_list(&rs->sc_dkdev,
   1831 		    (struct dkwedge_list *)data, l);
   1832 	case DIOCCACHESYNC:
   1833 		return rf_sync_component_caches(raidPtr);
   1834 	default:
   1835 		retcode = ENOTTY;
   1836 	}
   1837 	return (retcode);
   1838 
   1839 }
   1840 
   1841 
   1842 /* raidinit -- complete the rest of the initialization for the
   1843    RAIDframe device.  */
   1844 
   1845 
   1846 static void
   1847 raidinit(RF_Raid_t *raidPtr)
   1848 {
   1849 	cfdata_t cf;
   1850 	struct raid_softc *rs;
   1851 	int     unit;
   1852 
   1853 	unit = raidPtr->raidid;
   1854 
   1855 	rs = &raid_softc[unit];
   1856 
   1857 	/* XXX should check return code first... */
   1858 	rs->sc_flags |= RAIDF_INITED;
   1859 
   1860 	/* XXX doesn't check bounds. */
   1861 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
   1862 
   1863 	/* attach the pseudo device */
   1864 	cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
   1865 	cf->cf_name = raid_cd.cd_name;
   1866 	cf->cf_atname = raid_cd.cd_name;
   1867 	cf->cf_unit = unit;
   1868 	cf->cf_fstate = FSTATE_STAR;
   1869 
   1870 	rs->sc_dev = config_attach_pseudo(cf);
   1871 
   1872 	if (rs->sc_dev==NULL) {
   1873 		printf("raid%d: config_attach_pseudo failed\n",
   1874 		       raidPtr->raidid);
   1875 	}
   1876 
   1877 	/* disk_attach actually creates space for the CPU disklabel, among
   1878 	 * other things, so it's critical to call this *BEFORE* we try putzing
   1879 	 * with disklabels. */
   1880 
   1881 	disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
   1882 	disk_attach(&rs->sc_dkdev);
   1883 
   1884 	/* XXX There may be a weird interaction here between this, and
   1885 	 * protectedSectors, as used in RAIDframe.  */
   1886 
   1887 	rs->sc_size = raidPtr->totalSectors;
   1888 
   1889 	dkwedge_discover(&rs->sc_dkdev);
   1890 
   1891 	rf_set_properties(rs, raidPtr);
   1892 
   1893 }
   1894 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1895 /* wake up the daemon & tell it to get us a spare table
   1896  * XXX
   1897  * the entries in the queues should be tagged with the raidPtr
   1898  * so that in the extremely rare case that two recons happen at once,
    1899  * we know for which device we're requesting a spare table
   1900  * XXX
   1901  *
   1902  * XXX This code is not currently used. GO
   1903  */
   1904 int
   1905 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
   1906 {
   1907 	int     retcode;
   1908 
   1909 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1910 	req->next = rf_sparet_wait_queue;
   1911 	rf_sparet_wait_queue = req;
   1912 	wakeup(&rf_sparet_wait_queue);
   1913 
   1914 	/* mpsleep unlocks the mutex */
   1915 	while (!rf_sparet_resp_queue) {
   1916 		tsleep(&rf_sparet_resp_queue, PRIBIO,
   1917 		    "raidframe getsparetable", 0);
   1918 	}
   1919 	req = rf_sparet_resp_queue;
   1920 	rf_sparet_resp_queue = req->next;
   1921 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1922 
   1923 	retcode = req->fcol;
   1924 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
   1925 					 * alloc'd */
   1926 	return (retcode);
   1927 }
   1928 #endif
   1929 
   1930 /* a wrapper around rf_DoAccess that extracts appropriate info from the
   1931  * bp & passes it down.
    1932  * any calls originating in the kernel must use non-blocking I/O.
    1933  * We do some extra sanity checking to return "appropriate" error values
    1934  * for certain conditions (to make some standard utilities work).
   1935  *
   1936  * Formerly known as: rf_DoAccessKernel
   1937  */
   1938 void
   1939 raidstart(RF_Raid_t *raidPtr)
   1940 {
   1941 	RF_SectorCount_t num_blocks, pb, sum;
   1942 	RF_RaidAddr_t raid_addr;
   1943 	struct partition *pp;
   1944 	daddr_t blocknum;
   1945 	int     unit;
   1946 	struct raid_softc *rs;
   1947 	int     do_async;
   1948 	struct buf *bp;
   1949 	int rc;
   1950 
   1951 	unit = raidPtr->raidid;
   1952 	rs = &raid_softc[unit];
   1953 
   1954 	/* quick check to see if anything has died recently */
   1955 	RF_LOCK_MUTEX(raidPtr->mutex);
   1956 	if (raidPtr->numNewFailures > 0) {
   1957 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1958 		rf_update_component_labels(raidPtr,
   1959 					   RF_NORMAL_COMPONENT_UPDATE);
   1960 		RF_LOCK_MUTEX(raidPtr->mutex);
   1961 		raidPtr->numNewFailures--;
   1962 	}
   1963 
   1964 	/* Check to see if we're at the limit... */
   1965 	while (raidPtr->openings > 0) {
   1966 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1967 
   1968 		/* get the next item, if any, from the queue */
   1969 		if ((bp = bufq_get(rs->buf_queue)) == NULL) {
   1970 			/* nothing more to do */
   1971 			return;
   1972 		}
   1973 
   1974 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
   1975 		 * partition.. Need to make it absolute to the underlying
   1976 		 * device.. */
   1977 
   1978 		blocknum = bp->b_blkno;
   1979 		if (DISKPART(bp->b_dev) != RAW_PART) {
   1980 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
   1981 			blocknum += pp->p_offset;
   1982 		}
   1983 
   1984 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
   1985 			    (int) blocknum));
   1986 
   1987 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
   1988 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
   1989 
   1990 		/* *THIS* is where we adjust what block we're going to...
   1991 		 * but DO NOT TOUCH bp->b_blkno!!! */
   1992 		raid_addr = blocknum;
   1993 
   1994 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
   1995 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
   1996 		sum = raid_addr + num_blocks + pb;
   1997 		if (1 || rf_debugKernelAccess) {
   1998 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
   1999 				    (int) raid_addr, (int) sum, (int) num_blocks,
   2000 				    (int) pb, (int) bp->b_resid));
   2001 		}
   2002 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
   2003 		    || (sum < num_blocks) || (sum < pb)) {
   2004 			bp->b_error = ENOSPC;
   2005 			bp->b_resid = bp->b_bcount;
   2006 			biodone(bp);
   2007 			RF_LOCK_MUTEX(raidPtr->mutex);
   2008 			continue;
   2009 		}
   2010 		/*
   2011 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
   2012 		 */
   2013 
   2014 		if (bp->b_bcount & raidPtr->sectorMask) {
   2015 			bp->b_error = EINVAL;
   2016 			bp->b_resid = bp->b_bcount;
   2017 			biodone(bp);
   2018 			RF_LOCK_MUTEX(raidPtr->mutex);
   2019 			continue;
   2020 
   2021 		}
   2022 		db1_printf(("Calling DoAccess..\n"));
   2023 
   2024 
   2025 		RF_LOCK_MUTEX(raidPtr->mutex);
   2026 		raidPtr->openings--;
   2027 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   2028 
   2029 		/*
   2030 		 * Everything is async.
   2031 		 */
   2032 		do_async = 1;
   2033 
   2034 		disk_busy(&rs->sc_dkdev);
   2035 
   2036 		/* XXX we're still at splbio() here... do we *really*
   2037 		   need to be? */
   2038 
   2039 		/* don't ever condition on bp->b_flags & B_WRITE.
   2040 		 * always condition on B_READ instead */
   2041 
   2042 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
   2043 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
   2044 				 do_async, raid_addr, num_blocks,
   2045 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
   2046 
   2047 		if (rc) {
   2048 			bp->b_error = rc;
   2049 			bp->b_resid = bp->b_bcount;
   2050 			biodone(bp);
   2051 			/* continue loop */
   2052 		}
   2053 
   2054 		RF_LOCK_MUTEX(raidPtr->mutex);
   2055 	}
   2056 	RF_UNLOCK_MUTEX(raidPtr->mutex);
   2057 }
   2058 
   2059 
   2060 
   2061 
   2062 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   2063 
   2064 int
   2065 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
   2066 {
   2067 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   2068 	struct buf *bp;
   2069 
   2070 	req->queue = queue;
   2071 	bp = req->bp;
   2072 
   2073 	switch (req->type) {
   2074 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   2075 		/* XXX need to do something extra here.. */
   2076 		/* I'm leaving this in, as I've never actually seen it used,
   2077 		 * and I'd like folks to report it... GO */
    2078 		printf("WAKEUP CALLED\n");
   2079 		queue->numOutstanding++;
   2080 
   2081 		bp->b_flags = 0;
   2082 		bp->b_private = req;
   2083 
   2084 		KernelWakeupFunc(bp);
   2085 		break;
   2086 
   2087 	case RF_IO_TYPE_READ:
   2088 	case RF_IO_TYPE_WRITE:
   2089 #if RF_ACC_TRACE > 0
   2090 		if (req->tracerec) {
   2091 			RF_ETIMER_START(req->tracerec->timer);
   2092 		}
   2093 #endif
   2094 		InitBP(bp, queue->rf_cinfo->ci_vp,
   2095 		    op, queue->rf_cinfo->ci_dev,
   2096 		    req->sectorOffset, req->numSector,
   2097 		    req->buf, KernelWakeupFunc, (void *) req,
   2098 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   2099 
   2100 		if (rf_debugKernelAccess) {
   2101 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   2102 				(long) bp->b_blkno));
   2103 		}
   2104 		queue->numOutstanding++;
   2105 		queue->last_deq_sector = req->sectorOffset;
   2106 		/* acc wouldn't have been let in if there were any pending
   2107 		 * reqs at any other priority */
   2108 		queue->curPriority = req->priority;
   2109 
   2110 		db1_printf(("Going for %c to unit %d col %d\n",
   2111 			    req->type, queue->raidPtr->raidid,
   2112 			    queue->col));
   2113 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   2114 			(int) req->sectorOffset, (int) req->numSector,
   2115 			(int) (req->numSector <<
   2116 			    queue->raidPtr->logBytesPerSector),
   2117 			(int) queue->raidPtr->logBytesPerSector));
   2118 
   2119 		/*
   2120 		 * XXX: drop lock here since this can block at
   2121 		 * least with backing SCSI devices.  Retake it
   2122 		 * to minimize fuss with calling interfaces.
   2123 		 */
   2124 
   2125 		RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
   2126 		bdev_strategy(bp);
   2127 		RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
   2128 		break;
   2129 
   2130 	default:
   2131 		panic("bad req->type in rf_DispatchKernelIO");
   2132 	}
   2133 	db1_printf(("Exiting from DispatchKernelIO\n"));
   2134 
   2135 	return (0);
   2136 }
    2137 /* this is the callback function associated with an I/O invoked from
   2138    kernel code.
   2139  */
   2140 static void
   2141 KernelWakeupFunc(struct buf *bp)
   2142 {
   2143 	RF_DiskQueueData_t *req = NULL;
   2144 	RF_DiskQueue_t *queue;
   2145 	int s;
   2146 
   2147 	s = splbio();
   2148 	db1_printf(("recovering the request queue:\n"));
   2149 	req = bp->b_private;
   2150 
   2151 	queue = (RF_DiskQueue_t *) req->queue;
   2152 
   2153 #if RF_ACC_TRACE > 0
   2154 	if (req->tracerec) {
   2155 		RF_ETIMER_STOP(req->tracerec->timer);
   2156 		RF_ETIMER_EVAL(req->tracerec->timer);
   2157 		RF_LOCK_MUTEX(rf_tracing_mutex);
   2158 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   2159 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   2160 		req->tracerec->num_phys_ios++;
   2161 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
   2162 	}
   2163 #endif
   2164 
   2165 	/* XXX Ok, let's get aggressive... If b_error is set, let's go
   2166 	 * ballistic, and mark the component as hosed... */
   2167 
   2168 	if (bp->b_error != 0) {
   2169 		/* Mark the disk as dead */
   2170 		/* but only mark it once... */
   2171 		/* and only if it wouldn't leave this RAID set
   2172 		   completely broken */
   2173 		if (((queue->raidPtr->Disks[queue->col].status ==
   2174 		      rf_ds_optimal) ||
   2175 		     (queue->raidPtr->Disks[queue->col].status ==
   2176 		      rf_ds_used_spare)) &&
   2177 		     (queue->raidPtr->numFailures <
   2178 		      queue->raidPtr->Layout.map->faultsTolerated)) {
   2179 			printf("raid%d: IO Error.  Marking %s as failed.\n",
   2180 			       queue->raidPtr->raidid,
   2181 			       queue->raidPtr->Disks[queue->col].devname);
   2182 			queue->raidPtr->Disks[queue->col].status =
   2183 			    rf_ds_failed;
   2184 			queue->raidPtr->status = rf_rs_degraded;
   2185 			queue->raidPtr->numFailures++;
   2186 			queue->raidPtr->numNewFailures++;
   2187 		} else {	/* Disk is already dead... */
   2188 			/* printf("Disk already marked as dead!\n"); */
   2189 		}
   2190 
   2191 	}
   2192 
   2193 	/* Fill in the error value */
   2194 
   2195 	req->error = bp->b_error;
   2196 
   2197 	simple_lock(&queue->raidPtr->iodone_lock);
   2198 
   2199 	/* Drop this one on the "finished" queue... */
   2200 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
   2201 
   2202 	/* Let the raidio thread know there is work to be done. */
   2203 	wakeup(&(queue->raidPtr->iodone));
   2204 
   2205 	simple_unlock(&queue->raidPtr->iodone_lock);
   2206 
   2207 	splx(s);
   2208 }
   2209 
   2210 
   2211 
   2212 /*
   2213  * initialize a buf structure for doing an I/O in the kernel.
   2214  */
   2215 static void
   2216 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
   2217        RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
   2218        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
   2219        struct proc *b_proc)
   2220 {
   2221 	/* bp->b_flags       = B_PHYS | rw_flag; */
   2222 	bp->b_flags = rw_flag;	/* XXX need B_PHYS here too??? */
   2223 	bp->b_oflags = 0;
   2224 	bp->b_cflags = 0;
   2225 	bp->b_bcount = numSect << logBytesPerSector;
   2226 	bp->b_bufsize = bp->b_bcount;
   2227 	bp->b_error = 0;
   2228 	bp->b_dev = dev;
   2229 	bp->b_data = bf;
   2230 	bp->b_blkno = startSect;
   2231 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
   2232 	if (bp->b_bcount == 0) {
   2233 		panic("bp->b_bcount is zero in InitBP!!");
   2234 	}
   2235 	bp->b_proc = b_proc;
   2236 	bp->b_iodone = cbFunc;
   2237 	bp->b_private = cbArg;
   2238 }
   2239 
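         /* raidgetdefaultlabel -- fabricate a default disklabel for the RAID
            device, with a single raw partition spanning the whole set. */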
   2240 static void
   2241 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
   2242 		    struct disklabel *lp)
   2243 {
   2244 	memset(lp, 0, sizeof(*lp));
   2245 
   2246 	/* fabricate a label... */
   2247 	lp->d_secperunit = raidPtr->totalSectors;
   2248 	lp->d_secsize = raidPtr->bytesPerSector;
   2249 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
   2250 	lp->d_ntracks = 4 * raidPtr->numCol;
   2251 	lp->d_ncylinders = raidPtr->totalSectors /
   2252 		(lp->d_nsectors * lp->d_ntracks);
   2253 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
   2254 
   2255 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
   2256 	lp->d_type = DTYPE_RAID;
   2257 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   2258 	lp->d_rpm = 3600;
   2259 	lp->d_interleave = 1;
   2260 	lp->d_flags = 0;
   2261 
   2262 	lp->d_partitions[RAW_PART].p_offset = 0;
   2263 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
   2264 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   2265 	lp->d_npartitions = RAW_PART + 1;
   2266 
   2267 	lp->d_magic = DISKMAGIC;
   2268 	lp->d_magic2 = DISKMAGIC;
   2269 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
   2270 
   2271 }
   2272 /*
   2273  * Read the disklabel from the raid device.  If one is not present, fake one
   2274  * up.
   2275  */
   2276 static void
   2277 raidgetdisklabel(dev_t dev)
   2278 {
   2279 	int     unit = raidunit(dev);
   2280 	struct raid_softc *rs = &raid_softc[unit];
   2281 	const char   *errstring;
   2282 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2283 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
   2284 	RF_Raid_t *raidPtr;
   2285 
   2286 	db1_printf(("Getting the disklabel...\n"));
   2287 
   2288 	memset(clp, 0, sizeof(*clp));
   2289 
   2290 	raidPtr = raidPtrs[unit];
   2291 
   2292 	raidgetdefaultlabel(raidPtr, rs, lp);
   2293 
   2294 	/*
   2295 	 * Call the generic disklabel extraction routine.
   2296 	 */
   2297 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
   2298 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
   2299 	if (errstring)
   2300 		raidmakedisklabel(rs);
   2301 	else {
   2302 		int     i;
   2303 		struct partition *pp;
   2304 
   2305 		/*
   2306 		 * Sanity check whether the found disklabel is valid.
   2307 		 *
    2308 		 * This is necessary since the total size of the raid device
    2309 		 * may vary when the interleave is changed even though exactly
    2310 		 * the same components are used, and an old disklabel may be
    2311 		 * used if one is found.
   2312 		 */
   2313 		if (lp->d_secperunit != rs->sc_size)
   2314 			printf("raid%d: WARNING: %s: "
   2315 			    "total sector size in disklabel (%" PRIu32 ") != "
   2316 			    "the size of raid (%" PRIu64 ")\n", unit, rs->sc_xname,
   2317 			    lp->d_secperunit, rs->sc_size);
   2318 		for (i = 0; i < lp->d_npartitions; i++) {
   2319 			pp = &lp->d_partitions[i];
   2320 			if (pp->p_offset + pp->p_size > rs->sc_size)
   2321 				printf("raid%d: WARNING: %s: end of partition `%c' "
   2322 				       "exceeds the size of raid (%" PRIu64 ")\n",
   2323 				       unit, rs->sc_xname, 'a' + i, rs->sc_size);
   2324 		}
   2325 	}
   2326 
   2327 }
   2328 /*
   2329  * Take care of things one might want to take care of in the event
   2330  * that a disklabel isn't present.
   2331  */
   2332 static void
   2333 raidmakedisklabel(struct raid_softc *rs)
   2334 {
   2335 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2336 	db1_printf(("Making a label..\n"));
   2337 
   2338 	/*
   2339 	 * For historical reasons, if there's no disklabel present
   2340 	 * the raw partition must be marked FS_BSDFFS.
   2341 	 */
   2342 
   2343 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
   2344 
   2345 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
   2346 
   2347 	lp->d_checksum = dkcksum(lp);
   2348 }
   2349 /*
   2350  * Wait interruptibly for an exclusive lock.
   2351  *
   2352  * XXX
   2353  * Several drivers do this; it should be abstracted and made MP-safe.
   2354  * (Hmm... where have we seen this warning before :->  GO )
   2355  */
   2356 static int
   2357 raidlock(struct raid_softc *rs)
   2358 {
   2359 	int     error;
   2360 
   2361 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2362 		rs->sc_flags |= RAIDF_WANTED;
   2363 		if ((error =
   2364 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
   2365 			return (error);
   2366 	}
   2367 	rs->sc_flags |= RAIDF_LOCKED;
   2368 	return (0);
   2369 }
   2370 /*
   2371  * Unlock and wake up any waiters.
   2372  */
   2373 static void
   2374 raidunlock(struct raid_softc *rs)
   2375 {
   2376 
   2377 	rs->sc_flags &= ~RAIDF_LOCKED;
   2378 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2379 		rs->sc_flags &= ~RAIDF_WANTED;
   2380 		wakeup(rs);
   2381 	}
   2382 }
   2383 
   2384 
   2385 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2386 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
   2387 
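         /* raidmarkclean -- re-read the component label from the given
            component, set the given mod counter and the clean flag, and
            write the label back. */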
   2388 int
   2389 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
   2390 {
   2391 	RF_ComponentLabel_t clabel;
   2392 	raidread_component_label(dev, b_vp, &clabel);
   2393 	clabel.mod_counter = mod_counter;
   2394 	clabel.clean = RF_RAID_CLEAN;
   2395 	raidwrite_component_label(dev, b_vp, &clabel);
   2396 	return(0);
   2397 }
   2398 
   2399 
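         /* raidmarkdirty -- as raidmarkclean(), but mark the component
            label as dirty. */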
   2400 int
   2401 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
   2402 {
   2403 	RF_ComponentLabel_t clabel;
   2404 	raidread_component_label(dev, b_vp, &clabel);
   2405 	clabel.mod_counter = mod_counter;
   2406 	clabel.clean = RF_RAID_DIRTY;
   2407 	raidwrite_component_label(dev, b_vp, &clabel);
   2408 	return(0);
   2409 }
   2410 
   2411 /* ARGSUSED */
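         /* raidread_component_label -- read RF_COMPONENT_INFO_SIZE bytes
            from RF_COMPONENT_INFO_OFFSET on the component via the block
            device's strategy routine and copy the result into *clabel. */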
   2412 int
   2413 raidread_component_label(dev_t dev, struct vnode *b_vp,
   2414 			 RF_ComponentLabel_t *clabel)
   2415 {
   2416 	struct buf *bp;
   2417 	const struct bdevsw *bdev;
   2418 	int error;
   2419 
   2420 	/* XXX should probably ensure that we don't try to do this if
   2421 	   someone has changed rf_protected_sectors. */
   2422 
   2423 	if (b_vp == NULL) {
   2424 		/* For whatever reason, this component is not valid.
   2425 		   Don't try to read a component label from it. */
   2426 		return(EINVAL);
   2427 	}
   2428 
   2429 	/* get a block of the appropriate size... */
   2430 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2431 	bp->b_dev = dev;
   2432 
   2433 	/* get our ducks in a row for the read */
   2434 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2435 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2436 	bp->b_flags |= B_READ;
   2437  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2438 
   2439 	bdev = bdevsw_lookup(bp->b_dev);
   2440 	if (bdev == NULL)
   2441 		return (ENXIO);
   2442 	(*bdev->d_strategy)(bp);
   2443 
   2444 	error = biowait(bp);
   2445 
   2446 	if (!error) {
   2447 		memcpy(clabel, bp->b_data,
   2448 		       sizeof(RF_ComponentLabel_t));
   2449 	}
   2450 
   2451 	brelse(bp, 0);
   2452 	return(error);
   2453 }
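         /* raidwrite_component_label -- zero a scratch buffer, copy *clabel
            into it, and write it to the component label area of the given
            component. */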
   2454 /* ARGSUSED */
   2455 int
   2456 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
   2457 			  RF_ComponentLabel_t *clabel)
   2458 {
   2459 	struct buf *bp;
   2460 	const struct bdevsw *bdev;
   2461 	int error;
   2462 
   2463 	/* get a block of the appropriate size... */
   2464 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2465 	bp->b_dev = dev;
   2466 
   2467 	/* get our ducks in a row for the write */
   2468 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2469 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2470 	bp->b_flags |= B_WRITE;
   2471  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2472 
   2473 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
   2474 
   2475 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
   2476 
   2477 	bdev = bdevsw_lookup(bp->b_dev);
   2478 	if (bdev == NULL)
   2479 		return (ENXIO);
   2480 	(*bdev->d_strategy)(bp);
   2481 	error = biowait(bp);
   2482 	brelse(bp, 0);
   2483 	if (error) {
   2484 #if 1
   2485 		printf("Failed to write RAID component info!\n");
   2486 #endif
   2487 	}
   2488 
   2489 	return(error);
   2490 }
   2491 
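         /* rf_markalldirty -- bump the mod counter and mark the component
            label on every non-failed component and every used spare as
            dirty; spared components are left untouched. */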
   2492 void
   2493 rf_markalldirty(RF_Raid_t *raidPtr)
   2494 {
   2495 	RF_ComponentLabel_t clabel;
   2496 	int sparecol;
   2497 	int c;
   2498 	int j;
   2499 	int scol = -1;
   2500 
   2501 	raidPtr->mod_counter++;
   2502 	for (c = 0; c < raidPtr->numCol; c++) {
   2503 		/* we don't want to touch (at all) a disk that has
   2504 		   failed */
   2505 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
   2506 			raidread_component_label(
   2507 						 raidPtr->Disks[c].dev,
   2508 						 raidPtr->raid_cinfo[c].ci_vp,
   2509 						 &clabel);
   2510 			if (clabel.status == rf_ds_spared) {
   2511 				/* XXX do something special...
   2512 				   but whatever you do, don't
   2513 				   try to access it!! */
   2514 			} else {
   2515 				raidmarkdirty(
   2516 					      raidPtr->Disks[c].dev,
   2517 					      raidPtr->raid_cinfo[c].ci_vp,
   2518 					      raidPtr->mod_counter);
   2519 			}
   2520 		}
   2521 	}
   2522 
   2523 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2524 		sparecol = raidPtr->numCol + c;
   2525 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2526 			/*
   2527 
   2528 			   we claim this disk is "optimal" if it's
   2529 			   rf_ds_used_spare, as that means it should be
   2530 			   directly substitutable for the disk it replaced.
   2531 			   We note that too...
   2532 
   2533 			 */
   2534 
   2535 			for(j=0;j<raidPtr->numCol;j++) {
   2536 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2537 					scol = j;
   2538 					break;
   2539 				}
   2540 			}
   2541 
   2542 			raidread_component_label(
   2543 				 raidPtr->Disks[sparecol].dev,
   2544 				 raidPtr->raid_cinfo[sparecol].ci_vp,
   2545 				 &clabel);
   2546 			/* make sure status is noted */
   2547 
   2548 			raid_init_component_label(raidPtr, &clabel);
   2549 
   2550 			clabel.row = 0;
   2551 			clabel.column = scol;
   2552 			/* Note: we *don't* change status from rf_ds_used_spare
   2553 			   to rf_ds_optimal */
   2554 			/* clabel.status = rf_ds_optimal; */
   2555 
   2556 			raidmarkdirty(raidPtr->Disks[sparecol].dev,
   2557 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2558 				      raidPtr->mod_counter);
   2559 		}
   2560 	}
   2561 }
   2562 
   2563 
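         /* rf_update_component_labels -- rewrite the component labels on all
            optimal components and used spares with the current mod counter
            and unit number.  On a final update with good parity, also mark
            the components clean. */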
   2564 void
   2565 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
   2566 {
   2567 	RF_ComponentLabel_t clabel;
   2568 	int sparecol;
   2569 	int c;
   2570 	int j;
   2571 	int scol;
   2572 
   2573 	scol = -1;
   2574 
   2575 	/* XXX should do extra checks to make sure things really are clean,
   2576 	   rather than blindly setting the clean bit... */
   2577 
   2578 	raidPtr->mod_counter++;
   2579 
   2580 	for (c = 0; c < raidPtr->numCol; c++) {
   2581 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   2582 			raidread_component_label(
   2583 						 raidPtr->Disks[c].dev,
   2584 						 raidPtr->raid_cinfo[c].ci_vp,
   2585 						 &clabel);
   2586 			/* make sure status is noted */
   2587 			clabel.status = rf_ds_optimal;
   2588 
   2589 			/* bump the counter */
   2590 			clabel.mod_counter = raidPtr->mod_counter;
   2591 
   2592 			/* note what unit we are configured as */
   2593 			clabel.last_unit = raidPtr->raidid;
   2594 
   2595 			raidwrite_component_label(
   2596 						  raidPtr->Disks[c].dev,
   2597 						  raidPtr->raid_cinfo[c].ci_vp,
   2598 						  &clabel);
   2599 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2600 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2601 					raidmarkclean(
   2602 						      raidPtr->Disks[c].dev,
   2603 						      raidPtr->raid_cinfo[c].ci_vp,
   2604 						      raidPtr->mod_counter);
   2605 				}
   2606 			}
   2607 		}
   2608 		/* else we don't touch it.. */
   2609 	}
   2610 
   2611 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2612 		sparecol = raidPtr->numCol + c;
   2613 		/* Need to ensure that the reconstruct actually completed! */
   2614 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2615 			/*
   2616 
   2617 			   we claim this disk is "optimal" if it's
   2618 			   rf_ds_used_spare, as that means it should be
   2619 			   directly substitutable for the disk it replaced.
   2620 			   We note that too...
   2621 
   2622 			 */
   2623 
   2624 			for(j=0;j<raidPtr->numCol;j++) {
   2625 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2626 					scol = j;
   2627 					break;
   2628 				}
   2629 			}
   2630 
   2631 			/* XXX shouldn't *really* need this... */
   2632 			raidread_component_label(
   2633 				      raidPtr->Disks[sparecol].dev,
   2634 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2635 				      &clabel);
   2636 			/* make sure status is noted */
   2637 
   2638 			raid_init_component_label(raidPtr, &clabel);
   2639 
   2640 			clabel.mod_counter = raidPtr->mod_counter;
   2641 			clabel.column = scol;
   2642 			clabel.status = rf_ds_optimal;
   2643 			clabel.last_unit = raidPtr->raidid;
   2644 
   2645 			raidwrite_component_label(
   2646 				      raidPtr->Disks[sparecol].dev,
   2647 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2648 				      &clabel);
   2649 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2650 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2651 					raidmarkclean( raidPtr->Disks[sparecol].dev,
   2652 						       raidPtr->raid_cinfo[sparecol].ci_vp,
   2653 						       raidPtr->mod_counter);
   2654 				}
   2655 			}
   2656 		}
   2657 	}
   2658 }
   2659 
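         /* rf_close_component -- close a component's vnode: use the
            vn_lock/VOP_CLOSE/vput sequence for auto-configured components
            and vn_close() otherwise. */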
   2660 void
   2661 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
   2662 {
   2663 
   2664 	if (vp != NULL) {
   2665 		if (auto_configured == 1) {
   2666 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2667 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2668 			vput(vp);
   2669 
   2670 		} else {
   2671 			(void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
   2672 		}
   2673 	}
   2674 }
   2675 
   2676 
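         /* rf_UnconfigureVnodes -- close and clear the vnodes for every
            component and spare of the set. */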
   2677 void
   2678 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
   2679 {
   2680 	int r,c;
   2681 	struct vnode *vp;
   2682 	int acd;
   2683 
   2684 
   2685 	/* We take this opportunity to close the vnodes like we should.. */
   2686 
   2687 	for (c = 0; c < raidPtr->numCol; c++) {
   2688 		vp = raidPtr->raid_cinfo[c].ci_vp;
   2689 		acd = raidPtr->Disks[c].auto_configured;
   2690 		rf_close_component(raidPtr, vp, acd);
   2691 		raidPtr->raid_cinfo[c].ci_vp = NULL;
   2692 		raidPtr->Disks[c].auto_configured = 0;
   2693 	}
   2694 
   2695 	for (r = 0; r < raidPtr->numSpare; r++) {
   2696 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
   2697 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
   2698 		rf_close_component(raidPtr, vp, acd);
   2699 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
   2700 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
   2701 	}
   2702 }
   2703 
   2704 
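         /* rf_ReconThread -- kernel thread that fails the requested
            component and, if requested, reconstructs it to a spare, then
            exits. */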
   2705 void
   2706 rf_ReconThread(struct rf_recon_req *req)
   2707 {
   2708 	int     s;
   2709 	RF_Raid_t *raidPtr;
   2710 
   2711 	s = splbio();
   2712 	raidPtr = (RF_Raid_t *) req->raidPtr;
   2713 	raidPtr->recon_in_progress = 1;
   2714 
   2715 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
   2716 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
   2717 
   2718 	RF_Free(req, sizeof(*req));
   2719 
   2720 	raidPtr->recon_in_progress = 0;
   2721 	splx(s);
   2722 
   2723 	/* That's all... */
   2724 	kthread_exit(0);	/* does not return */
   2725 }
   2726 
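         /* rf_RewriteParityThread -- kernel thread that rewrites the parity
            for the whole set and, on success, marks the parity clean before
            exiting. */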
   2727 void
   2728 rf_RewriteParityThread(RF_Raid_t *raidPtr)
   2729 {
   2730 	int retcode;
   2731 	int s;
   2732 
   2733 	raidPtr->parity_rewrite_stripes_done = 0;
   2734 	raidPtr->parity_rewrite_in_progress = 1;
   2735 	s = splbio();
   2736 	retcode = rf_RewriteParity(raidPtr);
   2737 	splx(s);
   2738 	if (retcode) {
   2739 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
   2740 	} else {
   2741 		/* set the clean bit!  If we shutdown correctly,
   2742 		   the clean bit on each component label will get
   2743 		   set */
   2744 		raidPtr->parity_good = RF_RAID_CLEAN;
   2745 	}
   2746 	raidPtr->parity_rewrite_in_progress = 0;
   2747 
   2748 	/* Anyone waiting for us to stop?  If so, inform them... */
   2749 	if (raidPtr->waitShutdown) {
   2750 		wakeup(&raidPtr->parity_rewrite_in_progress);
   2751 	}
   2752 
   2753 	/* That's all... */
   2754 	kthread_exit(0);	/* does not return */
   2755 }
   2756 
   2757 
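         /* rf_CopybackThread -- kernel thread wrapper around
            rf_CopybackReconstructedData(); maintains the
            copyback_in_progress flag while the copyback runs. */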
   2758 void
   2759 rf_CopybackThread(RF_Raid_t *raidPtr)
   2760 {
   2761 	int s;
   2762 
   2763 	raidPtr->copyback_in_progress = 1;
   2764 	s = splbio();
   2765 	rf_CopybackReconstructedData(raidPtr);
   2766 	splx(s);
   2767 	raidPtr->copyback_in_progress = 0;
   2768 
   2769 	/* That's all... */
   2770 	kthread_exit(0);	/* does not return */
   2771 }
   2772 
   2773 
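         /* rf_ReconstructInPlaceThread -- kernel thread wrapper around
            rf_ReconstructInPlace() for the requested column; maintains the
            recon_in_progress flag and exits when done. */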
   2774 void
   2775 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
   2776 {
   2777 	int s;
   2778 	RF_Raid_t *raidPtr;
   2779 
   2780 	s = splbio();
   2781 	raidPtr = req->raidPtr;
   2782 	raidPtr->recon_in_progress = 1;
   2783 	rf_ReconstructInPlace(raidPtr, req->col);
   2784 	RF_Free(req, sizeof(*req));
   2785 	raidPtr->recon_in_progress = 0;
   2786 	splx(s);
   2787 
   2788 	/* That's all... */
   2789 	kthread_exit(0);	/* does not return */
   2790 }
   2791 
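         /* rf_get_component -- read the component label from the given
            device.  If the label looks reasonable and fits within the
            partition, prepend a new RF_AutoConfig_t entry to ac_list;
            otherwise close the vnode and return the list unchanged. */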
   2792 static RF_AutoConfig_t *
   2793 rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
   2794     const char *cname, RF_SectorCount_t size)
   2795 {
   2796 	int good_one = 0;
   2797 	RF_ComponentLabel_t *clabel;
   2798 	RF_AutoConfig_t *ac;
   2799 
   2800 	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
   2801 	if (clabel == NULL) {
   2802 oomem:
   2803 		    while(ac_list) {
   2804 			    ac = ac_list;
   2805 			    if (ac->clabel)
   2806 				    free(ac->clabel, M_RAIDFRAME);
   2807 			    ac_list = ac_list->next;
   2808 			    free(ac, M_RAIDFRAME);
   2809 		    }
   2810 		    printf("RAID auto config: out of memory!\n");
   2811 		    return NULL; /* XXX probably should panic? */
   2812 	}
   2813 
   2814 	if (!raidread_component_label(dev, vp, clabel)) {
   2815 		    /* Got the label.  Does it look reasonable? */
   2816 		    if (rf_reasonable_label(clabel) &&
   2817 			(clabel->partitionSize <= size)) {
   2818 #ifdef DEBUG
   2819 			    printf("Component on: %s: %llu\n",
   2820 				cname, (unsigned long long)size);
   2821 			    rf_print_component_label(clabel);
   2822 #endif
   2823 			    /* if it's reasonable, add it, else ignore it. */
   2824 			    ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
   2825 				M_NOWAIT);
   2826 			    if (ac == NULL) {
   2827 				    free(clabel, M_RAIDFRAME);
   2828 				    goto oomem;
   2829 			    }
   2830 			    strlcpy(ac->devname, cname, sizeof(ac->devname));
   2831 			    ac->dev = dev;
   2832 			    ac->vp = vp;
   2833 			    ac->clabel = clabel;
   2834 			    ac->next = ac_list;
   2835 			    ac_list = ac;
   2836 			    good_one = 1;
   2837 		    }
   2838 	}
   2839 	if (!good_one) {
   2840 		/* cleanup */
   2841 		free(clabel, M_RAIDFRAME);
   2842 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2843 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2844 		vput(vp);
   2845 	}
   2846 	return ac_list;
   2847 }
   2848 
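         /* rf_find_raid_components -- scan all disk-class devices in the
            system (skipping floppies, CDs and memory disks) and collect the
            wedges of type RAIDframe and the partitions marked FS_RAID into
            an autoconfiguration list. */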
   2849 RF_AutoConfig_t *
   2850 rf_find_raid_components(void)
   2851 {
   2852 	struct vnode *vp;
   2853 	struct disklabel label;
   2854 	device_t dv;
   2855 	dev_t dev;
   2856 	int bmajor, bminor, wedge;
   2857 	int error;
   2858 	int i;
   2859 	RF_AutoConfig_t *ac_list;
   2860 
   2861 
   2862 	/* initialize the AutoConfig list */
   2863 	ac_list = NULL;
   2864 
   2865 	/* we begin by trolling through *all* the devices on the system */
   2866 
   2867 	for (dv = alldevs.tqh_first; dv != NULL;
   2868 	     dv = dv->dv_list.tqe_next) {
   2869 
   2870 		/* we are only interested in disks... */
   2871 		if (device_class(dv) != DV_DISK)
   2872 			continue;
   2873 
   2874 		/* we don't care about floppies... */
   2875 		if (device_is_a(dv, "fd")) {
   2876 			continue;
   2877 		}
   2878 
   2879 		/* we don't care about CD's... */
   2880 		if (device_is_a(dv, "cd")) {
   2881 			continue;
   2882 		}
   2883 
   2884 		/* we don't care about md's... */
   2885 		if (device_is_a(dv, "md")) {
   2886 			continue;
   2887 		}
   2888 
   2889 		/* hdfd is the Atari/Hades floppy driver */
   2890 		if (device_is_a(dv, "hdfd")) {
   2891 			continue;
   2892 		}
   2893 
   2894 		/* fdisa is the Atari/Milan floppy driver */
   2895 		if (device_is_a(dv, "fdisa")) {
   2896 			continue;
   2897 		}
   2898 
   2899 		/* need to find the device_name_to_block_device_major stuff */
   2900 		bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
   2901 
   2902 		/* get a vnode for the raw partition of this disk */
   2903 
   2904 		wedge = device_is_a(dv, "dk");
   2905 		bminor = minor(device_unit(dv));
   2906 		dev = wedge ? makedev(bmajor, bminor) :
   2907 		    MAKEDISKDEV(bmajor, bminor, RAW_PART);
   2908 		if (bdevvp(dev, &vp))
   2909 			panic("RAID can't alloc vnode");
   2910 
   2911 		error = VOP_OPEN(vp, FREAD, NOCRED);
   2912 
   2913 		if (error) {
   2914 			/* "Who cares."  Continue looking
    2915 			   for something that exists */
   2916 			vput(vp);
   2917 			continue;
   2918 		}
   2919 
   2920 		if (wedge) {
   2921 			struct dkwedge_info dkw;
   2922 			error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
   2923 			    NOCRED);
   2924 			if (error) {
   2925 				printf("RAIDframe: can't get wedge info for "
   2926 				    "dev %s (%d)\n", device_xname(dv), error);
   2927 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2928 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2929 				vput(vp);
   2930 				continue;
   2931 			}
   2932 
   2933 			if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
   2934 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2935 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2936 				vput(vp);
   2937 				continue;
   2938 			}
   2939 
   2940 			ac_list = rf_get_component(ac_list, dev, vp,
   2941 			    device_xname(dv), dkw.dkw_size);
   2942 			continue;
   2943 		}
   2944 
   2945 		/* Ok, the disk exists.  Go get the disklabel. */
   2946 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
   2947 		if (error) {
   2948 			/*
   2949 			 * XXX can't happen - open() would
   2950 			 * have errored out (or faked up one)
   2951 			 */
   2952 			if (error != ENOTTY)
   2953 				printf("RAIDframe: can't get label for dev "
   2954 				    "%s (%d)\n", device_xname(dv), error);
   2955 		}
   2956 
   2957 		/* don't need this any more.  We'll allocate it again
   2958 		   a little later if we really do... */
   2959 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2960 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2961 		vput(vp);
   2962 
   2963 		if (error)
   2964 			continue;
   2965 
   2966 		for (i = 0; i < label.d_npartitions; i++) {
   2967 			char cname[sizeof(ac_list->devname)];
   2968 
   2969 			/* We only support partitions marked as RAID */
   2970 			if (label.d_partitions[i].p_fstype != FS_RAID)
   2971 				continue;
   2972 
   2973 			dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
   2974 			if (bdevvp(dev, &vp))
   2975 				panic("RAID can't alloc vnode");
   2976 
   2977 			error = VOP_OPEN(vp, FREAD, NOCRED);
   2978 			if (error) {
   2979 				/* Whatever... */
   2980 				vput(vp);
   2981 				continue;
   2982 			}
   2983 			snprintf(cname, sizeof(cname), "%s%c",
   2984 			    device_xname(dv), 'a' + i);
   2985 			ac_list = rf_get_component(ac_list, dev, vp, cname,
   2986 				label.d_partitions[i].p_size);
   2987 		}
   2988 	}
   2989 	return ac_list;
   2990 }
   2991 
   2992 
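         /* rf_reasonable_label -- sanity-check a component label: known
            version, valid clean flag, row/column within bounds, and
            non-zero geometry.  Returns 1 if the label looks usable. */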
   2993 static int
   2994 rf_reasonable_label(RF_ComponentLabel_t *clabel)
   2995 {
   2996 
   2997 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   2998 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   2999 	    ((clabel->clean == RF_RAID_CLEAN) ||
   3000 	     (clabel->clean == RF_RAID_DIRTY)) &&
   3001 	    clabel->row >=0 &&
   3002 	    clabel->column >= 0 &&
   3003 	    clabel->num_rows > 0 &&
   3004 	    clabel->num_columns > 0 &&
   3005 	    clabel->row < clabel->num_rows &&
   3006 	    clabel->column < clabel->num_columns &&
   3007 	    clabel->blockSize > 0 &&
   3008 	    clabel->numBlocks > 0) {
   3009 		/* label looks reasonable enough... */
   3010 		return(1);
   3011 	}
   3012 	return(0);
   3013 }
   3014 
   3015 
   3016 #ifdef DEBUG
   3017 void
   3018 rf_print_component_label(RF_ComponentLabel_t *clabel)
   3019 {
   3020 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
   3021 	       clabel->row, clabel->column,
   3022 	       clabel->num_rows, clabel->num_columns);
   3023 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
   3024 	       clabel->version, clabel->serial_number,
   3025 	       clabel->mod_counter);
   3026 	printf("   Clean: %s Status: %d\n",
   3027 	       clabel->clean ? "Yes" : "No", clabel->status );
   3028 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
   3029 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
   3030 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
   3031 	       (char) clabel->parityConfig, clabel->blockSize,
   3032 	       clabel->numBlocks);
   3033 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
   3034 	printf("   Contains root partition: %s\n",
   3035 	       clabel->root_partition ? "Yes" : "No" );
   3036 	printf("   Last configured as: raid%d\n", clabel->last_unit );
   3037 #if 0
   3038 	   printf("   Config order: %d\n", clabel->config_order);
   3039 #endif
   3040 
   3041 }
   3042 #endif
   3043 
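         /* rf_create_auto_sets -- sort the autoconfig list into
            configuration sets.  Each component is added to the first
            existing set whose labels match (see rf_does_it_fit()), or
            starts a new set of its own. */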
   3044 RF_ConfigSet_t *
   3045 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
   3046 {
   3047 	RF_AutoConfig_t *ac;
   3048 	RF_ConfigSet_t *config_sets;
   3049 	RF_ConfigSet_t *cset;
   3050 	RF_AutoConfig_t *ac_next;
   3051 
   3052 
   3053 	config_sets = NULL;
   3054 
   3055 	/* Go through the AutoConfig list, and figure out which components
   3056 	   belong to what sets.  */
   3057 	ac = ac_list;
   3058 	while(ac!=NULL) {
   3059 		/* we're going to putz with ac->next, so save it here
   3060 		   for use at the end of the loop */
   3061 		ac_next = ac->next;
   3062 
   3063 		if (config_sets == NULL) {
   3064 			/* will need at least this one... */
   3065 			config_sets = (RF_ConfigSet_t *)
   3066 				malloc(sizeof(RF_ConfigSet_t),
   3067 				       M_RAIDFRAME, M_NOWAIT);
   3068 			if (config_sets == NULL) {
   3069 				panic("rf_create_auto_sets: No memory!");
   3070 			}
   3071 			/* this one is easy :) */
   3072 			config_sets->ac = ac;
   3073 			config_sets->next = NULL;
   3074 			config_sets->rootable = 0;
   3075 			ac->next = NULL;
   3076 		} else {
   3077 			/* which set does this component fit into? */
   3078 			cset = config_sets;
   3079 			while(cset!=NULL) {
   3080 				if (rf_does_it_fit(cset, ac)) {
   3081 					/* looks like it matches... */
   3082 					ac->next = cset->ac;
   3083 					cset->ac = ac;
   3084 					break;
   3085 				}
   3086 				cset = cset->next;
   3087 			}
   3088 			if (cset==NULL) {
   3089 				/* didn't find a match above... new set..*/
   3090 				cset = (RF_ConfigSet_t *)
   3091 					malloc(sizeof(RF_ConfigSet_t),
   3092 					       M_RAIDFRAME, M_NOWAIT);
   3093 				if (cset == NULL) {
   3094 					panic("rf_create_auto_sets: No memory!");
   3095 				}
   3096 				cset->ac = ac;
   3097 				ac->next = NULL;
   3098 				cset->next = config_sets;
   3099 				cset->rootable = 0;
   3100 				config_sets = cset;
   3101 			}
   3102 		}
   3103 		ac = ac_next;
   3104 	}
   3105 
   3106 
   3107 	return(config_sets);
   3108 }
   3109 
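         /* rf_does_it_fit -- compare a component's label against the first
            member of a configuration set; returns 1 if they describe the
            same set. */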
   3110 static int
   3111 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
   3112 {
   3113 	RF_ComponentLabel_t *clabel1, *clabel2;
   3114 
   3115 	/* If this one matches the *first* one in the set, that's good
   3116 	   enough, since the other members of the set would have been
   3117 	   through here too... */
   3118 	/* note that we are not checking partitionSize here..
   3119 
   3120 	   Note that we are also not checking the mod_counters here.
    3121 	   If everything else matches except the mod_counter, that's
   3122 	   good enough for this test.  We will deal with the mod_counters
   3123 	   a little later in the autoconfiguration process.
   3124 
   3125 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   3126 
   3127 	   The reason we don't check for this is that failed disks
   3128 	   will have lower modification counts.  If those disks are
   3129 	   not added to the set they used to belong to, then they will
   3130 	   form their own set, which may result in 2 different sets,
   3131 	   for example, competing to be configured at raid0, and
   3132 	   perhaps competing to be the root filesystem set.  If the
   3133 	   wrong ones get configured, or both attempt to become /,
    3134 	   weird behaviour and/or serious lossage will occur.  Thus we
   3135 	   need to bring them into the fold here, and kick them out at
   3136 	   a later point.
   3137 
   3138 	*/
   3139 
   3140 	clabel1 = cset->ac->clabel;
   3141 	clabel2 = ac->clabel;
   3142 	if ((clabel1->version == clabel2->version) &&
   3143 	    (clabel1->serial_number == clabel2->serial_number) &&
   3144 	    (clabel1->num_rows == clabel2->num_rows) &&
   3145 	    (clabel1->num_columns == clabel2->num_columns) &&
   3146 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   3147 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   3148 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   3149 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   3150 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   3151 	    (clabel1->blockSize == clabel2->blockSize) &&
   3152 	    (clabel1->numBlocks == clabel2->numBlocks) &&
   3153 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   3154 	    (clabel1->root_partition == clabel2->root_partition) &&
   3155 	    (clabel1->last_unit == clabel2->last_unit) &&
   3156 	    (clabel1->config_order == clabel2->config_order)) {
    3157 		/* if it gets here, it almost *has* to be a match */
   3158 	} else {
   3159 		/* it's not consistent with somebody in the set..
   3160 		   punt */
   3161 		return(0);
   3162 	}
   3163 	/* all was fine.. it must fit... */
   3164 	return(1);
   3165 }
   3166 
   3167 int
   3168 rf_have_enough_components(RF_ConfigSet_t *cset)
   3169 {
   3170 	RF_AutoConfig_t *ac;
   3171 	RF_AutoConfig_t *auto_config;
   3172 	RF_ComponentLabel_t *clabel;
   3173 	int c;
   3174 	int num_cols;
   3175 	int num_missing;
   3176 	int mod_counter;
   3177 	int mod_counter_found;
   3178 	int even_pair_failed;
   3179 	char parity_type;
   3180 
   3181 
   3182 	/* check to see that we have enough 'live' components
   3183 	   of this set.  If so, we can configure it if necessary */
   3184 
   3185 	num_cols = cset->ac->clabel->num_columns;
   3186 	parity_type = cset->ac->clabel->parityConfig;
   3187 
   3188 	/* XXX Check for duplicate components!?!?!? */
   3189 
   3190 	/* Determine what the mod_counter is supposed to be for this set. */
   3191 
   3192 	mod_counter_found = 0;
   3193 	mod_counter = 0;
   3194 	ac = cset->ac;
   3195 	while(ac!=NULL) {
   3196 		if (mod_counter_found==0) {
   3197 			mod_counter = ac->clabel->mod_counter;
   3198 			mod_counter_found = 1;
   3199 		} else {
   3200 			if (ac->clabel->mod_counter > mod_counter) {
   3201 				mod_counter = ac->clabel->mod_counter;
   3202 			}
   3203 		}
   3204 		ac = ac->next;
   3205 	}
   3206 
   3207 	num_missing = 0;
   3208 	auto_config = cset->ac;
   3209 
   3210 	even_pair_failed = 0;
   3211 	for(c=0; c<num_cols; c++) {
   3212 		ac = auto_config;
   3213 		while(ac!=NULL) {
   3214 			if ((ac->clabel->column == c) &&
   3215 			    (ac->clabel->mod_counter == mod_counter)) {
   3216 				/* it's this one... */
   3217 #ifdef DEBUG
   3218 				printf("Found: %s at %d\n",
   3219 				       ac->devname,c);
   3220 #endif
   3221 				break;
   3222 			}
   3223 			ac=ac->next;
   3224 		}
   3225 		if (ac==NULL) {
   3226 				/* Didn't find one here! */
   3227 				/* special case for RAID 1, especially
   3228 				   where there are more than 2
   3229 				   components (where RAIDframe treats
   3230 				   things a little differently :( ) */
   3231 			if (parity_type == '1') {
   3232 				if (c%2 == 0) { /* even component */
   3233 					even_pair_failed = 1;
   3234 				} else { /* odd component.  If
   3235 					    we're failed, and
   3236 					    so is the even
   3237 					    component, it's
   3238 					    "Good Night, Charlie" */
   3239 					if (even_pair_failed == 1) {
   3240 						return(0);
   3241 					}
   3242 				}
   3243 			} else {
   3244 				/* normal accounting */
   3245 				num_missing++;
   3246 			}
   3247 		}
   3248 		if ((parity_type == '1') && (c%2 == 1)) {
    3249 				/* Just did the odd component of a pair, and
    3250 				   we didn't bail.. reset the even_pair_failed
    3251 				   flag, and go on to the next pair.... */
   3252 			even_pair_failed = 0;
   3253 		}
   3254 	}
   3255 
   3256 	clabel = cset->ac->clabel;
   3257 
   3258 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
   3259 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
   3260 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
   3261 		/* XXX this needs to be made *much* more general */
   3262 		/* Too many failures */
   3263 		return(0);
   3264 	}
   3265 	/* otherwise, all is well, and we've got enough to take a kick
   3266 	   at autoconfiguring this set */
   3267 	return(1);
   3268 }
   3269 
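         /* rf_create_configuration -- build an RF_Config_t from the
            component labels of an autoconfig set: the layout parameters
            come from the first label, and the device name of each
            component is filled in per column. */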
   3270 void
   3271 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
   3272 			RF_Raid_t *raidPtr)
   3273 {
   3274 	RF_ComponentLabel_t *clabel;
   3275 	int i;
   3276 
   3277 	clabel = ac->clabel;
   3278 
   3279 	/* 1. Fill in the common stuff */
   3280 	config->numRow = clabel->num_rows = 1;
   3281 	config->numCol = clabel->num_columns;
   3282 	config->numSpare = 0; /* XXX should this be set here? */
   3283 	config->sectPerSU = clabel->sectPerSU;
   3284 	config->SUsPerPU = clabel->SUsPerPU;
   3285 	config->SUsPerRU = clabel->SUsPerRU;
   3286 	config->parityConfig = clabel->parityConfig;
   3287 	/* XXX... */
   3288 	strcpy(config->diskQueueType,"fifo");
   3289 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3290 	config->layoutSpecificSize = 0; /* XXX ?? */
   3291 
   3292 	while(ac!=NULL) {
   3293 		/* row/col values will be in range due to the checks
    3294 		   in rf_reasonable_label() */
   3295 		strcpy(config->devnames[0][ac->clabel->column],
   3296 		       ac->devname);
   3297 		ac = ac->next;
   3298 	}
   3299 
   3300 	for(i=0;i<RF_MAXDBGV;i++) {
   3301 		config->debugVars[i][0] = 0;
   3302 	}
   3303 }
   3304 
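/*
 * rf_set_autoconfig(raidPtr, new_value) -- set the autoconfigure flag
 * for the set and rewrite the component labels of all optimal
 * components and in-use spares to match.  Returns the new value.
 * (Normally reached via the RAIDFRAME_SET_AUTOCONFIG ioctl.)
 */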
   3305 int
   3306 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
   3307 {
   3308 	RF_ComponentLabel_t clabel;
   3309 	struct vnode *vp;
   3310 	dev_t dev;
   3311 	int column;
   3312 	int sparecol;
   3313 
   3314 	raidPtr->autoconfigure = new_value;
   3315 
   3316 	for(column=0; column<raidPtr->numCol; column++) {
   3317 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3318 			dev = raidPtr->Disks[column].dev;
   3319 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3320 			raidread_component_label(dev, vp, &clabel);
   3321 			clabel.autoconfigure = new_value;
   3322 			raidwrite_component_label(dev, vp, &clabel);
   3323 		}
   3324 	}
    3325 	for (column = 0; column < raidPtr->numSpare; column++) {
   3326 		sparecol = raidPtr->numCol + column;
   3327 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3328 			dev = raidPtr->Disks[sparecol].dev;
   3329 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3330 			raidread_component_label(dev, vp, &clabel);
   3331 			clabel.autoconfigure = new_value;
   3332 			raidwrite_component_label(dev, vp, &clabel);
   3333 		}
   3334 	}
   3335 	return(new_value);
   3336 }
   3337 
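/*
 * rf_set_rootpartition(raidPtr, new_value) -- same idea as
 * rf_set_autoconfig(), but for the root_partition flag: update the
 * in-core value and push it out to the component labels of all
 * optimal components and in-use spares.  Returns the new value.
 */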
   3338 int
   3339 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
   3340 {
   3341 	RF_ComponentLabel_t clabel;
   3342 	struct vnode *vp;
   3343 	dev_t dev;
   3344 	int column;
   3345 	int sparecol;
   3346 
   3347 	raidPtr->root_partition = new_value;
   3348 	for(column=0; column<raidPtr->numCol; column++) {
   3349 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3350 			dev = raidPtr->Disks[column].dev;
   3351 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3352 			raidread_component_label(dev, vp, &clabel);
   3353 			clabel.root_partition = new_value;
   3354 			raidwrite_component_label(dev, vp, &clabel);
   3355 		}
   3356 	}
    3357 	for (column = 0; column < raidPtr->numSpare; column++) {
   3358 		sparecol = raidPtr->numCol + column;
   3359 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3360 			dev = raidPtr->Disks[sparecol].dev;
   3361 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3362 			raidread_component_label(dev, vp, &clabel);
   3363 			clabel.root_partition = new_value;
   3364 			raidwrite_component_label(dev, vp, &clabel);
   3365 		}
   3366 	}
   3367 	return(new_value);
   3368 }
   3369 
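/*
 * rf_release_all_vps(cset) -- close and release the vnodes that were
 * opened for the components of this config set during autoconfig
 * probing.
 */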
   3370 void
   3371 rf_release_all_vps(RF_ConfigSet_t *cset)
   3372 {
   3373 	RF_AutoConfig_t *ac;
   3374 
   3375 	ac = cset->ac;
   3376 	while(ac!=NULL) {
   3377 		/* Close the vp, and give it back */
   3378 		if (ac->vp) {
   3379 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3380 			VOP_CLOSE(ac->vp, FREAD, NOCRED);
   3381 			vput(ac->vp);
   3382 			ac->vp = NULL;
   3383 		}
   3384 		ac = ac->next;
   3385 	}
   3386 }
   3387 
   3388 
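/*
 * rf_cleanup_config_set(cset) -- free the component labels, the
 * autoconfig structures, and finally the config set itself.
 */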
   3389 void
   3390 rf_cleanup_config_set(RF_ConfigSet_t *cset)
   3391 {
   3392 	RF_AutoConfig_t *ac;
   3393 	RF_AutoConfig_t *next_ac;
   3394 
   3395 	ac = cset->ac;
   3396 	while(ac!=NULL) {
   3397 		next_ac = ac->next;
   3398 		/* nuke the label */
   3399 		free(ac->clabel, M_RAIDFRAME);
   3400 		/* cleanup the config structure */
   3401 		free(ac, M_RAIDFRAME);
   3402 		/* "next.." */
   3403 		ac = next_ac;
   3404 	}
   3405 	/* and, finally, nuke the config set */
   3406 	free(cset, M_RAIDFRAME);
   3407 }
   3408 
   3409 
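/*
 * raid_init_component_label(raidPtr, clabel) -- fill in the fields of
 * a component label that come from the RAID set as a whole: version,
 * serial and mod counters, geometry, and the autoconfig/root flags.
 */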
   3410 void
   3411 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
   3412 {
   3413 	/* current version number */
   3414 	clabel->version = RF_COMPONENT_LABEL_VERSION;
   3415 	clabel->serial_number = raidPtr->serial_number;
   3416 	clabel->mod_counter = raidPtr->mod_counter;
   3417 	clabel->num_rows = 1;
   3418 	clabel->num_columns = raidPtr->numCol;
   3419 	clabel->clean = RF_RAID_DIRTY; /* not clean */
   3420 	clabel->status = rf_ds_optimal; /* "It's good!" */
   3421 
   3422 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
   3423 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
   3424 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
   3425 
   3426 	clabel->blockSize = raidPtr->bytesPerSector;
   3427 	clabel->numBlocks = raidPtr->sectorsPerDisk;
   3428 
   3429 	/* XXX not portable */
   3430 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
   3431 	clabel->maxOutstanding = raidPtr->maxOutstanding;
   3432 	clabel->autoconfigure = raidPtr->autoconfigure;
   3433 	clabel->root_partition = raidPtr->root_partition;
   3434 	clabel->last_unit = raidPtr->raidid;
   3435 	clabel->config_order = raidPtr->config_order;
   3436 }
   3437 
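/*
 * rf_auto_config_set(cset, unit) -- actually configure a config set
 * that has been deemed autoconfigurable.  Picks a free raid unit
 * (preferring the one recorded in the component labels), builds an
 * RF_Config_t, and runs the normal configuration path.  Returns 0 on
 * success, with *unit set to the raid unit that was used.
 */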
   3438 int
   3439 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
   3440 {
   3441 	RF_Raid_t *raidPtr;
   3442 	RF_Config_t *config;
   3443 	int raidID;
   3444 	int retcode;
   3445 
   3446 #ifdef DEBUG
   3447 	printf("RAID autoconfigure\n");
   3448 #endif
   3449 
   3450 	retcode = 0;
   3451 	*unit = -1;
   3452 
   3453 	/* 1. Create a config structure */
   3454 
   3455 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
   3456 				       M_RAIDFRAME,
   3457 				       M_NOWAIT);
   3458 	if (config==NULL) {
    3459 		printf("raid autoconfig: out of memory!\n");
   3460 				/* XXX do something more intelligent here. */
   3461 		return(1);
   3462 	}
   3463 
   3464 	memset(config, 0, sizeof(RF_Config_t));
   3465 
   3466 	/*
	   2. Figure out what RAID ID this one is supposed to live at.
   3468 	   See if we can get the same RAID dev that it was configured
   3469 	   on last time..
   3470 	*/
   3471 
   3472 	raidID = cset->ac->clabel->last_unit;
   3473 	if ((raidID < 0) || (raidID >= numraid)) {
   3474 		/* let's not wander off into lala land. */
   3475 		raidID = numraid - 1;
   3476 	}
   3477 	if (raidPtrs[raidID]->valid != 0) {
   3478 
   3479 		/*
   3480 		   Nope... Go looking for an alternative...
   3481 		   Start high so we don't immediately use raid0 if that's
   3482 		   not taken.
   3483 		*/
   3484 
   3485 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
   3486 			if (raidPtrs[raidID]->valid == 0) {
   3487 				/* can use this one! */
   3488 				break;
   3489 			}
   3490 		}
   3491 	}
   3492 
   3493 	if (raidID < 0) {
   3494 		/* punt... */
   3495 		printf("Unable to auto configure this set!\n");
   3496 		printf("(Out of RAID devs!)\n");
   3497 		free(config, M_RAIDFRAME);
   3498 		return(1);
   3499 	}
   3500 
   3501 #ifdef DEBUG
   3502 	printf("Configuring raid%d:\n",raidID);
   3503 #endif
   3504 
   3505 	raidPtr = raidPtrs[raidID];
   3506 
   3507 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
   3508 	raidPtr->raidid = raidID;
   3509 	raidPtr->openings = RAIDOUTSTANDING;
   3510 
   3511 	/* 3. Build the configuration structure */
   3512 	rf_create_configuration(cset->ac, config, raidPtr);
   3513 
   3514 	/* 4. Do the configuration */
   3515 	retcode = rf_Configure(raidPtr, config, cset->ac);
   3516 
   3517 	if (retcode == 0) {
   3518 
   3519 		raidinit(raidPtrs[raidID]);
   3520 
   3521 		rf_markalldirty(raidPtrs[raidID]);
   3522 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
   3523 		if (cset->ac->clabel->root_partition==1) {
   3524 			/* everything configured just fine.  Make a note
   3525 			   that this set is eligible to be root. */
   3526 			cset->rootable = 1;
   3527 			/* XXX do this here? */
   3528 			raidPtrs[raidID]->root_partition = 1;
   3529 		}
   3530 	}
   3531 
   3532 	/* 5. Cleanup */
   3533 	free(config, M_RAIDFRAME);
   3534 
   3535 	*unit = raidID;
   3536 	return(retcode);
   3537 }
   3538 
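/*
 * rf_disk_unbusy(desc) -- tell the disk(9) layer that the I/O
 * described by this access descriptor has completed, so the iostat
 * accounting for the raid device stays accurate.
 */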
   3539 void
   3540 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
   3541 {
   3542 	struct buf *bp;
   3543 
   3544 	bp = (struct buf *)desc->bp;
   3545 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
   3546 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
   3547 }
   3548 
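/*
 * rf_pool_init(p, size, w_chan, xmin, xmax) -- convenience wrapper for
 * setting up one of RAIDframe's pool(9) caches: initialize the pool at
 * IPL_BIO, prime it with xmin items, and set the low/high watermarks
 * to xmin/xmax.
 *
 * A minimal usage sketch (the pool, size, and limit names below are
 * illustrative only, not necessarily those used elsewhere in
 * RAIDframe):
 *
 *	rf_pool_init(&rf_pools.bufio, sizeof(struct raidbuf),
 *	    "raidbufpl", RF_MIN_FREE_BUFIO, RF_MAX_FREE_BUFIO);
 */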
   3549 void
   3550 rf_pool_init(struct pool *p, size_t size, const char *w_chan,
   3551 	     size_t xmin, size_t xmax)
   3552 {
   3553 	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
   3554 	pool_sethiwat(p, xmax);
   3555 	pool_prime(p, xmin);
   3556 	pool_setlowat(p, xmin);
   3557 }
   3558 
   3559 /*
   3560  * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
   3561  * if there is IO pending and if that IO could possibly be done for a
   3562  * given RAID set.  Returns 0 if IO is waiting and can be done, 1
   3563  * otherwise.
   3564  *
   3565  */
   3566 
   3567 int
   3568 rf_buf_queue_check(int raidid)
   3569 {
   3570 	if ((bufq_peek(raid_softc[raidid].buf_queue) != NULL) &&
   3571 	    raidPtrs[raidid]->openings > 0) {
   3572 		/* there is work to do */
   3573 		return 0;
   3574 	}
   3575 	/* default is nothing to do */
   3576 	return 1;
   3577 }
   3578 
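/*
 * rf_getdisksize(vp, l, diskPtr) -- determine the sector size and
 * usable size of a component, first via DIOCGPART (disklabel
 * partitions) and, failing that, via DIOCGWEDGEINFO (wedges).
 * rf_protectedSectors are subtracted to leave room for RAIDframe's
 * reserved area at the start of the component.  Returns 0 on success
 * or the error from the last ioctl tried.
 */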
   3579 int
   3580 rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
   3581 {
   3582 	struct partinfo dpart;
   3583 	struct dkwedge_info dkw;
   3584 	int error;
   3585 
   3586 	error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred);
   3587 	if (error == 0) {
   3588 		diskPtr->blockSize = dpart.disklab->d_secsize;
   3589 		diskPtr->numBlocks = dpart.part->p_size - rf_protectedSectors;
   3590 		diskPtr->partitionSize = dpart.part->p_size;
   3591 		return 0;
   3592 	}
   3593 
   3594 	error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD, l->l_cred);
   3595 	if (error == 0) {
   3596 		diskPtr->blockSize = 512;	/* XXX */
   3597 		diskPtr->numBlocks = dkw.dkw_size - rf_protectedSectors;
   3598 		diskPtr->partitionSize = dkw.dkw_size;
   3599 		return 0;
   3600 	}
   3601 	return error;
   3602 }
   3603 
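/*
 * Autoconf glue for the raid pseudo-device: raid_match() always
 * succeeds, raid_attach() has nothing to do, and raid_detach()
 * refuses to detach a unit that is still configured.
 */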
   3604 static int
   3605 raid_match(device_t self, cfdata_t cfdata, void *aux)
   3606 {
   3607 	return 1;
   3608 }
   3609 
   3610 static void
   3611 raid_attach(device_t parent, device_t self, void *aux)
   3612 {
   3613 
   3614 }
   3615 
   3616 
   3617 static int
   3618 raid_detach(device_t self, int flags)
   3619 {
   3620 	struct raid_softc *rs = device_private(self);
   3621 
   3622 	if (rs->sc_flags & RAIDF_INITED)
   3623 		return EBUSY;
   3624 
   3625 	return 0;
   3626 }
   3627 
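/*
 * rf_set_properties(rs, raidPtr) -- publish a synthetic disk geometry
 * for the raid device through the device-properties "disk-info"
 * dictionary.  The tracks/cylinders values are fabricated from the
 * stripe geometry so that consumers expecting CHS-style numbers get
 * something plausible.
 */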
   3628 static void
   3629 rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
   3630 {
   3631 	prop_dictionary_t disk_info, odisk_info, geom;
   3632 	disk_info = prop_dictionary_create();
   3633 	geom = prop_dictionary_create();
   3634 	prop_dictionary_set_uint64(geom, "sectors-per-unit",
   3635 				   raidPtr->totalSectors);
   3636 	prop_dictionary_set_uint32(geom, "sector-size",
   3637 				   raidPtr->bytesPerSector);
   3638 
   3639 	prop_dictionary_set_uint16(geom, "sectors-per-track",
   3640 				   raidPtr->Layout.dataSectorsPerStripe);
   3641 	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
   3642 				   4 * raidPtr->numCol);
   3643 
   3644 	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
   3645 	   raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
   3646 	   (4 * raidPtr->numCol)));
   3647 
   3648 	prop_dictionary_set(disk_info, "geometry", geom);
   3649 	prop_object_release(geom);
   3650 	prop_dictionary_set(device_properties(rs->sc_dev),
   3651 			    "disk-info", disk_info);
   3652 	odisk_info = rs->sc_dkdev.dk_info;
   3653 	rs->sc_dkdev.dk_info = disk_info;
   3654 	if (odisk_info)
   3655 		prop_object_release(odisk_info);
   3656 }
   3657 
   3658 /*
   3659  * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
   3660  * We end up returning whatever error was returned by the first cache flush
   3661  * that fails.
   3662  */
   3663 
   3664 static int
   3665 rf_sync_component_caches(RF_Raid_t *raidPtr)
   3666 {
   3667 	int c, sparecol;
    3668 	int e, error;
   3669 	int force = 1;
   3670 
   3671 	error = 0;
   3672 	for (c = 0; c < raidPtr->numCol; c++) {
   3673 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   3674 			e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
   3675 					  &force, FWRITE, NOCRED);
   3676 			if (e) {
   3677 				if (e != ENODEV)
   3678 					printf("raid%d: cache flush to component %s failed.\n",
   3679 					       raidPtr->raidid, raidPtr->Disks[c].devname);
   3680 				if (error == 0) {
   3681 					error = e;
   3682 				}
   3683 			}
   3684 		}
   3685 	}
   3686 
    3687 	for (c = 0; c < raidPtr->numSpare; c++) {
   3688 		sparecol = raidPtr->numCol + c;
   3689 		/* Need to ensure that the reconstruct actually completed! */
   3690 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3691 			e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
   3692 					  DIOCCACHESYNC, &force, FWRITE, NOCRED);
   3693 			if (e) {
   3694 				if (e != ENODEV)
   3695 					printf("raid%d: cache flush to component %s failed.\n",
   3696 					       raidPtr->raidid, raidPtr->Disks[sparecol].devname);
   3697 				if (error == 0) {
   3698 					error = e;
   3699 				}
   3700 			}
   3701 		}
   3702 	}
   3703 	return error;
   3704 }
   3705