      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.262 2009/05/12 13:19:50 cegger Exp $	*/
      2 /*-
      3  * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster; Jason R. Thorpe.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     28  * POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 /*
     32  * Copyright (c) 1990, 1993
     33  *      The Regents of the University of California.  All rights reserved.
     34  *
     35  * This code is derived from software contributed to Berkeley by
     36  * the Systems Programming Group of the University of Utah Computer
     37  * Science Department.
     38  *
     39  * Redistribution and use in source and binary forms, with or without
     40  * modification, are permitted provided that the following conditions
     41  * are met:
     42  * 1. Redistributions of source code must retain the above copyright
     43  *    notice, this list of conditions and the following disclaimer.
     44  * 2. Redistributions in binary form must reproduce the above copyright
     45  *    notice, this list of conditions and the following disclaimer in the
     46  *    documentation and/or other materials provided with the distribution.
     47  * 3. Neither the name of the University nor the names of its contributors
     48  *    may be used to endorse or promote products derived from this software
     49  *    without specific prior written permission.
     50  *
     51  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     52  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     53  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     54  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     55  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     56  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     57  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     59  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     60  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     61  * SUCH DAMAGE.
     62  *
     63  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     64  *
     65  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     66  */
     67 
     68 /*
     69  * Copyright (c) 1988 University of Utah.
     70  *
     71  * This code is derived from software contributed to Berkeley by
     72  * the Systems Programming Group of the University of Utah Computer
     73  * Science Department.
     74  *
     75  * Redistribution and use in source and binary forms, with or without
     76  * modification, are permitted provided that the following conditions
     77  * are met:
     78  * 1. Redistributions of source code must retain the above copyright
     79  *    notice, this list of conditions and the following disclaimer.
     80  * 2. Redistributions in binary form must reproduce the above copyright
     81  *    notice, this list of conditions and the following disclaimer in the
     82  *    documentation and/or other materials provided with the distribution.
     83  * 3. All advertising materials mentioning features or use of this software
     84  *    must display the following acknowledgement:
     85  *      This product includes software developed by the University of
     86  *      California, Berkeley and its contributors.
     87  * 4. Neither the name of the University nor the names of its contributors
     88  *    may be used to endorse or promote products derived from this software
     89  *    without specific prior written permission.
     90  *
     91  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     92  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     93  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     94  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     95  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     96  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     97  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     98  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     99  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    100  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    101  * SUCH DAMAGE.
    102  *
    103  * from: Utah $Hdr: cd.c 1.6 90/11/28$
    104  *
    105  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
    106  */
    107 
    108 /*
    109  * Copyright (c) 1995 Carnegie-Mellon University.
    110  * All rights reserved.
    111  *
    112  * Authors: Mark Holland, Jim Zelenka
    113  *
    114  * Permission to use, copy, modify and distribute this software and
    115  * its documentation is hereby granted, provided that both the copyright
    116  * notice and this permission notice appear in all copies of the
    117  * software, derivative works or modified versions, and any portions
    118  * thereof, and that both notices appear in supporting documentation.
    119  *
    120  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
    121  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
    122  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
    123  *
    124  * Carnegie Mellon requests users of this software to return to
    125  *
     126  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
    127  *  School of Computer Science
    128  *  Carnegie Mellon University
    129  *  Pittsburgh PA 15213-3890
    130  *
    131  * any improvements or extensions that they make and grant Carnegie the
    132  * rights to redistribute these changes.
    133  */
    134 
    135 /***********************************************************
    136  *
    137  * rf_kintf.c -- the kernel interface routines for RAIDframe
    138  *
    139  ***********************************************************/
    140 
    141 #include <sys/cdefs.h>
    142 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.262 2009/05/12 13:19:50 cegger Exp $");
    143 
    144 #ifdef _KERNEL_OPT
    145 #include "opt_compat_netbsd.h"
    146 #include "opt_raid_autoconfig.h"
    147 #include "raid.h"
    148 #endif
    149 
    150 #include <sys/param.h>
    151 #include <sys/errno.h>
    152 #include <sys/pool.h>
    153 #include <sys/proc.h>
    154 #include <sys/queue.h>
    155 #include <sys/disk.h>
    156 #include <sys/device.h>
    157 #include <sys/stat.h>
    158 #include <sys/ioctl.h>
    159 #include <sys/fcntl.h>
    160 #include <sys/systm.h>
    161 #include <sys/vnode.h>
    162 #include <sys/disklabel.h>
    163 #include <sys/conf.h>
    164 #include <sys/buf.h>
    165 #include <sys/bufq.h>
    166 #include <sys/user.h>
    167 #include <sys/reboot.h>
    168 #include <sys/kauth.h>
    169 
    170 #include <prop/proplib.h>
    171 
    172 #include <dev/raidframe/raidframevar.h>
    173 #include <dev/raidframe/raidframeio.h>
    174 
    175 #include "rf_raid.h"
    176 #include "rf_copyback.h"
    177 #include "rf_dag.h"
    178 #include "rf_dagflags.h"
    179 #include "rf_desc.h"
    180 #include "rf_diskqueue.h"
    181 #include "rf_etimer.h"
    182 #include "rf_general.h"
    183 #include "rf_kintf.h"
    184 #include "rf_options.h"
    185 #include "rf_driver.h"
    186 #include "rf_parityscan.h"
    187 #include "rf_threadstuff.h"
    188 
    189 #ifdef COMPAT_50
    190 #include "rf_compat50.h"
    191 #endif
    192 
    193 #ifdef DEBUG
    194 int     rf_kdebug_level = 0;
    195 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    196 #else				/* DEBUG */
    197 #define db1_printf(a) { }
    198 #endif				/* DEBUG */
    199 
    200 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
    201 
    202 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    203 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
    204 
    205 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    206 						 * spare table */
    207 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    208 						 * installation process */
    209 #endif
    210 
    211 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    212 
    213 /* prototypes */
    214 static void KernelWakeupFunc(struct buf *);
    215 static void InitBP(struct buf *, struct vnode *, unsigned,
    216     dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
    217     void *, int, struct proc *);
    218 static void raidinit(RF_Raid_t *);
    219 
    220 void raidattach(int);
    221 static int raid_match(device_t, cfdata_t, void *);
    222 static void raid_attach(device_t, device_t, void *);
    223 static int raid_detach(device_t, int);
    224 
    225 dev_type_open(raidopen);
    226 dev_type_close(raidclose);
    227 dev_type_read(raidread);
    228 dev_type_write(raidwrite);
    229 dev_type_ioctl(raidioctl);
    230 dev_type_strategy(raidstrategy);
    231 dev_type_dump(raiddump);
    232 dev_type_size(raidsize);
    233 
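         /* Block and character device switch entries: these wire the raid
            block and raw device nodes to the entry points declared above. */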
    234 const struct bdevsw raid_bdevsw = {
    235 	raidopen, raidclose, raidstrategy, raidioctl,
    236 	raiddump, raidsize, D_DISK
    237 };
    238 
    239 const struct cdevsw raid_cdevsw = {
    240 	raidopen, raidclose, raidread, raidwrite, raidioctl,
    241 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    242 };
    243 
    244 static struct dkdriver rf_dkdriver = { raidstrategy, minphys };
    245 
    246 /* XXX Not sure if the following should be replacing the raidPtrs above,
    247    or if it should be used in conjunction with that...
    248 */
    249 
    250 struct raid_softc {
    251 	device_t sc_dev;
    252 	int     sc_flags;	/* flags */
    253 	int     sc_cflags;	/* configuration flags */
    254 	uint64_t sc_size;	/* size of the raid device */
    255 	char    sc_xname[20];	/* XXX external name */
    256 	struct disk sc_dkdev;	/* generic disk device info */
    257 	struct bufq_state *buf_queue;	/* used for the device queue */
    258 };
    259 /* sc_flags */
    260 #define RAIDF_INITED	0x01	/* unit has been initialized */
    261 #define RAIDF_WLABEL	0x02	/* label area is writable */
    262 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    263 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    264 #define RAIDF_LOCKED	0x80	/* unit is locked */
    265 
    266 #define	raidunit(x)	DISKUNIT(x)
    267 int numraid = 0;
    268 
    269 extern struct cfdriver raid_cd;
    270 CFATTACH_DECL_NEW(raid, sizeof(struct raid_softc),
    271     raid_match, raid_attach, raid_detach, NULL);
    272 
    273 /*
    274  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    275  * Be aware that large numbers can allow the driver to consume a lot of
    276  * kernel memory, especially on writes, and in degraded mode reads.
    277  *
    278  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    279  * a single 64K write will typically require 64K for the old data,
    280  * 64K for the old parity, and 64K for the new parity, for a total
    281  * of 192K (if the parity buffer is not re-used immediately).
     282  * Even if it is used immediately, that's still 128K, which when multiplied
    283  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    284  *
    285  * Now in degraded mode, for example, a 64K read on the above setup may
    286  * require data reconstruction, which will require *all* of the 4 remaining
    287  * disks to participate -- 4 * 32K/disk == 128K again.
    288  */
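         /*
          * As a rough illustration (not a hard limit): with the default
          * RAIDOUTSTANDING of 6 and 64K requests as in the example above,
          * the worst case is about 6 * 192K == 1152K of data/parity
          * buffers, plus 6 * 64K == 384K of incoming data.
          */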
    289 
    290 #ifndef RAIDOUTSTANDING
    291 #define RAIDOUTSTANDING   6
    292 #endif
    293 
    294 #define RAIDLABELDEV(dev)	\
    295 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
    296 
    297 /* declared here, and made public, for the benefit of KVM stuff.. */
    298 struct raid_softc *raid_softc;
    299 
    300 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
    301 				     struct disklabel *);
    302 static void raidgetdisklabel(dev_t);
    303 static void raidmakedisklabel(struct raid_softc *);
    304 
    305 static int raidlock(struct raid_softc *);
    306 static void raidunlock(struct raid_softc *);
    307 
    308 static void rf_markalldirty(RF_Raid_t *);
    309 static void rf_set_properties(struct raid_softc *, RF_Raid_t *);
    310 
    311 void rf_ReconThread(struct rf_recon_req *);
    312 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    313 void rf_CopybackThread(RF_Raid_t *raidPtr);
    314 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    315 int rf_autoconfig(device_t);
    316 void rf_buildroothack(RF_ConfigSet_t *);
    317 
    318 RF_AutoConfig_t *rf_find_raid_components(void);
    319 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    320 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    321 static int rf_reasonable_label(RF_ComponentLabel_t *);
    322 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    323 int rf_set_autoconfig(RF_Raid_t *, int);
    324 int rf_set_rootpartition(RF_Raid_t *, int);
    325 void rf_release_all_vps(RF_ConfigSet_t *);
    326 void rf_cleanup_config_set(RF_ConfigSet_t *);
    327 int rf_have_enough_components(RF_ConfigSet_t *);
    328 int rf_auto_config_set(RF_ConfigSet_t *, int *);
    329 static int rf_sync_component_caches(RF_Raid_t *raidPtr);
    330 
    331 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
    332 				  allow autoconfig to take place.
    333 				  Note that this is overridden by having
    334 				  RAID_AUTOCONFIG as an option in the
    335 				  kernel config file.  */
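         /*
          * For example, autoconfiguration is enabled by a kernel
          * configuration file line such as:
          *
          *	options 	RAID_AUTOCONFIG
          */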
    336 
    337 struct RF_Pools_s rf_pools;
    338 
    339 void
    340 raidattach(int num)
    341 {
    342 	int raidID;
    343 	int i, rc;
    344 
    345 	aprint_debug("raidattach: Asked for %d units\n", num);
    346 
    347 	if (num <= 0) {
    348 #ifdef DIAGNOSTIC
    349 		panic("raidattach: count <= 0");
    350 #endif
    351 		return;
    352 	}
    353 	/* This is where all the initialization stuff gets done. */
    354 
    355 	numraid = num;
    356 
    357 	/* Make some space for requested number of units... */
    358 
    359 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
    360 	if (raidPtrs == NULL) {
    361 		panic("raidPtrs is NULL!!");
    362 	}
    363 
    364 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    365 	rf_mutex_init(&rf_sparet_wait_mutex);
    366 
    367 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
    368 #endif
    369 
    370 	for (i = 0; i < num; i++)
    371 		raidPtrs[i] = NULL;
    372 	rc = rf_BootRaidframe();
    373 	if (rc == 0)
    374 		aprint_normal("Kernelized RAIDframe activated\n");
    375 	else
    376 		panic("Serious error booting RAID!!");
    377 
     378 	/* put together some data structures like the CCD device does.. This
    379 	 * lets us lock the device and what-not when it gets opened. */
    380 
    381 	raid_softc = (struct raid_softc *)
    382 		malloc(num * sizeof(struct raid_softc),
    383 		       M_RAIDFRAME, M_NOWAIT);
    384 	if (raid_softc == NULL) {
    385 		aprint_error("WARNING: no memory for RAIDframe driver\n");
    386 		return;
    387 	}
    388 
    389 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
    390 
    391 	for (raidID = 0; raidID < num; raidID++) {
    392 		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);
    393 
    394 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
    395 			  (RF_Raid_t *));
    396 		if (raidPtrs[raidID] == NULL) {
    397 			aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID);
    398 			numraid = raidID;
    399 			return;
    400 		}
    401 	}
    402 
    403 	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
    404 		aprint_error("raidattach: config_cfattach_attach failed?\n");
    405 	}
    406 
    407 #ifdef RAID_AUTOCONFIG
    408 	raidautoconfig = 1;
    409 #endif
    410 
    411 	/*
    412 	 * Register a finalizer which will be used to auto-config RAID
    413 	 * sets once all real hardware devices have been found.
    414 	 */
    415 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
    416 		aprint_error("WARNING: unable to register RAIDframe finalizer\n");
    417 }
    418 
    419 int
    420 rf_autoconfig(device_t self)
    421 {
    422 	RF_AutoConfig_t *ac_list;
    423 	RF_ConfigSet_t *config_sets;
    424 
    425 	if (raidautoconfig == 0)
    426 		return (0);
    427 
    428 	/* XXX This code can only be run once. */
    429 	raidautoconfig = 0;
    430 
    431 	/* 1. locate all RAID components on the system */
    432 	aprint_debug("Searching for RAID components...\n");
    433 	ac_list = rf_find_raid_components();
    434 
    435 	/* 2. Sort them into their respective sets. */
    436 	config_sets = rf_create_auto_sets(ac_list);
    437 
    438 	/*
     439 	 * 3. Evaluate each set and configure the valid ones.
    440 	 * This gets done in rf_buildroothack().
    441 	 */
    442 	rf_buildroothack(config_sets);
    443 
    444 	return 1;
    445 }
    446 
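         /*
          * rf_buildroothack: configure each autoconfigurable set found by
          * rf_autoconfig().  If exactly one configured set claims the root
          * file system, point booted_device at the corresponding raid unit;
          * if several do, try to match the device we booted from, and ask
          * the user (RB_ASKNAME) if that still does not single one out.
          */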
    447 void
    448 rf_buildroothack(RF_ConfigSet_t *config_sets)
    449 {
    450 	RF_ConfigSet_t *cset;
    451 	RF_ConfigSet_t *next_cset;
    452 	int retcode;
    453 	int raidID;
    454 	int rootID;
    455 	int col;
    456 	int num_root;
    457 	char *devname;
    458 
    459 	rootID = 0;
    460 	num_root = 0;
    461 	cset = config_sets;
    462 	while(cset != NULL ) {
    463 		next_cset = cset->next;
    464 		if (rf_have_enough_components(cset) &&
    465 		    cset->ac->clabel->autoconfigure==1) {
    466 			retcode = rf_auto_config_set(cset,&raidID);
    467 			if (!retcode) {
    468 				aprint_debug("raid%d: configured ok\n", raidID);
    469 				if (cset->rootable) {
    470 					rootID = raidID;
    471 					num_root++;
    472 				}
    473 			} else {
    474 				/* The autoconfig didn't work :( */
    475 				aprint_debug("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
    476 				rf_release_all_vps(cset);
    477 			}
    478 		} else {
    479 			/* we're not autoconfiguring this set...
    480 			   release the associated resources */
    481 			rf_release_all_vps(cset);
    482 		}
    483 		/* cleanup */
    484 		rf_cleanup_config_set(cset);
    485 		cset = next_cset;
    486 	}
    487 
    488 	/* if the user has specified what the root device should be
    489 	   then we don't touch booted_device or boothowto... */
    490 
    491 	if (rootspec != NULL)
    492 		return;
    493 
    494 	/* we found something bootable... */
    495 
    496 	if (num_root == 1) {
    497 		booted_device = raid_softc[rootID].sc_dev;
    498 	} else if (num_root > 1) {
    499 
    500 		/*
    501 		 * Maybe the MD code can help. If it cannot, then
    502 		 * setroot() will discover that we have no
    503 		 * booted_device and will ask the user if nothing was
    504 		 * hardwired in the kernel config file
    505 		 */
    506 
    507 		if (booted_device == NULL)
    508 			cpu_rootconf();
    509 		if (booted_device == NULL)
    510 			return;
    511 
    512 		num_root = 0;
    513 		for (raidID = 0; raidID < numraid; raidID++) {
    514 			if (raidPtrs[raidID]->valid == 0)
    515 				continue;
    516 
    517 			if (raidPtrs[raidID]->root_partition == 0)
    518 				continue;
    519 
    520 			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
    521 				devname = raidPtrs[raidID]->Disks[col].devname;
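         				/* skip the leading "/dev/" prefix */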
    522 				devname += sizeof("/dev/") - 1;
    523 				if (strncmp(devname, device_xname(booted_device),
    524 					    strlen(device_xname(booted_device))) != 0)
    525 					continue;
    526 				aprint_debug("raid%d includes boot device %s\n",
    527 				       raidID, devname);
    528 				num_root++;
    529 				rootID = raidID;
    530 			}
    531 		}
    532 
    533 		if (num_root == 1) {
    534 			booted_device = raid_softc[rootID].sc_dev;
    535 		} else {
    536 			/* we can't guess.. require the user to answer... */
    537 			boothowto |= RB_ASKNAME;
    538 		}
    539 	}
    540 }
    541 
    542 
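         /*
          * raidsize: the d_psize entry point.  Return the size, in
          * DEV_BSIZE units, of a swap partition on the raid device for
          * the swap/dump code, or -1 if it cannot be used.
          */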
    543 int
    544 raidsize(dev_t dev)
    545 {
    546 	struct raid_softc *rs;
    547 	struct disklabel *lp;
    548 	int     part, unit, omask, size;
    549 
    550 	unit = raidunit(dev);
    551 	if (unit >= numraid)
    552 		return (-1);
    553 	rs = &raid_softc[unit];
    554 
    555 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    556 		return (-1);
    557 
    558 	part = DISKPART(dev);
    559 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
    560 	lp = rs->sc_dkdev.dk_label;
    561 
    562 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
    563 		return (-1);
    564 
    565 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    566 		size = -1;
    567 	else
    568 		size = lp->d_partitions[part].p_size *
    569 		    (lp->d_secsize / DEV_BSIZE);
    570 
    571 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
    572 		return (-1);
    573 
    574 	return (size);
    575 
    576 }
    577 
    578 int
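         /*
          * raiddump: the d_dump entry point.  Write a kernel crash dump
          * directly through one live component (or used spare) of a
          * RAID 1 set; other RAID levels are rejected.
          */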
    579 raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
    580 {
    581 	int     unit = raidunit(dev);
    582 	struct raid_softc *rs;
    583 	const struct bdevsw *bdev;
    584 	struct disklabel *lp;
    585 	RF_Raid_t *raidPtr;
    586 	daddr_t offset;
    587 	int     part, c, sparecol, j, scol, dumpto;
    588 	int     error = 0;
    589 
    590 	if (unit >= numraid)
    591 		return (ENXIO);
    592 
    593 	rs = &raid_softc[unit];
    594 	raidPtr = raidPtrs[unit];
    595 
    596 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    597 		return ENXIO;
    598 
    599 	/* we only support dumping to RAID 1 sets */
    600 	if (raidPtr->Layout.numDataCol != 1 ||
    601 	    raidPtr->Layout.numParityCol != 1)
    602 		return EINVAL;
    603 
    604 
    605 	if ((error = raidlock(rs)) != 0)
    606 		return error;
    607 
    608 	if (size % DEV_BSIZE != 0) {
    609 		error = EINVAL;
    610 		goto out;
    611 	}
    612 
    613 	if (blkno + size / DEV_BSIZE > rs->sc_size) {
    614 		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
    615 		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
    616 		    size / DEV_BSIZE, rs->sc_size);
    617 		error = EINVAL;
    618 		goto out;
    619 	}
    620 
    621 	part = DISKPART(dev);
    622 	lp = rs->sc_dkdev.dk_label;
    623 	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;
    624 
    625 	/* figure out what device is alive.. */
    626 
    627 	/*
    628 	   Look for a component to dump to.  The preference for the
    629 	   component to dump to is as follows:
    630 	   1) the master
    631 	   2) a used_spare of the master
    632 	   3) the slave
    633 	   4) a used_spare of the slave
    634 	*/
    635 
    636 	dumpto = -1;
    637 	for (c = 0; c < raidPtr->numCol; c++) {
    638 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
    639 			/* this might be the one */
    640 			dumpto = c;
    641 			break;
    642 		}
    643 	}
    644 
    645 	/*
    646 	   At this point we have possibly selected a live master or a
    647 	   live slave.  We now check to see if there is a spared
    648 	   master (or a spared slave), if we didn't find a live master
    649 	   or a live slave.
    650 	*/
    651 
    652 	for (c = 0; c < raidPtr->numSpare; c++) {
    653 		sparecol = raidPtr->numCol + c;
    654 		if (raidPtr->Disks[sparecol].status ==  rf_ds_used_spare) {
    655 			/* How about this one? */
    656 			scol = -1;
    657 			for(j=0;j<raidPtr->numCol;j++) {
    658 				if (raidPtr->Disks[j].spareCol == sparecol) {
    659 					scol = j;
    660 					break;
    661 				}
    662 			}
    663 			if (scol == 0) {
    664 				/*
    665 				   We must have found a spared master!
    666 				   We'll take that over anything else
    667 				   found so far.  (We couldn't have
    668 				   found a real master before, since
    669 				   this is a used spare, and it's
    670 				   saying that it's replacing the
    671 				   master.)  On reboot (with
    672 				   autoconfiguration turned on)
    673 				   sparecol will become the 1st
    674 				   component (component0) of this set.
    675 				*/
    676 				dumpto = sparecol;
    677 				break;
    678 			} else if (scol != -1) {
    679 				/*
    680 				   Must be a spared slave.  We'll dump
     681 				   to that if we haven't found anything
    682 				   else so far.
    683 				*/
    684 				if (dumpto == -1)
    685 					dumpto = sparecol;
    686 			}
    687 		}
    688 	}
    689 
    690 	if (dumpto == -1) {
    691 		/* we couldn't find any live components to dump to!?!?
    692 		 */
    693 		error = EINVAL;
    694 		goto out;
    695 	}
    696 
    697 	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
    698 
    699 	/*
    700 	   Note that blkno is relative to this particular partition.
    701 	   By adding the offset of this partition in the RAID
    702 	   set, and also adding RF_PROTECTED_SECTORS, we get a
    703 	   value that is relative to the partition used for the
    704 	   underlying component.
    705 	*/
    706 
    707 	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
    708 				blkno + offset, va, size);
    709 
    710 out:
    711 	raidunlock(rs);
    712 
    713 	return error;
    714 }
    715 /* ARGSUSED */
    716 int
    717 raidopen(dev_t dev, int flags, int fmt,
    718     struct lwp *l)
    719 {
    720 	int     unit = raidunit(dev);
    721 	struct raid_softc *rs;
    722 	struct disklabel *lp;
    723 	int     part, pmask;
    724 	int     error = 0;
    725 
    726 	if (unit >= numraid)
    727 		return (ENXIO);
    728 	rs = &raid_softc[unit];
    729 
    730 	if ((error = raidlock(rs)) != 0)
    731 		return (error);
    732 	lp = rs->sc_dkdev.dk_label;
    733 
    734 	part = DISKPART(dev);
    735 
    736 	/*
    737 	 * If there are wedges, and this is not RAW_PART, then we
    738 	 * need to fail.
    739 	 */
    740 	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
    741 		error = EBUSY;
    742 		goto bad;
    743 	}
    744 	pmask = (1 << part);
    745 
    746 	if ((rs->sc_flags & RAIDF_INITED) &&
    747 	    (rs->sc_dkdev.dk_openmask == 0))
    748 		raidgetdisklabel(dev);
    749 
    750 	/* make sure that this partition exists */
    751 
    752 	if (part != RAW_PART) {
    753 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
    754 		    ((part >= lp->d_npartitions) ||
    755 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    756 			error = ENXIO;
    757 			goto bad;
    758 		}
    759 	}
    760 	/* Prevent this unit from being unconfigured while open. */
    761 	switch (fmt) {
    762 	case S_IFCHR:
    763 		rs->sc_dkdev.dk_copenmask |= pmask;
    764 		break;
    765 
    766 	case S_IFBLK:
    767 		rs->sc_dkdev.dk_bopenmask |= pmask;
    768 		break;
    769 	}
    770 
    771 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    772 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    773 		/* First one... mark things as dirty... Note that we *MUST*
    774 		 have done a configure before this.  I DO NOT WANT TO BE
    775 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    776 		 THAT THEY BELONG TOGETHER!!!!! */
    777 		/* XXX should check to see if we're only open for reading
    778 		   here... If so, we needn't do this, but then need some
    779 		   other way of keeping track of what's happened.. */
    780 
    781 		rf_markalldirty( raidPtrs[unit] );
    782 	}
    783 
    784 
    785 	rs->sc_dkdev.dk_openmask =
    786 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    787 
    788 bad:
    789 	raidunlock(rs);
    790 
    791 	return (error);
    792 
    793 
    794 }
    795 /* ARGSUSED */
    796 int
    797 raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
    798 {
    799 	int     unit = raidunit(dev);
    800 	cfdata_t cf;
    801 	struct raid_softc *rs;
    802 	int     error = 0;
    803 	int     part;
    804 
    805 	if (unit >= numraid)
    806 		return (ENXIO);
    807 	rs = &raid_softc[unit];
    808 
    809 	if ((error = raidlock(rs)) != 0)
    810 		return (error);
    811 
    812 	part = DISKPART(dev);
    813 
    814 	/* ...that much closer to allowing unconfiguration... */
    815 	switch (fmt) {
    816 	case S_IFCHR:
    817 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
    818 		break;
    819 
    820 	case S_IFBLK:
    821 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
    822 		break;
    823 	}
    824 	rs->sc_dkdev.dk_openmask =
    825 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    826 
    827 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    828 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
     829 		/* Last one... device is not unconfigured yet.
     830 		   Mark things as clean.  (Device shutdown has
     831 		   already taken care of setting the clean bits
     832 		   if RAIDF_INITED is not set.) */
    833 
    834 		rf_update_component_labels(raidPtrs[unit],
    835 						 RF_FINAL_COMPONENT_UPDATE);
    836 		if (doing_shutdown) {
    837 			/* last one, and we're going down, so
    838 			   lights out for this RAID set too. */
    839 			error = rf_Shutdown(raidPtrs[unit]);
    840 
    841 			/* It's no longer initialized... */
    842 			rs->sc_flags &= ~RAIDF_INITED;
    843 
    844 			/* detach the device */
    845 
    846 			cf = device_cfdata(rs->sc_dev);
    847 			error = config_detach(rs->sc_dev, DETACH_QUIET);
    848 			free(cf, M_RAIDFRAME);
    849 
    850 			/* Detach the disk. */
    851 			disk_detach(&rs->sc_dkdev);
    852 			disk_destroy(&rs->sc_dkdev);
    853 		}
    854 	}
    855 
    856 	raidunlock(rs);
    857 	return (0);
    858 
    859 }
    860 
    861 void
    862 raidstrategy(struct buf *bp)
    863 {
    864 	int s;
    865 
    866 	unsigned int raidID = raidunit(bp->b_dev);
    867 	RF_Raid_t *raidPtr;
     868 	struct raid_softc *rs;
     869 	int     wlabel;
     870 	if (raidID >= numraid || !raidPtrs[raidID]) {
     871 		bp->b_error = ENODEV;
     872 		goto done;
     873 	}
     874 	rs = &raid_softc[raidID];
     875 	if ((rs->sc_flags & RAIDF_INITED) == 0) {
     876 		bp->b_error = ENXIO;
     877 		goto done;
     878 	}
    879 	raidPtr = raidPtrs[raidID];
    880 	if (!raidPtr->valid) {
    881 		bp->b_error = ENODEV;
    882 		goto done;
    883 	}
    884 	if (bp->b_bcount == 0) {
    885 		db1_printf(("b_bcount is zero..\n"));
    886 		goto done;
    887 	}
    888 
    889 	/*
    890 	 * Do bounds checking and adjust transfer.  If there's an
    891 	 * error, the bounds check will flag that for us.
    892 	 */
    893 
    894 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
    895 	if (DISKPART(bp->b_dev) == RAW_PART) {
    896 		uint64_t size; /* device size in DEV_BSIZE unit */
    897 
    898 		if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
    899 			size = raidPtr->totalSectors <<
    900 			    (raidPtr->logBytesPerSector - DEV_BSHIFT);
    901 		} else {
    902 			size = raidPtr->totalSectors >>
    903 			    (DEV_BSHIFT - raidPtr->logBytesPerSector);
    904 		}
    905 		if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
    906 			goto done;
    907 		}
    908 	} else {
    909 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
    910 			db1_printf(("Bounds check failed!!:%d %d\n",
    911 				(int) bp->b_blkno, (int) wlabel));
    912 			goto done;
    913 		}
    914 	}
    915 	s = splbio();
    916 
    917 	bp->b_resid = 0;
    918 
    919 	/* stuff it onto our queue */
    920 	bufq_put(rs->buf_queue, bp);
    921 
     922 	/* schedule the IO to happen at the next convenient time */
    923 	wakeup(&(raidPtrs[raidID]->iodone));
    924 
    925 	splx(s);
    926 	return;
    927 
    928 done:
    929 	bp->b_resid = bp->b_bcount;
    930 	biodone(bp);
    931 }
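         /*
          * raidread/raidwrite: raw character device I/O.  physio() breaks
          * the request into buffers and hands them to raidstrategy().
          */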
    932 /* ARGSUSED */
    933 int
    934 raidread(dev_t dev, struct uio *uio, int flags)
    935 {
    936 	int     unit = raidunit(dev);
    937 	struct raid_softc *rs;
    938 
    939 	if (unit >= numraid)
    940 		return (ENXIO);
    941 	rs = &raid_softc[unit];
    942 
    943 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    944 		return (ENXIO);
    945 
    946 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    947 
    948 }
    949 /* ARGSUSED */
    950 int
    951 raidwrite(dev_t dev, struct uio *uio, int flags)
    952 {
    953 	int     unit = raidunit(dev);
    954 	struct raid_softc *rs;
    955 
    956 	if (unit >= numraid)
    957 		return (ENXIO);
    958 	rs = &raid_softc[unit];
    959 
    960 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    961 		return (ENXIO);
    962 
    963 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    964 
    965 }
    966 
    967 int
    968 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
    969 {
    970 	int     unit = raidunit(dev);
    971 	int     error = 0;
    972 	int     part, pmask;
    973 	cfdata_t cf;
    974 	struct raid_softc *rs;
    975 	RF_Config_t *k_cfg, *u_cfg;
    976 	RF_Raid_t *raidPtr;
    977 	RF_RaidDisk_t *diskPtr;
    978 	RF_AccTotals_t *totals;
    979 	RF_DeviceConfig_t *d_cfg, **ucfgp;
    980 	u_char *specific_buf;
    981 	int retcode = 0;
    982 	int column;
    983 	int raidid;
    984 	struct rf_recon_req *rrcopy, *rr;
    985 	RF_ComponentLabel_t *clabel;
    986 	RF_ComponentLabel_t *ci_label;
    987 	RF_ComponentLabel_t **clabel_ptr;
    988 	RF_SingleComponent_t *sparePtr,*componentPtr;
    989 	RF_SingleComponent_t component;
    990 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
    991 	int i, j, d;
    992 #ifdef __HAVE_OLD_DISKLABEL
    993 	struct disklabel newlabel;
    994 #endif
    995 	struct dkwedge_info *dkw;
    996 
    997 	if (unit >= numraid)
    998 		return (ENXIO);
    999 	rs = &raid_softc[unit];
   1000 	raidPtr = raidPtrs[unit];
   1001 
   1002 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
   1003 		(int) DISKPART(dev), (int) unit, (int) cmd));
   1004 
   1005 	/* Must be open for writes for these commands... */
   1006 	switch (cmd) {
   1007 #ifdef DIOCGSECTORSIZE
   1008 	case DIOCGSECTORSIZE:
   1009 		*(u_int *)data = raidPtr->bytesPerSector;
   1010 		return 0;
   1011 	case DIOCGMEDIASIZE:
   1012 		*(off_t *)data =
   1013 		    (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
   1014 		return 0;
   1015 #endif
   1016 	case DIOCSDINFO:
   1017 	case DIOCWDINFO:
   1018 #ifdef __HAVE_OLD_DISKLABEL
   1019 	case ODIOCWDINFO:
   1020 	case ODIOCSDINFO:
   1021 #endif
   1022 	case DIOCWLABEL:
   1023 	case DIOCAWEDGE:
   1024 	case DIOCDWEDGE:
   1025 		if ((flag & FWRITE) == 0)
   1026 			return (EBADF);
   1027 	}
   1028 
   1029 	/* Must be initialized for these... */
   1030 	switch (cmd) {
   1031 	case DIOCGDINFO:
   1032 	case DIOCSDINFO:
   1033 	case DIOCWDINFO:
   1034 #ifdef __HAVE_OLD_DISKLABEL
   1035 	case ODIOCGDINFO:
   1036 	case ODIOCWDINFO:
   1037 	case ODIOCSDINFO:
   1038 	case ODIOCGDEFLABEL:
   1039 #endif
   1040 	case DIOCGPART:
   1041 	case DIOCWLABEL:
   1042 	case DIOCGDEFLABEL:
   1043 	case DIOCAWEDGE:
   1044 	case DIOCDWEDGE:
   1045 	case DIOCLWEDGES:
   1046 	case DIOCCACHESYNC:
   1047 	case RAIDFRAME_SHUTDOWN:
   1048 	case RAIDFRAME_REWRITEPARITY:
   1049 	case RAIDFRAME_GET_INFO:
   1050 	case RAIDFRAME_RESET_ACCTOTALS:
   1051 	case RAIDFRAME_GET_ACCTOTALS:
   1052 	case RAIDFRAME_KEEP_ACCTOTALS:
   1053 	case RAIDFRAME_GET_SIZE:
   1054 	case RAIDFRAME_FAIL_DISK:
   1055 	case RAIDFRAME_COPYBACK:
   1056 	case RAIDFRAME_CHECK_RECON_STATUS:
   1057 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1058 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1059 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1060 	case RAIDFRAME_ADD_HOT_SPARE:
   1061 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1062 	case RAIDFRAME_INIT_LABELS:
   1063 	case RAIDFRAME_REBUILD_IN_PLACE:
   1064 	case RAIDFRAME_CHECK_PARITY:
   1065 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1066 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1067 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1068 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1069 	case RAIDFRAME_SET_AUTOCONFIG:
   1070 	case RAIDFRAME_SET_ROOT:
   1071 	case RAIDFRAME_DELETE_COMPONENT:
   1072 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1073 		if ((rs->sc_flags & RAIDF_INITED) == 0)
   1074 			return (ENXIO);
   1075 	}
   1076 
   1077 	switch (cmd) {
   1078 #ifdef COMPAT_50
   1079 	case RAIDFRAME_GET_INFO50:
   1080 		return rf_get_info50(raidPtr, data);
   1081 
   1082 	case RAIDFRAME_CONFIGURE50:
   1083 		if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
   1084 			return retcode;
   1085 		goto config;
   1086 #endif
   1087 		/* configure the system */
   1088 	case RAIDFRAME_CONFIGURE:
   1089 
   1090 		if (raidPtr->valid) {
   1091 			/* There is a valid RAID set running on this unit! */
   1092 			printf("raid%d: Device already configured!\n",unit);
   1093 			return(EINVAL);
   1094 		}
   1095 
   1096 		/* copy-in the configuration information */
   1097 		/* data points to a pointer to the configuration structure */
   1098 
   1099 		u_cfg = *((RF_Config_t **) data);
   1100 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
   1101 		if (k_cfg == NULL) {
   1102 			return (ENOMEM);
   1103 		}
   1104 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
   1105 		if (retcode) {
   1106 			RF_Free(k_cfg, sizeof(RF_Config_t));
   1107 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
   1108 				retcode));
   1109 			return (retcode);
   1110 		}
   1111 		goto config;
   1112 	config:
   1113 		/* allocate a buffer for the layout-specific data, and copy it
   1114 		 * in */
   1115 		if (k_cfg->layoutSpecificSize) {
   1116 			if (k_cfg->layoutSpecificSize > 10000) {
   1117 				/* sanity check */
   1118 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1119 				return (EINVAL);
   1120 			}
   1121 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
   1122 			    (u_char *));
   1123 			if (specific_buf == NULL) {
   1124 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1125 				return (ENOMEM);
   1126 			}
   1127 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
   1128 			    k_cfg->layoutSpecificSize);
   1129 			if (retcode) {
   1130 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1131 				RF_Free(specific_buf,
   1132 					k_cfg->layoutSpecificSize);
   1133 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
   1134 					retcode));
   1135 				return (retcode);
   1136 			}
   1137 		} else
   1138 			specific_buf = NULL;
   1139 		k_cfg->layoutSpecific = specific_buf;
   1140 
   1141 		/* should do some kind of sanity check on the configuration.
   1142 		 * Store the sum of all the bytes in the last byte? */
   1143 
   1144 		/* configure the system */
   1145 
   1146 		/*
   1147 		 * Clear the entire RAID descriptor, just to make sure
   1148 		 *  there is no stale data left in the case of a
   1149 		 *  reconfiguration
   1150 		 */
   1151 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
   1152 		raidPtr->raidid = unit;
   1153 
   1154 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
   1155 
   1156 		if (retcode == 0) {
   1157 
   1158 			/* allow this many simultaneous IO's to
   1159 			   this RAID device */
   1160 			raidPtr->openings = RAIDOUTSTANDING;
   1161 
   1162 			raidinit(raidPtr);
   1163 			rf_markalldirty(raidPtr);
   1164 		}
   1165 		/* free the buffers.  No return code here. */
   1166 		if (k_cfg->layoutSpecificSize) {
   1167 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
   1168 		}
   1169 		RF_Free(k_cfg, sizeof(RF_Config_t));
   1170 
   1171 		return (retcode);
   1172 
   1173 		/* shutdown the system */
   1174 	case RAIDFRAME_SHUTDOWN:
   1175 
   1176 		if ((error = raidlock(rs)) != 0)
   1177 			return (error);
   1178 
   1179 		/*
   1180 		 * If somebody has a partition mounted, we shouldn't
    1181 		 * shut down.
   1182 		 */
   1183 
   1184 		part = DISKPART(dev);
   1185 		pmask = (1 << part);
   1186 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
   1187 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
   1188 			(rs->sc_dkdev.dk_copenmask & pmask))) {
   1189 			raidunlock(rs);
   1190 			return (EBUSY);
   1191 		}
   1192 
   1193 		retcode = rf_Shutdown(raidPtr);
   1194 
   1195 		/* It's no longer initialized... */
   1196 		rs->sc_flags &= ~RAIDF_INITED;
   1197 
   1198 		/* free the pseudo device attach bits */
   1199 
   1200 		cf = device_cfdata(rs->sc_dev);
   1201 		/* XXX this causes us to not return any errors
   1202 		   from the above call to rf_Shutdown() */
   1203 		retcode = config_detach(rs->sc_dev, DETACH_QUIET);
   1204 		free(cf, M_RAIDFRAME);
   1205 
   1206 		/* Detach the disk. */
   1207 		disk_detach(&rs->sc_dkdev);
   1208 		disk_destroy(&rs->sc_dkdev);
   1209 
   1210 		raidunlock(rs);
   1211 
   1212 		return (retcode);
   1213 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1214 		clabel_ptr = (RF_ComponentLabel_t **) data;
   1215 		/* need to read the component label for the disk indicated
   1216 		   by row,column in clabel */
   1217 
    1218 		/* For practice, let's get it directly from disk, rather
   1219 		   than from the in-core copy */
   1220 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
   1221 			   (RF_ComponentLabel_t *));
   1222 		if (clabel == NULL)
   1223 			return (ENOMEM);
   1224 
   1225 		retcode = copyin( *clabel_ptr, clabel,
   1226 				  sizeof(RF_ComponentLabel_t));
   1227 
   1228 		if (retcode) {
   1229 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1230 			return(retcode);
   1231 		}
   1232 
   1233 		clabel->row = 0; /* Don't allow looking at anything else.*/
   1234 
   1235 		column = clabel->column;
   1236 
   1237 		if ((column < 0) || (column >= raidPtr->numCol +
   1238 				     raidPtr->numSpare)) {
   1239 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1240 			return(EINVAL);
   1241 		}
   1242 
   1243 		retcode = raidread_component_label(raidPtr->Disks[column].dev,
   1244 				raidPtr->raid_cinfo[column].ci_vp,
   1245 				clabel );
   1246 
   1247 		if (retcode == 0) {
   1248 			retcode = copyout(clabel, *clabel_ptr,
   1249 					  sizeof(RF_ComponentLabel_t));
   1250 		}
   1251 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
   1252 		return (retcode);
   1253 
   1254 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1255 		clabel = (RF_ComponentLabel_t *) data;
   1256 
   1257 		/* XXX check the label for valid stuff... */
   1258 		/* Note that some things *should not* get modified --
   1259 		   the user should be re-initing the labels instead of
   1260 		   trying to patch things.
   1261 		   */
   1262 
   1263 		raidid = raidPtr->raidid;
   1264 #ifdef DEBUG
   1265 		printf("raid%d: Got component label:\n", raidid);
   1266 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1267 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1268 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1269 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1270 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1271 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1272 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1273 #endif
   1274 		clabel->row = 0;
   1275 		column = clabel->column;
   1276 
   1277 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1278 			return(EINVAL);
   1279 		}
   1280 
   1281 		/* XXX this isn't allowed to do anything for now :-) */
   1282 
   1283 		/* XXX and before it is, we need to fill in the rest
   1284 		   of the fields!?!?!?! */
   1285 #if 0
   1286 		raidwrite_component_label(
   1287 		     raidPtr->Disks[column].dev,
   1288 			    raidPtr->raid_cinfo[column].ci_vp,
   1289 			    clabel );
   1290 #endif
   1291 		return (0);
   1292 
   1293 	case RAIDFRAME_INIT_LABELS:
   1294 		clabel = (RF_ComponentLabel_t *) data;
   1295 		/*
   1296 		   we only want the serial number from
   1297 		   the above.  We get all the rest of the information
   1298 		   from the config that was used to create this RAID
   1299 		   set.
   1300 		   */
   1301 
   1302 		raidPtr->serial_number = clabel->serial_number;
   1303 
   1304 		RF_Malloc(ci_label, sizeof(RF_ComponentLabel_t),
   1305 			  (RF_ComponentLabel_t *));
   1306 		if (ci_label == NULL)
   1307 			return (ENOMEM);
   1308 
   1309 		raid_init_component_label(raidPtr, ci_label);
   1310 		ci_label->serial_number = clabel->serial_number;
    1311 		ci_label->row = 0; /* we don't pretend to support more */
   1312 
   1313 		for(column=0;column<raidPtr->numCol;column++) {
   1314 			diskPtr = &raidPtr->Disks[column];
   1315 			if (!RF_DEAD_DISK(diskPtr->status)) {
   1316 				ci_label->partitionSize = diskPtr->partitionSize;
   1317 				ci_label->column = column;
   1318 				raidwrite_component_label(
   1319 							  raidPtr->Disks[column].dev,
   1320 							  raidPtr->raid_cinfo[column].ci_vp,
   1321 							  ci_label );
   1322 			}
   1323 		}
   1324 		RF_Free(ci_label, sizeof(RF_ComponentLabel_t));
   1325 
   1326 		return (retcode);
   1327 	case RAIDFRAME_SET_AUTOCONFIG:
   1328 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1329 		printf("raid%d: New autoconfig value is: %d\n",
   1330 		       raidPtr->raidid, d);
   1331 		*(int *) data = d;
   1332 		return (retcode);
   1333 
   1334 	case RAIDFRAME_SET_ROOT:
   1335 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1336 		printf("raid%d: New rootpartition value is: %d\n",
   1337 		       raidPtr->raidid, d);
   1338 		*(int *) data = d;
   1339 		return (retcode);
   1340 
   1341 		/* initialize all parity */
   1342 	case RAIDFRAME_REWRITEPARITY:
   1343 
   1344 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1345 			/* Parity for RAID 0 is trivially correct */
   1346 			raidPtr->parity_good = RF_RAID_CLEAN;
   1347 			return(0);
   1348 		}
   1349 
   1350 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1351 			/* Re-write is already in progress! */
   1352 			return(EINVAL);
   1353 		}
   1354 
   1355 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1356 					   rf_RewriteParityThread,
   1357 					   raidPtr,"raid_parity");
   1358 		return (retcode);
   1359 
   1360 
   1361 	case RAIDFRAME_ADD_HOT_SPARE:
   1362 		sparePtr = (RF_SingleComponent_t *) data;
   1363 		memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
   1364 		retcode = rf_add_hot_spare(raidPtr, &component);
   1365 		return(retcode);
   1366 
   1367 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1368 		return(retcode);
   1369 
   1370 	case RAIDFRAME_DELETE_COMPONENT:
   1371 		componentPtr = (RF_SingleComponent_t *)data;
   1372 		memcpy( &component, componentPtr,
   1373 			sizeof(RF_SingleComponent_t));
   1374 		retcode = rf_delete_component(raidPtr, &component);
   1375 		return(retcode);
   1376 
   1377 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1378 		componentPtr = (RF_SingleComponent_t *)data;
   1379 		memcpy( &component, componentPtr,
   1380 			sizeof(RF_SingleComponent_t));
   1381 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1382 		return(retcode);
   1383 
   1384 	case RAIDFRAME_REBUILD_IN_PLACE:
   1385 
   1386 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1387 			/* Can't do this on a RAID 0!! */
   1388 			return(EINVAL);
   1389 		}
   1390 
   1391 		if (raidPtr->recon_in_progress == 1) {
   1392 			/* a reconstruct is already in progress! */
   1393 			return(EINVAL);
   1394 		}
   1395 
   1396 		componentPtr = (RF_SingleComponent_t *) data;
   1397 		memcpy( &component, componentPtr,
   1398 			sizeof(RF_SingleComponent_t));
   1399 		component.row = 0; /* we don't support any more */
   1400 		column = component.column;
   1401 
   1402 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1403 			return(EINVAL);
   1404 		}
   1405 
   1406 		RF_LOCK_MUTEX(raidPtr->mutex);
   1407 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
   1408 		    (raidPtr->numFailures > 0)) {
   1409 			/* XXX 0 above shouldn't be constant!!! */
   1410 			/* some component other than this has failed.
   1411 			   Let's not make things worse than they already
   1412 			   are... */
   1413 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1414 			       raidPtr->raidid);
   1415 			printf("raid%d:     Col: %d   Too many failures.\n",
   1416 			       raidPtr->raidid, column);
   1417 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1418 			return (EINVAL);
   1419 		}
   1420 		if (raidPtr->Disks[column].status ==
   1421 		    rf_ds_reconstructing) {
   1422 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1423 			       raidPtr->raidid);
    1424 			printf("raid%d:    Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
   1425 
   1426 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1427 			return (EINVAL);
   1428 		}
   1429 		if (raidPtr->Disks[column].status == rf_ds_spared) {
   1430 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1431 			return (EINVAL);
   1432 		}
   1433 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1434 
   1435 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1436 		if (rrcopy == NULL)
   1437 			return(ENOMEM);
   1438 
   1439 		rrcopy->raidPtr = (void *) raidPtr;
   1440 		rrcopy->col = column;
   1441 
   1442 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1443 					   rf_ReconstructInPlaceThread,
   1444 					   rrcopy,"raid_reconip");
   1445 		return(retcode);
   1446 
   1447 	case RAIDFRAME_GET_INFO:
   1448 		if (!raidPtr->valid)
   1449 			return (ENODEV);
   1450 		ucfgp = (RF_DeviceConfig_t **) data;
   1451 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1452 			  (RF_DeviceConfig_t *));
   1453 		if (d_cfg == NULL)
   1454 			return (ENOMEM);
   1455 		d_cfg->rows = 1; /* there is only 1 row now */
   1456 		d_cfg->cols = raidPtr->numCol;
   1457 		d_cfg->ndevs = raidPtr->numCol;
   1458 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1459 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1460 			return (ENOMEM);
   1461 		}
   1462 		d_cfg->nspares = raidPtr->numSpare;
   1463 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1464 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1465 			return (ENOMEM);
   1466 		}
   1467 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1468 		d = 0;
   1469 		for (j = 0; j < d_cfg->cols; j++) {
   1470 			d_cfg->devs[d] = raidPtr->Disks[j];
   1471 			d++;
   1472 		}
   1473 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1474 			d_cfg->spares[i] = raidPtr->Disks[j];
   1475 		}
   1476 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1477 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1478 
   1479 		return (retcode);
   1480 
   1481 	case RAIDFRAME_CHECK_PARITY:
   1482 		*(int *) data = raidPtr->parity_good;
   1483 		return (0);
   1484 
   1485 	case RAIDFRAME_RESET_ACCTOTALS:
   1486 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1487 		return (0);
   1488 
   1489 	case RAIDFRAME_GET_ACCTOTALS:
   1490 		totals = (RF_AccTotals_t *) data;
   1491 		*totals = raidPtr->acc_totals;
   1492 		return (0);
   1493 
   1494 	case RAIDFRAME_KEEP_ACCTOTALS:
   1495 		raidPtr->keep_acc_totals = *(int *)data;
   1496 		return (0);
   1497 
   1498 	case RAIDFRAME_GET_SIZE:
   1499 		*(int *) data = raidPtr->totalSectors;
   1500 		return (0);
   1501 
   1502 		/* fail a disk & optionally start reconstruction */
   1503 	case RAIDFRAME_FAIL_DISK:
   1504 
   1505 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1506 			/* Can't do this on a RAID 0!! */
   1507 			return(EINVAL);
   1508 		}
   1509 
   1510 		rr = (struct rf_recon_req *) data;
   1511 		rr->row = 0;
   1512 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
   1513 			return (EINVAL);
   1514 
   1515 
   1516 		RF_LOCK_MUTEX(raidPtr->mutex);
   1517 		if (raidPtr->status == rf_rs_reconstructing) {
   1518 			/* you can't fail a disk while we're reconstructing! */
   1519 			/* XXX wrong for RAID6 */
   1520 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1521 			return (EINVAL);
   1522 		}
   1523 		if ((raidPtr->Disks[rr->col].status ==
   1524 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1525 			/* some other component has failed.  Let's not make
   1526 			   things worse. XXX wrong for RAID6 */
   1527 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1528 			return (EINVAL);
   1529 		}
   1530 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
   1531 			/* Can't fail a spared disk! */
   1532 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1533 			return (EINVAL);
   1534 		}
   1535 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1536 
   1537 		/* make a copy of the recon request so that we don't rely on
   1538 		 * the user's buffer */
   1539 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1540 		if (rrcopy == NULL)
   1541 			return(ENOMEM);
   1542 		memcpy(rrcopy, rr, sizeof(*rr));
   1543 		rrcopy->raidPtr = (void *) raidPtr;
   1544 
   1545 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1546 					   rf_ReconThread,
   1547 					   rrcopy,"raid_recon");
   1548 		return (0);
   1549 
   1550 		/* invoke a copyback operation after recon on whatever disk
   1551 		 * needs it, if any */
   1552 	case RAIDFRAME_COPYBACK:
   1553 
   1554 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1555 			/* This makes no sense on a RAID 0!! */
   1556 			return(EINVAL);
   1557 		}
   1558 
   1559 		if (raidPtr->copyback_in_progress == 1) {
   1560 			/* Copyback is already in progress! */
   1561 			return(EINVAL);
   1562 		}
   1563 
   1564 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1565 					   rf_CopybackThread,
   1566 					   raidPtr,"raid_copyback");
   1567 		return (retcode);
   1568 
   1569 		/* return the percentage completion of reconstruction */
   1570 	case RAIDFRAME_CHECK_RECON_STATUS:
   1571 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1572 			/* This makes no sense on a RAID 0, so tell the
   1573 			   user it's done. */
   1574 			*(int *) data = 100;
   1575 			return(0);
   1576 		}
   1577 		if (raidPtr->status != rf_rs_reconstructing)
   1578 			*(int *) data = 100;
   1579 		else {
   1580 			if (raidPtr->reconControl->numRUsTotal > 0) {
   1581 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
   1582 			} else {
   1583 				*(int *) data = 0;
   1584 			}
   1585 		}
   1586 		return (0);
   1587 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1588 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1589 		if (raidPtr->status != rf_rs_reconstructing) {
   1590 			progressInfo.remaining = 0;
   1591 			progressInfo.completed = 100;
   1592 			progressInfo.total = 100;
   1593 		} else {
   1594 			progressInfo.total =
   1595 				raidPtr->reconControl->numRUsTotal;
   1596 			progressInfo.completed =
   1597 				raidPtr->reconControl->numRUsComplete;
   1598 			progressInfo.remaining = progressInfo.total -
   1599 				progressInfo.completed;
   1600 		}
   1601 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1602 				  sizeof(RF_ProgressInfo_t));
   1603 		return (retcode);
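         		/*
         		 * Example of how a userland monitor might poll the two
         		 * forms above (a sketch, assuming the RAIDFRAME_* ioctls
         		 * and RF_ProgressInfo_t are visible via the raidframe
         		 * headers, and that "fd" is an open raid unit; the parity
         		 * rewrite and copyback variants below follow the same
         		 * pattern):
         		 *
         		 *	int pct;
         		 *	RF_ProgressInfo_t info, *infop = &info;
         		 *
         		 *	if (ioctl(fd, RAIDFRAME_CHECK_RECON_STATUS, &pct) == 0)
         		 *		printf("recon: %d%% done\n", pct);
         		 *	if (ioctl(fd, RAIDFRAME_CHECK_RECON_STATUS_EXT,
         		 *	    &infop) == 0)
         		 *		printf("recon: %ju of %ju RUs done\n",
         		 *		    (uintmax_t)info.completed,
         		 *		    (uintmax_t)info.total);
         		 */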
   1604 
   1605 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1606 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1607 			/* This makes no sense on a RAID 0, so tell the
   1608 			   user it's done. */
   1609 			*(int *) data = 100;
   1610 			return(0);
   1611 		}
   1612 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1613 			*(int *) data = 100 *
   1614 				raidPtr->parity_rewrite_stripes_done /
   1615 				raidPtr->Layout.numStripe;
   1616 		} else {
   1617 			*(int *) data = 100;
   1618 		}
   1619 		return (0);
   1620 
   1621 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1622 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1623 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1624 			progressInfo.total = raidPtr->Layout.numStripe;
   1625 			progressInfo.completed =
   1626 				raidPtr->parity_rewrite_stripes_done;
   1627 			progressInfo.remaining = progressInfo.total -
   1628 				progressInfo.completed;
   1629 		} else {
   1630 			progressInfo.remaining = 0;
   1631 			progressInfo.completed = 100;
   1632 			progressInfo.total = 100;
   1633 		}
   1634 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1635 				  sizeof(RF_ProgressInfo_t));
   1636 		return (retcode);
   1637 
   1638 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1639 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1640 			/* This makes no sense on a RAID 0 */
   1641 			*(int *) data = 100;
   1642 			return(0);
   1643 		}
   1644 		if (raidPtr->copyback_in_progress == 1) {
   1645 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1646 				raidPtr->Layout.numStripe;
   1647 		} else {
   1648 			*(int *) data = 100;
   1649 		}
   1650 		return (0);
   1651 
   1652 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1653 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1654 		if (raidPtr->copyback_in_progress == 1) {
   1655 			progressInfo.total = raidPtr->Layout.numStripe;
   1656 			progressInfo.completed =
   1657 				raidPtr->copyback_stripes_done;
   1658 			progressInfo.remaining = progressInfo.total -
   1659 				progressInfo.completed;
   1660 		} else {
   1661 			progressInfo.remaining = 0;
   1662 			progressInfo.completed = 100;
   1663 			progressInfo.total = 100;
   1664 		}
   1665 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1666 				  sizeof(RF_ProgressInfo_t));
   1667 		return (retcode);
   1668 
   1669 		/* the sparetable daemon calls this to wait for the kernel to
   1670 		 * need a spare table. this ioctl does not return until a
   1671 		 * spare table is needed. XXX -- calling mpsleep here in the
   1672 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1673 		 * -- I should either compute the spare table in the kernel,
   1674 		 * or have a different -- XXX XXX -- interface (a different
   1675 		 * character device) for delivering the table     -- XXX */
   1676 #if 0
   1677 	case RAIDFRAME_SPARET_WAIT:
   1678 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1679 		while (!rf_sparet_wait_queue)
   1680 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
   1681 		waitreq = rf_sparet_wait_queue;
   1682 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1683 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1684 
   1685 		/* structure assignment */
   1686 		*((RF_SparetWait_t *) data) = *waitreq;
   1687 
   1688 		RF_Free(waitreq, sizeof(*waitreq));
   1689 		return (0);
   1690 
   1691 		/* wakes up a process waiting on SPARET_WAIT and puts an error
    1692 		 * code in it that will cause the daemon to exit */
   1693 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1694 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1695 		waitreq->fcol = -1;
   1696 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1697 		waitreq->next = rf_sparet_wait_queue;
   1698 		rf_sparet_wait_queue = waitreq;
   1699 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1700 		wakeup(&rf_sparet_wait_queue);
   1701 		return (0);
   1702 
   1703 		/* used by the spare table daemon to deliver a spare table
   1704 		 * into the kernel */
   1705 	case RAIDFRAME_SEND_SPARET:
   1706 
   1707 		/* install the spare table */
   1708 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1709 
   1710 		/* respond to the requestor.  the return status of the spare
   1711 		 * table installation is passed in the "fcol" field */
   1712 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1713 		waitreq->fcol = retcode;
   1714 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1715 		waitreq->next = rf_sparet_resp_queue;
   1716 		rf_sparet_resp_queue = waitreq;
   1717 		wakeup(&rf_sparet_resp_queue);
   1718 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1719 
   1720 		return (retcode);
   1721 #endif
   1722 
   1723 	default:
   1724 		break; /* fall through to the os-specific code below */
   1725 
   1726 	}
   1727 
   1728 	if (!raidPtr->valid)
   1729 		return (EINVAL);
   1730 
   1731 	/*
   1732 	 * Add support for "regular" device ioctls here.
   1733 	 */
   1734 
   1735 	switch (cmd) {
   1736 	case DIOCGDINFO:
   1737 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
   1738 		break;
   1739 #ifdef __HAVE_OLD_DISKLABEL
   1740 	case ODIOCGDINFO:
   1741 		newlabel = *(rs->sc_dkdev.dk_label);
   1742 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1743 			return ENOTTY;
   1744 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1745 		break;
   1746 #endif
   1747 
   1748 	case DIOCGPART:
   1749 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
   1750 		((struct partinfo *) data)->part =
   1751 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1752 		break;
   1753 
   1754 	case DIOCWDINFO:
   1755 	case DIOCSDINFO:
   1756 #ifdef __HAVE_OLD_DISKLABEL
   1757 	case ODIOCWDINFO:
   1758 	case ODIOCSDINFO:
   1759 #endif
   1760 	{
   1761 		struct disklabel *lp;
   1762 #ifdef __HAVE_OLD_DISKLABEL
   1763 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1764 			memset(&newlabel, 0, sizeof newlabel);
   1765 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1766 			lp = &newlabel;
   1767 		} else
   1768 #endif
   1769 		lp = (struct disklabel *)data;
   1770 
   1771 		if ((error = raidlock(rs)) != 0)
   1772 			return (error);
   1773 
   1774 		rs->sc_flags |= RAIDF_LABELLING;
   1775 
   1776 		error = setdisklabel(rs->sc_dkdev.dk_label,
   1777 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
   1778 		if (error == 0) {
   1779 			if (cmd == DIOCWDINFO
   1780 #ifdef __HAVE_OLD_DISKLABEL
   1781 			    || cmd == ODIOCWDINFO
   1782 #endif
   1783 			   )
   1784 				error = writedisklabel(RAIDLABELDEV(dev),
   1785 				    raidstrategy, rs->sc_dkdev.dk_label,
   1786 				    rs->sc_dkdev.dk_cpulabel);
   1787 		}
   1788 		rs->sc_flags &= ~RAIDF_LABELLING;
   1789 
   1790 		raidunlock(rs);
   1791 
   1792 		if (error)
   1793 			return (error);
   1794 		break;
   1795 	}
   1796 
   1797 	case DIOCWLABEL:
   1798 		if (*(int *) data != 0)
   1799 			rs->sc_flags |= RAIDF_WLABEL;
   1800 		else
   1801 			rs->sc_flags &= ~RAIDF_WLABEL;
   1802 		break;
   1803 
   1804 	case DIOCGDEFLABEL:
   1805 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
   1806 		break;
   1807 
   1808 #ifdef __HAVE_OLD_DISKLABEL
   1809 	case ODIOCGDEFLABEL:
   1810 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
   1811 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1812 			return ENOTTY;
   1813 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1814 		break;
   1815 #endif
   1816 
   1817 	case DIOCAWEDGE:
   1818 	case DIOCDWEDGE:
   1819 	    	dkw = (void *)data;
   1820 
   1821 		/* If the ioctl happens here, the parent is us. */
   1822 		(void)strcpy(dkw->dkw_parent, rs->sc_xname);
   1823 		return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
   1824 
   1825 	case DIOCLWEDGES:
   1826 		return dkwedge_list(&rs->sc_dkdev,
   1827 		    (struct dkwedge_list *)data, l);
   1828 	case DIOCCACHESYNC:
   1829 		return rf_sync_component_caches(raidPtr);
   1830 	default:
   1831 		retcode = ENOTTY;
   1832 	}
   1833 	return (retcode);
   1834 
   1835 }
   1836 
   1837 
   1838 /* raidinit -- complete the rest of the initialization for the
   1839    RAIDframe device.  */
   1840 
   1841 
   1842 static void
   1843 raidinit(RF_Raid_t *raidPtr)
   1844 {
   1845 	cfdata_t cf;
   1846 	struct raid_softc *rs;
   1847 	int     unit;
   1848 
   1849 	unit = raidPtr->raidid;
   1850 
   1851 	rs = &raid_softc[unit];
   1852 
   1853 	/* XXX should check return code first... */
   1854 	rs->sc_flags |= RAIDF_INITED;
   1855 
   1856 	/* XXX doesn't check bounds. */
   1857 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
   1858 
   1859 	/* attach the pseudo device */
   1860 	cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
   1861 	cf->cf_name = raid_cd.cd_name;
   1862 	cf->cf_atname = raid_cd.cd_name;
   1863 	cf->cf_unit = unit;
   1864 	cf->cf_fstate = FSTATE_STAR;
   1865 
   1866 	rs->sc_dev = config_attach_pseudo(cf);
   1867 
   1868 	if (rs->sc_dev==NULL) {
   1869 		printf("raid%d: config_attach_pseudo failed\n",
   1870 		       raidPtr->raidid);
   1871 	}
   1872 
   1873 	/* disk_attach actually creates space for the CPU disklabel, among
   1874 	 * other things, so it's critical to call this *BEFORE* we try putzing
   1875 	 * with disklabels. */
   1876 
   1877 	disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
   1878 	disk_attach(&rs->sc_dkdev);
   1879 
    1880 	/* XXX There may be a weird interaction here between this and
   1881 	 * protectedSectors, as used in RAIDframe.  */
   1882 
   1883 	rs->sc_size = raidPtr->totalSectors;
   1884 
   1885 	dkwedge_discover(&rs->sc_dkdev);
   1886 
   1887 	rf_set_properties(rs, raidPtr);
   1888 
   1889 }
   1890 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1891 /* wake up the daemon & tell it to get us a spare table
   1892  * XXX
   1893  * the entries in the queues should be tagged with the raidPtr
   1894  * so that in the extremely rare case that two recons happen at once,
    1895  * we know for which device we're requesting a spare table
   1896  * XXX
   1897  *
   1898  * XXX This code is not currently used. GO
   1899  */
   1900 int
   1901 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
   1902 {
   1903 	int     retcode;
   1904 
   1905 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1906 	req->next = rf_sparet_wait_queue;
   1907 	rf_sparet_wait_queue = req;
   1908 	wakeup(&rf_sparet_wait_queue);
   1909 
   1910 	/* mpsleep unlocks the mutex */
   1911 	while (!rf_sparet_resp_queue) {
   1912 		tsleep(&rf_sparet_resp_queue, PRIBIO,
   1913 		    "raidframe getsparetable", 0);
   1914 	}
   1915 	req = rf_sparet_resp_queue;
   1916 	rf_sparet_resp_queue = req->next;
   1917 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1918 
   1919 	retcode = req->fcol;
   1920 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
   1921 					 * alloc'd */
   1922 	return (retcode);
   1923 }
   1924 #endif
   1925 
   1926 /* a wrapper around rf_DoAccess that extracts appropriate info from the
   1927  * bp & passes it down.
    1928  * Any calls originating in the kernel must use non-blocking I/O.  We also
    1929  * do some extra sanity checking to return "appropriate" error values for
    1930  * certain conditions (to make some standard utilities work).
   1931  *
   1932  * Formerly known as: rf_DoAccessKernel
   1933  */
   1934 void
   1935 raidstart(RF_Raid_t *raidPtr)
   1936 {
   1937 	RF_SectorCount_t num_blocks, pb, sum;
   1938 	RF_RaidAddr_t raid_addr;
   1939 	struct partition *pp;
   1940 	daddr_t blocknum;
   1941 	int     unit;
   1942 	struct raid_softc *rs;
   1943 	int     do_async;
   1944 	struct buf *bp;
   1945 	int rc;
   1946 
   1947 	unit = raidPtr->raidid;
   1948 	rs = &raid_softc[unit];
   1949 
   1950 	/* quick check to see if anything has died recently */
   1951 	RF_LOCK_MUTEX(raidPtr->mutex);
   1952 	if (raidPtr->numNewFailures > 0) {
   1953 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1954 		rf_update_component_labels(raidPtr,
   1955 					   RF_NORMAL_COMPONENT_UPDATE);
   1956 		RF_LOCK_MUTEX(raidPtr->mutex);
   1957 		raidPtr->numNewFailures--;
   1958 	}
   1959 
   1960 	/* Check to see if we're at the limit... */
   1961 	while (raidPtr->openings > 0) {
   1962 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1963 
   1964 		/* get the next item, if any, from the queue */
   1965 		if ((bp = bufq_get(rs->buf_queue)) == NULL) {
   1966 			/* nothing more to do */
   1967 			return;
   1968 		}
   1969 
   1970 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
   1971 		 * partition.. Need to make it absolute to the underlying
   1972 		 * device.. */
   1973 
   1974 		blocknum = bp->b_blkno;
   1975 		if (DISKPART(bp->b_dev) != RAW_PART) {
   1976 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
   1977 			blocknum += pp->p_offset;
   1978 		}
   1979 
   1980 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
   1981 			    (int) blocknum));
   1982 
   1983 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
   1984 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
   1985 
   1986 		/* *THIS* is where we adjust what block we're going to...
   1987 		 * but DO NOT TOUCH bp->b_blkno!!! */
   1988 		raid_addr = blocknum;
   1989 
   1990 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
   1991 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
   1992 		sum = raid_addr + num_blocks + pb;
   1993 		if (1 || rf_debugKernelAccess) {
   1994 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
   1995 				    (int) raid_addr, (int) sum, (int) num_blocks,
   1996 				    (int) pb, (int) bp->b_resid));
   1997 		}
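         		/* "sum" is the first sector past the end of this request
         		 * (pb accounts for a trailing partial sector).  Reject the
         		 * request if it runs off the end of the RAID set, or if
         		 * the arithmetic above wrapped around. */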
   1998 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
   1999 		    || (sum < num_blocks) || (sum < pb)) {
   2000 			bp->b_error = ENOSPC;
   2001 			bp->b_resid = bp->b_bcount;
   2002 			biodone(bp);
   2003 			RF_LOCK_MUTEX(raidPtr->mutex);
   2004 			continue;
   2005 		}
   2006 		/*
   2007 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
   2008 		 */
   2009 
   2010 		if (bp->b_bcount & raidPtr->sectorMask) {
   2011 			bp->b_error = EINVAL;
   2012 			bp->b_resid = bp->b_bcount;
   2013 			biodone(bp);
   2014 			RF_LOCK_MUTEX(raidPtr->mutex);
   2015 			continue;
   2016 
   2017 		}
   2018 		db1_printf(("Calling DoAccess..\n"));
   2019 
   2020 
   2021 		RF_LOCK_MUTEX(raidPtr->mutex);
   2022 		raidPtr->openings--;
   2023 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   2024 
   2025 		/*
   2026 		 * Everything is async.
   2027 		 */
   2028 		do_async = 1;
   2029 
   2030 		disk_busy(&rs->sc_dkdev);
   2031 
   2032 		/* XXX we're still at splbio() here... do we *really*
   2033 		   need to be? */
   2034 
   2035 		/* don't ever condition on bp->b_flags & B_WRITE.
   2036 		 * always condition on B_READ instead */
   2037 
   2038 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
   2039 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
   2040 				 do_async, raid_addr, num_blocks,
   2041 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
   2042 
   2043 		if (rc) {
   2044 			bp->b_error = rc;
   2045 			bp->b_resid = bp->b_bcount;
   2046 			biodone(bp);
   2047 			/* continue loop */
   2048 		}
   2049 
   2050 		RF_LOCK_MUTEX(raidPtr->mutex);
   2051 	}
   2052 	RF_UNLOCK_MUTEX(raidPtr->mutex);
   2053 }
   2054 
   2055 
   2056 
   2057 
   2058 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   2059 
   2060 int
   2061 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
   2062 {
   2063 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   2064 	struct buf *bp;
   2065 
   2066 	req->queue = queue;
   2067 	bp = req->bp;
   2068 
   2069 	switch (req->type) {
   2070 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   2071 		/* XXX need to do something extra here.. */
   2072 		/* I'm leaving this in, as I've never actually seen it used,
   2073 		 * and I'd like folks to report it... GO */
    2074 		printf("WAKEUP CALLED\n");
   2075 		queue->numOutstanding++;
   2076 
   2077 		bp->b_flags = 0;
   2078 		bp->b_private = req;
   2079 
   2080 		KernelWakeupFunc(bp);
   2081 		break;
   2082 
   2083 	case RF_IO_TYPE_READ:
   2084 	case RF_IO_TYPE_WRITE:
   2085 #if RF_ACC_TRACE > 0
   2086 		if (req->tracerec) {
   2087 			RF_ETIMER_START(req->tracerec->timer);
   2088 		}
   2089 #endif
   2090 		InitBP(bp, queue->rf_cinfo->ci_vp,
   2091 		    op, queue->rf_cinfo->ci_dev,
   2092 		    req->sectorOffset, req->numSector,
   2093 		    req->buf, KernelWakeupFunc, (void *) req,
   2094 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   2095 
   2096 		if (rf_debugKernelAccess) {
   2097 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   2098 				(long) bp->b_blkno));
   2099 		}
   2100 		queue->numOutstanding++;
   2101 		queue->last_deq_sector = req->sectorOffset;
   2102 		/* acc wouldn't have been let in if there were any pending
   2103 		 * reqs at any other priority */
   2104 		queue->curPriority = req->priority;
   2105 
   2106 		db1_printf(("Going for %c to unit %d col %d\n",
   2107 			    req->type, queue->raidPtr->raidid,
   2108 			    queue->col));
   2109 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   2110 			(int) req->sectorOffset, (int) req->numSector,
   2111 			(int) (req->numSector <<
   2112 			    queue->raidPtr->logBytesPerSector),
   2113 			(int) queue->raidPtr->logBytesPerSector));
   2114 
   2115 		/*
   2116 		 * XXX: drop lock here since this can block at
   2117 		 * least with backing SCSI devices.  Retake it
   2118 		 * to minimize fuss with calling interfaces.
   2119 		 */
   2120 
   2121 		RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
   2122 		bdev_strategy(bp);
   2123 		RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
   2124 		break;
   2125 
   2126 	default:
   2127 		panic("bad req->type in rf_DispatchKernelIO");
   2128 	}
   2129 	db1_printf(("Exiting from DispatchKernelIO\n"));
   2130 
   2131 	return (0);
   2132 }
    2133 /* this is the callback function associated with an I/O invoked from
   2134    kernel code.
   2135  */
   2136 static void
   2137 KernelWakeupFunc(struct buf *bp)
   2138 {
   2139 	RF_DiskQueueData_t *req = NULL;
   2140 	RF_DiskQueue_t *queue;
   2141 	int s;
   2142 
   2143 	s = splbio();
   2144 	db1_printf(("recovering the request queue:\n"));
   2145 	req = bp->b_private;
   2146 
   2147 	queue = (RF_DiskQueue_t *) req->queue;
   2148 
   2149 #if RF_ACC_TRACE > 0
   2150 	if (req->tracerec) {
   2151 		RF_ETIMER_STOP(req->tracerec->timer);
   2152 		RF_ETIMER_EVAL(req->tracerec->timer);
   2153 		RF_LOCK_MUTEX(rf_tracing_mutex);
   2154 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   2155 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   2156 		req->tracerec->num_phys_ios++;
   2157 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
   2158 	}
   2159 #endif
   2160 
   2161 	/* XXX Ok, let's get aggressive... If b_error is set, let's go
   2162 	 * ballistic, and mark the component as hosed... */
   2163 
   2164 	if (bp->b_error != 0) {
   2165 		/* Mark the disk as dead */
   2166 		/* but only mark it once... */
   2167 		/* and only if it wouldn't leave this RAID set
   2168 		   completely broken */
   2169 		if (((queue->raidPtr->Disks[queue->col].status ==
   2170 		      rf_ds_optimal) ||
   2171 		     (queue->raidPtr->Disks[queue->col].status ==
   2172 		      rf_ds_used_spare)) &&
   2173 		     (queue->raidPtr->numFailures <
   2174 		      queue->raidPtr->Layout.map->faultsTolerated)) {
   2175 			printf("raid%d: IO Error.  Marking %s as failed.\n",
   2176 			       queue->raidPtr->raidid,
   2177 			       queue->raidPtr->Disks[queue->col].devname);
   2178 			queue->raidPtr->Disks[queue->col].status =
   2179 			    rf_ds_failed;
   2180 			queue->raidPtr->status = rf_rs_degraded;
   2181 			queue->raidPtr->numFailures++;
   2182 			queue->raidPtr->numNewFailures++;
   2183 		} else {	/* Disk is already dead... */
   2184 			/* printf("Disk already marked as dead!\n"); */
   2185 		}
   2186 
   2187 	}
   2188 
   2189 	/* Fill in the error value */
   2190 
   2191 	req->error = bp->b_error;
   2192 
   2193 	simple_lock(&queue->raidPtr->iodone_lock);
   2194 
   2195 	/* Drop this one on the "finished" queue... */
   2196 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
   2197 
   2198 	/* Let the raidio thread know there is work to be done. */
   2199 	wakeup(&(queue->raidPtr->iodone));
   2200 
   2201 	simple_unlock(&queue->raidPtr->iodone_lock);
   2202 
   2203 	splx(s);
   2204 }
   2205 
   2206 
   2207 
   2208 /*
   2209  * initialize a buf structure for doing an I/O in the kernel.
   2210  */
   2211 static void
   2212 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
   2213        RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
   2214        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
   2215        struct proc *b_proc)
   2216 {
   2217 	/* bp->b_flags       = B_PHYS | rw_flag; */
   2218 	bp->b_flags = rw_flag;	/* XXX need B_PHYS here too??? */
   2219 	bp->b_oflags = 0;
   2220 	bp->b_cflags = 0;
   2221 	bp->b_bcount = numSect << logBytesPerSector;
   2222 	bp->b_bufsize = bp->b_bcount;
   2223 	bp->b_error = 0;
   2224 	bp->b_dev = dev;
   2225 	bp->b_data = bf;
   2226 	bp->b_blkno = startSect;
   2227 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
   2228 	if (bp->b_bcount == 0) {
   2229 		panic("bp->b_bcount is zero in InitBP!!");
   2230 	}
   2231 	bp->b_proc = b_proc;
   2232 	bp->b_iodone = cbFunc;
   2233 	bp->b_private = cbArg;
   2234 }
   2235 
   2236 static void
   2237 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
   2238 		    struct disklabel *lp)
   2239 {
   2240 	memset(lp, 0, sizeof(*lp));
   2241 
   2242 	/* fabricate a label... */
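         	/* The geometry below is synthetic; it only needs to be
         	 * self-consistent, since a RAID set has no physical tracks
         	 * or cylinders of its own. */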
   2243 	lp->d_secperunit = raidPtr->totalSectors;
   2244 	lp->d_secsize = raidPtr->bytesPerSector;
   2245 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
   2246 	lp->d_ntracks = 4 * raidPtr->numCol;
   2247 	lp->d_ncylinders = raidPtr->totalSectors /
   2248 		(lp->d_nsectors * lp->d_ntracks);
   2249 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
   2250 
   2251 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
   2252 	lp->d_type = DTYPE_RAID;
   2253 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   2254 	lp->d_rpm = 3600;
   2255 	lp->d_interleave = 1;
   2256 	lp->d_flags = 0;
   2257 
   2258 	lp->d_partitions[RAW_PART].p_offset = 0;
   2259 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
   2260 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   2261 	lp->d_npartitions = RAW_PART + 1;
   2262 
   2263 	lp->d_magic = DISKMAGIC;
   2264 	lp->d_magic2 = DISKMAGIC;
   2265 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
   2266 
   2267 }
   2268 /*
   2269  * Read the disklabel from the raid device.  If one is not present, fake one
   2270  * up.
   2271  */
   2272 static void
   2273 raidgetdisklabel(dev_t dev)
   2274 {
   2275 	int     unit = raidunit(dev);
   2276 	struct raid_softc *rs = &raid_softc[unit];
   2277 	const char   *errstring;
   2278 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2279 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
   2280 	RF_Raid_t *raidPtr;
   2281 
   2282 	db1_printf(("Getting the disklabel...\n"));
   2283 
   2284 	memset(clp, 0, sizeof(*clp));
   2285 
   2286 	raidPtr = raidPtrs[unit];
   2287 
   2288 	raidgetdefaultlabel(raidPtr, rs, lp);
   2289 
   2290 	/*
   2291 	 * Call the generic disklabel extraction routine.
   2292 	 */
   2293 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
   2294 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
   2295 	if (errstring)
   2296 		raidmakedisklabel(rs);
   2297 	else {
   2298 		int     i;
   2299 		struct partition *pp;
   2300 
   2301 		/*
   2302 		 * Sanity check whether the found disklabel is valid.
   2303 		 *
    2304 		 * This is necessary since the total size of the raid device
    2305 		 * may vary when the interleave is changed even though exactly
    2306 		 * the same components are used, and an old disklabel may be
    2307 		 * used if one is found.
   2308 		 */
   2309 		if (lp->d_secperunit != rs->sc_size)
   2310 			printf("raid%d: WARNING: %s: "
   2311 			    "total sector size in disklabel (%" PRIu32 ") != "
   2312 			    "the size of raid (%" PRIu64 ")\n", unit, rs->sc_xname,
   2313 			    lp->d_secperunit, rs->sc_size);
   2314 		for (i = 0; i < lp->d_npartitions; i++) {
   2315 			pp = &lp->d_partitions[i];
   2316 			if (pp->p_offset + pp->p_size > rs->sc_size)
   2317 				printf("raid%d: WARNING: %s: end of partition `%c' "
   2318 				       "exceeds the size of raid (%" PRIu64 ")\n",
   2319 				       unit, rs->sc_xname, 'a' + i, rs->sc_size);
   2320 		}
   2321 	}
   2322 
   2323 }
   2324 /*
   2325  * Take care of things one might want to take care of in the event
   2326  * that a disklabel isn't present.
   2327  */
   2328 static void
   2329 raidmakedisklabel(struct raid_softc *rs)
   2330 {
   2331 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2332 	db1_printf(("Making a label..\n"));
   2333 
   2334 	/*
   2335 	 * For historical reasons, if there's no disklabel present
   2336 	 * the raw partition must be marked FS_BSDFFS.
   2337 	 */
   2338 
   2339 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
   2340 
   2341 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
   2342 
   2343 	lp->d_checksum = dkcksum(lp);
   2344 }
   2345 /*
   2346  * Wait interruptibly for an exclusive lock.
   2347  *
   2348  * XXX
   2349  * Several drivers do this; it should be abstracted and made MP-safe.
   2350  * (Hmm... where have we seen this warning before :->  GO )
   2351  */
   2352 static int
   2353 raidlock(struct raid_softc *rs)
   2354 {
   2355 	int     error;
   2356 
   2357 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2358 		rs->sc_flags |= RAIDF_WANTED;
   2359 		if ((error =
   2360 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
   2361 			return (error);
   2362 	}
   2363 	rs->sc_flags |= RAIDF_LOCKED;
   2364 	return (0);
   2365 }
   2366 /*
   2367  * Unlock and wake up any waiters.
   2368  */
   2369 static void
   2370 raidunlock(struct raid_softc *rs)
   2371 {
   2372 
   2373 	rs->sc_flags &= ~RAIDF_LOCKED;
   2374 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2375 		rs->sc_flags &= ~RAIDF_WANTED;
   2376 		wakeup(rs);
   2377 	}
   2378 }
   2379 
   2380 
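         /* Component labels live at a fixed offset into each component; the
          * helpers below use these constants to locate and size them. */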
   2381 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2382 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
   2383 
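         /* Read-modify-write helpers: fetch the component label, update the
          * mod counter and the clean/dirty flag, and write the label back. */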
   2384 int
   2385 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
   2386 {
   2387 	RF_ComponentLabel_t clabel;
   2388 	raidread_component_label(dev, b_vp, &clabel);
   2389 	clabel.mod_counter = mod_counter;
   2390 	clabel.clean = RF_RAID_CLEAN;
   2391 	raidwrite_component_label(dev, b_vp, &clabel);
   2392 	return(0);
   2393 }
   2394 
   2395 
   2396 int
   2397 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
   2398 {
   2399 	RF_ComponentLabel_t clabel;
   2400 	raidread_component_label(dev, b_vp, &clabel);
   2401 	clabel.mod_counter = mod_counter;
   2402 	clabel.clean = RF_RAID_DIRTY;
   2403 	raidwrite_component_label(dev, b_vp, &clabel);
   2404 	return(0);
   2405 }
   2406 
   2407 /* ARGSUSED */
   2408 int
   2409 raidread_component_label(dev_t dev, struct vnode *b_vp,
   2410 			 RF_ComponentLabel_t *clabel)
   2411 {
   2412 	struct buf *bp;
   2413 	const struct bdevsw *bdev;
   2414 	int error;
   2415 
   2416 	/* XXX should probably ensure that we don't try to do this if
   2417 	   someone has changed rf_protected_sectors. */
   2418 
   2419 	if (b_vp == NULL) {
   2420 		/* For whatever reason, this component is not valid.
   2421 		   Don't try to read a component label from it. */
   2422 		return(EINVAL);
   2423 	}
   2424 
   2425 	/* get a block of the appropriate size... */
   2426 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2427 	bp->b_dev = dev;
   2428 
   2429 	/* get our ducks in a row for the read */
   2430 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2431 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2432 	bp->b_flags |= B_READ;
   2433  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2434 
   2435 	bdev = bdevsw_lookup(bp->b_dev);
   2436 	if (bdev == NULL)
   2437 		return (ENXIO);
   2438 	(*bdev->d_strategy)(bp);
   2439 
   2440 	error = biowait(bp);
   2441 
   2442 	if (!error) {
   2443 		memcpy(clabel, bp->b_data,
   2444 		       sizeof(RF_ComponentLabel_t));
   2445 	}
   2446 
   2447 	brelse(bp, 0);
   2448 	return(error);
   2449 }
   2450 /* ARGSUSED */
   2451 int
   2452 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
   2453 			  RF_ComponentLabel_t *clabel)
   2454 {
   2455 	struct buf *bp;
   2456 	const struct bdevsw *bdev;
   2457 	int error;
   2458 
   2459 	/* get a block of the appropriate size... */
   2460 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2461 	bp->b_dev = dev;
   2462 
   2463 	/* get our ducks in a row for the write */
   2464 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2465 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2466 	bp->b_flags |= B_WRITE;
   2467  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2468 
   2469 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
   2470 
   2471 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
   2472 
   2473 	bdev = bdevsw_lookup(bp->b_dev);
   2474 	if (bdev == NULL)
   2475 		return (ENXIO);
   2476 	(*bdev->d_strategy)(bp);
   2477 	error = biowait(bp);
   2478 	brelse(bp, 0);
   2479 	if (error) {
   2480 #if 1
   2481 		printf("Failed to write RAID component info!\n");
   2482 #endif
   2483 	}
   2484 
   2485 	return(error);
   2486 }
   2487 
   2488 void
   2489 rf_markalldirty(RF_Raid_t *raidPtr)
   2490 {
   2491 	RF_ComponentLabel_t clabel;
   2492 	int sparecol;
   2493 	int c;
   2494 	int j;
   2495 	int scol = -1;
   2496 
   2497 	raidPtr->mod_counter++;
   2498 	for (c = 0; c < raidPtr->numCol; c++) {
   2499 		/* we don't want to touch (at all) a disk that has
   2500 		   failed */
   2501 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
   2502 			raidread_component_label(
   2503 						 raidPtr->Disks[c].dev,
   2504 						 raidPtr->raid_cinfo[c].ci_vp,
   2505 						 &clabel);
   2506 			if (clabel.status == rf_ds_spared) {
   2507 				/* XXX do something special...
   2508 				   but whatever you do, don't
   2509 				   try to access it!! */
   2510 			} else {
   2511 				raidmarkdirty(
   2512 					      raidPtr->Disks[c].dev,
   2513 					      raidPtr->raid_cinfo[c].ci_vp,
   2514 					      raidPtr->mod_counter);
   2515 			}
   2516 		}
   2517 	}
   2518 
   2519 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2520 		sparecol = raidPtr->numCol + c;
   2521 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2522 			/*
   2523 
   2524 			   we claim this disk is "optimal" if it's
   2525 			   rf_ds_used_spare, as that means it should be
   2526 			   directly substitutable for the disk it replaced.
   2527 			   We note that too...
   2528 
   2529 			 */
   2530 
   2531 			for(j=0;j<raidPtr->numCol;j++) {
   2532 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2533 					scol = j;
   2534 					break;
   2535 				}
   2536 			}
   2537 
   2538 			raidread_component_label(
   2539 				 raidPtr->Disks[sparecol].dev,
   2540 				 raidPtr->raid_cinfo[sparecol].ci_vp,
   2541 				 &clabel);
   2542 			/* make sure status is noted */
   2543 
   2544 			raid_init_component_label(raidPtr, &clabel);
   2545 
   2546 			clabel.row = 0;
   2547 			clabel.column = scol;
   2548 			/* Note: we *don't* change status from rf_ds_used_spare
   2549 			   to rf_ds_optimal */
   2550 			/* clabel.status = rf_ds_optimal; */
   2551 
   2552 			raidmarkdirty(raidPtr->Disks[sparecol].dev,
   2553 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2554 				      raidPtr->mod_counter);
   2555 		}
   2556 	}
   2557 }
   2558 
   2559 
   2560 void
   2561 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
   2562 {
   2563 	RF_ComponentLabel_t clabel;
   2564 	int sparecol;
   2565 	int c;
   2566 	int j;
   2567 	int scol;
   2568 
   2569 	scol = -1;
   2570 
   2571 	/* XXX should do extra checks to make sure things really are clean,
   2572 	   rather than blindly setting the clean bit... */
   2573 
   2574 	raidPtr->mod_counter++;
   2575 
   2576 	for (c = 0; c < raidPtr->numCol; c++) {
   2577 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   2578 			raidread_component_label(
   2579 						 raidPtr->Disks[c].dev,
   2580 						 raidPtr->raid_cinfo[c].ci_vp,
   2581 						 &clabel);
   2582 			/* make sure status is noted */
   2583 			clabel.status = rf_ds_optimal;
   2584 
   2585 			/* bump the counter */
   2586 			clabel.mod_counter = raidPtr->mod_counter;
   2587 
   2588 			/* note what unit we are configured as */
   2589 			clabel.last_unit = raidPtr->raidid;
   2590 
   2591 			raidwrite_component_label(
   2592 						  raidPtr->Disks[c].dev,
   2593 						  raidPtr->raid_cinfo[c].ci_vp,
   2594 						  &clabel);
   2595 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2596 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2597 					raidmarkclean(
   2598 						      raidPtr->Disks[c].dev,
   2599 						      raidPtr->raid_cinfo[c].ci_vp,
   2600 						      raidPtr->mod_counter);
   2601 				}
   2602 			}
   2603 		}
   2604 		/* else we don't touch it.. */
   2605 	}
   2606 
   2607 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2608 		sparecol = raidPtr->numCol + c;
   2609 		/* Need to ensure that the reconstruct actually completed! */
   2610 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2611 			/*
   2612 
   2613 			   we claim this disk is "optimal" if it's
   2614 			   rf_ds_used_spare, as that means it should be
   2615 			   directly substitutable for the disk it replaced.
   2616 			   We note that too...
   2617 
   2618 			 */
   2619 
   2620 			for(j=0;j<raidPtr->numCol;j++) {
   2621 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2622 					scol = j;
   2623 					break;
   2624 				}
   2625 			}
   2626 
   2627 			/* XXX shouldn't *really* need this... */
   2628 			raidread_component_label(
   2629 				      raidPtr->Disks[sparecol].dev,
   2630 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2631 				      &clabel);
   2632 			/* make sure status is noted */
   2633 
   2634 			raid_init_component_label(raidPtr, &clabel);
   2635 
   2636 			clabel.mod_counter = raidPtr->mod_counter;
   2637 			clabel.column = scol;
   2638 			clabel.status = rf_ds_optimal;
   2639 			clabel.last_unit = raidPtr->raidid;
   2640 
   2641 			raidwrite_component_label(
   2642 				      raidPtr->Disks[sparecol].dev,
   2643 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2644 				      &clabel);
   2645 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2646 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2647 					raidmarkclean( raidPtr->Disks[sparecol].dev,
   2648 						       raidPtr->raid_cinfo[sparecol].ci_vp,
   2649 						       raidPtr->mod_counter);
   2650 				}
   2651 			}
   2652 		}
   2653 	}
   2654 }
   2655 
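         /* Release the vnode of a component: auto-configured components are
          * closed with VOP_CLOSE() and vput(), anything else goes through
          * vn_close(). */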
   2656 void
   2657 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
   2658 {
   2659 
   2660 	if (vp != NULL) {
   2661 		if (auto_configured == 1) {
   2662 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2663 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2664 			vput(vp);
   2665 
   2666 		} else {
   2667 			(void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
   2668 		}
   2669 	}
   2670 }
   2671 
   2672 
   2673 void
   2674 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
   2675 {
   2676 	int r,c;
   2677 	struct vnode *vp;
   2678 	int acd;
   2679 
   2680 
   2681 	/* We take this opportunity to close the vnodes like we should.. */
   2682 
   2683 	for (c = 0; c < raidPtr->numCol; c++) {
   2684 		vp = raidPtr->raid_cinfo[c].ci_vp;
   2685 		acd = raidPtr->Disks[c].auto_configured;
   2686 		rf_close_component(raidPtr, vp, acd);
   2687 		raidPtr->raid_cinfo[c].ci_vp = NULL;
   2688 		raidPtr->Disks[c].auto_configured = 0;
   2689 	}
   2690 
   2691 	for (r = 0; r < raidPtr->numSpare; r++) {
   2692 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
   2693 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
   2694 		rf_close_component(raidPtr, vp, acd);
   2695 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
   2696 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
   2697 	}
   2698 }
   2699 
   2700 
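         /* The next four functions are the bodies of the kernel threads
          * spawned with RF_CREATE_THREAD (e.g. from the ioctl handlers
          * above).  Each one marks its operation as in progress, does the
          * work at splbio(), and finishes with kthread_exit(). */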
   2701 void
   2702 rf_ReconThread(struct rf_recon_req *req)
   2703 {
   2704 	int     s;
   2705 	RF_Raid_t *raidPtr;
   2706 
   2707 	s = splbio();
   2708 	raidPtr = (RF_Raid_t *) req->raidPtr;
   2709 	raidPtr->recon_in_progress = 1;
   2710 
   2711 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
   2712 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
   2713 
   2714 	RF_Free(req, sizeof(*req));
   2715 
   2716 	raidPtr->recon_in_progress = 0;
   2717 	splx(s);
   2718 
   2719 	/* That's all... */
   2720 	kthread_exit(0);	/* does not return */
   2721 }
   2722 
   2723 void
   2724 rf_RewriteParityThread(RF_Raid_t *raidPtr)
   2725 {
   2726 	int retcode;
   2727 	int s;
   2728 
   2729 	raidPtr->parity_rewrite_stripes_done = 0;
   2730 	raidPtr->parity_rewrite_in_progress = 1;
   2731 	s = splbio();
   2732 	retcode = rf_RewriteParity(raidPtr);
   2733 	splx(s);
   2734 	if (retcode) {
   2735 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
   2736 	} else {
   2737 		/* set the clean bit!  If we shutdown correctly,
   2738 		   the clean bit on each component label will get
   2739 		   set */
   2740 		raidPtr->parity_good = RF_RAID_CLEAN;
   2741 	}
   2742 	raidPtr->parity_rewrite_in_progress = 0;
   2743 
   2744 	/* Anyone waiting for us to stop?  If so, inform them... */
   2745 	if (raidPtr->waitShutdown) {
   2746 		wakeup(&raidPtr->parity_rewrite_in_progress);
   2747 	}
   2748 
   2749 	/* That's all... */
   2750 	kthread_exit(0);	/* does not return */
   2751 }
   2752 
   2753 
   2754 void
   2755 rf_CopybackThread(RF_Raid_t *raidPtr)
   2756 {
   2757 	int s;
   2758 
   2759 	raidPtr->copyback_in_progress = 1;
   2760 	s = splbio();
   2761 	rf_CopybackReconstructedData(raidPtr);
   2762 	splx(s);
   2763 	raidPtr->copyback_in_progress = 0;
   2764 
   2765 	/* That's all... */
   2766 	kthread_exit(0);	/* does not return */
   2767 }
   2768 
   2769 
   2770 void
   2771 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
   2772 {
   2773 	int s;
   2774 	RF_Raid_t *raidPtr;
   2775 
   2776 	s = splbio();
   2777 	raidPtr = req->raidPtr;
   2778 	raidPtr->recon_in_progress = 1;
   2779 	rf_ReconstructInPlace(raidPtr, req->col);
   2780 	RF_Free(req, sizeof(*req));
   2781 	raidPtr->recon_in_progress = 0;
   2782 	splx(s);
   2783 
   2784 	/* That's all... */
   2785 	kthread_exit(0);	/* does not return */
   2786 }
   2787 
   2788 static RF_AutoConfig_t *
   2789 rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
   2790     const char *cname, RF_SectorCount_t size)
   2791 {
   2792 	int good_one = 0;
   2793 	RF_ComponentLabel_t *clabel;
   2794 	RF_AutoConfig_t *ac;
   2795 
   2796 	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
   2797 	if (clabel == NULL) {
   2798 oomem:
   2799 		    while(ac_list) {
   2800 			    ac = ac_list;
   2801 			    if (ac->clabel)
   2802 				    free(ac->clabel, M_RAIDFRAME);
   2803 			    ac_list = ac_list->next;
   2804 			    free(ac, M_RAIDFRAME);
   2805 		    }
   2806 		    printf("RAID auto config: out of memory!\n");
   2807 		    return NULL; /* XXX probably should panic? */
   2808 	}
   2809 
   2810 	if (!raidread_component_label(dev, vp, clabel)) {
   2811 		    /* Got the label.  Does it look reasonable? */
   2812 		    if (rf_reasonable_label(clabel) &&
   2813 			(clabel->partitionSize <= size)) {
   2814 #ifdef DEBUG
   2815 			    printf("Component on: %s: %llu\n",
   2816 				cname, (unsigned long long)size);
   2817 			    rf_print_component_label(clabel);
   2818 #endif
   2819 			    /* if it's reasonable, add it, else ignore it. */
   2820 			    ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
   2821 				M_NOWAIT);
   2822 			    if (ac == NULL) {
   2823 				    free(clabel, M_RAIDFRAME);
   2824 				    goto oomem;
   2825 			    }
   2826 			    strlcpy(ac->devname, cname, sizeof(ac->devname));
   2827 			    ac->dev = dev;
   2828 			    ac->vp = vp;
   2829 			    ac->clabel = clabel;
   2830 			    ac->next = ac_list;
   2831 			    ac_list = ac;
   2832 			    good_one = 1;
   2833 		    }
   2834 	}
   2835 	if (!good_one) {
   2836 		/* cleanup */
   2837 		free(clabel, M_RAIDFRAME);
   2838 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2839 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2840 		vput(vp);
   2841 	}
   2842 	return ac_list;
   2843 }
   2844 
   2845 RF_AutoConfig_t *
   2846 rf_find_raid_components(void)
   2847 {
   2848 	struct vnode *vp;
   2849 	struct disklabel label;
   2850 	device_t dv;
   2851 	dev_t dev;
   2852 	int bmajor, bminor, wedge;
   2853 	int error;
   2854 	int i;
   2855 	RF_AutoConfig_t *ac_list;
   2856 
   2857 
   2858 	/* initialize the AutoConfig list */
   2859 	ac_list = NULL;
   2860 
   2861 	/* we begin by trolling through *all* the devices on the system */
   2862 
   2863 	for (dv = alldevs.tqh_first; dv != NULL;
   2864 	     dv = dv->dv_list.tqe_next) {
   2865 
   2866 		/* we are only interested in disks... */
   2867 		if (device_class(dv) != DV_DISK)
   2868 			continue;
   2869 
   2870 		/* we don't care about floppies... */
   2871 		if (device_is_a(dv, "fd")) {
   2872 			continue;
   2873 		}
   2874 
   2875 		/* we don't care about CD's... */
   2876 		if (device_is_a(dv, "cd")) {
   2877 			continue;
   2878 		}
   2879 
   2880 		/* we don't care about md's... */
   2881 		if (device_is_a(dv, "md")) {
   2882 			continue;
   2883 		}
   2884 
   2885 		/* hdfd is the Atari/Hades floppy driver */
   2886 		if (device_is_a(dv, "hdfd")) {
   2887 			continue;
   2888 		}
   2889 
   2890 		/* fdisa is the Atari/Milan floppy driver */
   2891 		if (device_is_a(dv, "fdisa")) {
   2892 			continue;
   2893 		}
   2894 
   2895 		/* need to find the device_name_to_block_device_major stuff */
   2896 		bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
   2897 
   2898 		/* get a vnode for the raw partition of this disk */
   2899 
   2900 		wedge = device_is_a(dv, "dk");
   2901 		bminor = minor(device_unit(dv));
   2902 		dev = wedge ? makedev(bmajor, bminor) :
   2903 		    MAKEDISKDEV(bmajor, bminor, RAW_PART);
   2904 		if (bdevvp(dev, &vp))
   2905 			panic("RAID can't alloc vnode");
   2906 
   2907 		error = VOP_OPEN(vp, FREAD, NOCRED);
   2908 
   2909 		if (error) {
   2910 			/* "Who cares."  Continue looking
    2911 			   for something that exists */
   2912 			vput(vp);
   2913 			continue;
   2914 		}
   2915 
   2916 		if (wedge) {
   2917 			struct dkwedge_info dkw;
   2918 			error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
   2919 			    NOCRED);
   2920 			if (error) {
   2921 				printf("RAIDframe: can't get wedge info for "
   2922 				    "dev %s (%d)\n", device_xname(dv), error);
   2923 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2924 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2925 				vput(vp);
   2926 				continue;
   2927 			}
   2928 
   2929 			if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
   2930 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2931 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2932 				vput(vp);
   2933 				continue;
   2934 			}
   2935 
   2936 			ac_list = rf_get_component(ac_list, dev, vp,
   2937 			    device_xname(dv), dkw.dkw_size);
   2938 			continue;
   2939 		}
   2940 
   2941 		/* Ok, the disk exists.  Go get the disklabel. */
   2942 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
   2943 		if (error) {
   2944 			/*
   2945 			 * XXX can't happen - open() would
   2946 			 * have errored out (or faked up one)
   2947 			 */
   2948 			if (error != ENOTTY)
   2949 				printf("RAIDframe: can't get label for dev "
   2950 				    "%s (%d)\n", device_xname(dv), error);
   2951 		}
   2952 
   2953 		/* don't need this any more.  We'll allocate it again
   2954 		   a little later if we really do... */
   2955 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2956 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2957 		vput(vp);
   2958 
   2959 		if (error)
   2960 			continue;
   2961 
   2962 		for (i = 0; i < label.d_npartitions; i++) {
   2963 			char cname[sizeof(ac_list->devname)];
   2964 
   2965 			/* We only support partitions marked as RAID */
   2966 			if (label.d_partitions[i].p_fstype != FS_RAID)
   2967 				continue;
   2968 
   2969 			dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
   2970 			if (bdevvp(dev, &vp))
   2971 				panic("RAID can't alloc vnode");
   2972 
   2973 			error = VOP_OPEN(vp, FREAD, NOCRED);
   2974 			if (error) {
   2975 				/* Whatever... */
   2976 				vput(vp);
   2977 				continue;
   2978 			}
   2979 			snprintf(cname, sizeof(cname), "%s%c",
   2980 			    device_xname(dv), 'a' + i);
   2981 			ac_list = rf_get_component(ac_list, dev, vp, cname,
   2982 				label.d_partitions[i].p_size);
   2983 		}
   2984 	}
   2985 	return ac_list;
   2986 }
   2987 
   2988 
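         /* Basic sanity checks on a component label: the version and the
          * clean flag must have known values, and the geometry fields must
          * be positive and mutually consistent. */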
   2989 static int
   2990 rf_reasonable_label(RF_ComponentLabel_t *clabel)
   2991 {
   2992 
   2993 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   2994 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   2995 	    ((clabel->clean == RF_RAID_CLEAN) ||
   2996 	     (clabel->clean == RF_RAID_DIRTY)) &&
   2997 	    clabel->row >=0 &&
   2998 	    clabel->column >= 0 &&
   2999 	    clabel->num_rows > 0 &&
   3000 	    clabel->num_columns > 0 &&
   3001 	    clabel->row < clabel->num_rows &&
   3002 	    clabel->column < clabel->num_columns &&
   3003 	    clabel->blockSize > 0 &&
   3004 	    clabel->numBlocks > 0) {
   3005 		/* label looks reasonable enough... */
   3006 		return(1);
   3007 	}
   3008 	return(0);
   3009 }
   3010 
   3011 
   3012 #ifdef DEBUG
   3013 void
   3014 rf_print_component_label(RF_ComponentLabel_t *clabel)
   3015 {
   3016 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
   3017 	       clabel->row, clabel->column,
   3018 	       clabel->num_rows, clabel->num_columns);
   3019 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
   3020 	       clabel->version, clabel->serial_number,
   3021 	       clabel->mod_counter);
   3022 	printf("   Clean: %s Status: %d\n",
   3023 	       clabel->clean ? "Yes" : "No", clabel->status );
   3024 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
   3025 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
   3026 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
   3027 	       (char) clabel->parityConfig, clabel->blockSize,
   3028 	       clabel->numBlocks);
   3029 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
   3030 	printf("   Contains root partition: %s\n",
   3031 	       clabel->root_partition ? "Yes" : "No" );
   3032 	printf("   Last configured as: raid%d\n", clabel->last_unit );
   3033 #if 0
   3034 	   printf("   Config order: %d\n", clabel->config_order);
   3035 #endif
   3036 
   3037 }
   3038 #endif
   3039 
   3040 RF_ConfigSet_t *
   3041 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
   3042 {
   3043 	RF_AutoConfig_t *ac;
   3044 	RF_ConfigSet_t *config_sets;
   3045 	RF_ConfigSet_t *cset;
   3046 	RF_AutoConfig_t *ac_next;
   3047 
   3048 
   3049 	config_sets = NULL;
   3050 
   3051 	/* Go through the AutoConfig list, and figure out which components
   3052 	   belong to what sets.  */
   3053 	ac = ac_list;
   3054 	while(ac!=NULL) {
   3055 		/* we're going to putz with ac->next, so save it here
   3056 		   for use at the end of the loop */
   3057 		ac_next = ac->next;
   3058 
   3059 		if (config_sets == NULL) {
   3060 			/* will need at least this one... */
   3061 			config_sets = (RF_ConfigSet_t *)
   3062 				malloc(sizeof(RF_ConfigSet_t),
   3063 				       M_RAIDFRAME, M_NOWAIT);
   3064 			if (config_sets == NULL) {
   3065 				panic("rf_create_auto_sets: No memory!");
   3066 			}
   3067 			/* this one is easy :) */
   3068 			config_sets->ac = ac;
   3069 			config_sets->next = NULL;
   3070 			config_sets->rootable = 0;
   3071 			ac->next = NULL;
   3072 		} else {
   3073 			/* which set does this component fit into? */
   3074 			cset = config_sets;
   3075 			while(cset!=NULL) {
   3076 				if (rf_does_it_fit(cset, ac)) {
   3077 					/* looks like it matches... */
   3078 					ac->next = cset->ac;
   3079 					cset->ac = ac;
   3080 					break;
   3081 				}
   3082 				cset = cset->next;
   3083 			}
   3084 			if (cset==NULL) {
    3085 				/* didn't find a match above... new set.. */
   3086 				cset = (RF_ConfigSet_t *)
   3087 					malloc(sizeof(RF_ConfigSet_t),
   3088 					       M_RAIDFRAME, M_NOWAIT);
   3089 				if (cset == NULL) {
   3090 					panic("rf_create_auto_sets: No memory!");
   3091 				}
   3092 				cset->ac = ac;
   3093 				ac->next = NULL;
   3094 				cset->next = config_sets;
   3095 				cset->rootable = 0;
   3096 				config_sets = cset;
   3097 			}
   3098 		}
   3099 		ac = ac_next;
   3100 	}
   3101 
   3102 
   3103 	return(config_sets);
   3104 }
   3105 
   3106 static int
   3107 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
   3108 {
   3109 	RF_ComponentLabel_t *clabel1, *clabel2;
   3110 
   3111 	/* If this one matches the *first* one in the set, that's good
   3112 	   enough, since the other members of the set would have been
   3113 	   through here too... */
   3114 	/* note that we are not checking partitionSize here..
   3115 
   3116 	   Note that we are also not checking the mod_counters here.
    3117 	   If everything else matches except the mod_counter, that's
   3118 	   good enough for this test.  We will deal with the mod_counters
   3119 	   a little later in the autoconfiguration process.
   3120 
   3121 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   3122 
   3123 	   The reason we don't check for this is that failed disks
   3124 	   will have lower modification counts.  If those disks are
   3125 	   not added to the set they used to belong to, then they will
   3126 	   form their own set, which may result in 2 different sets,
   3127 	   for example, competing to be configured at raid0, and
   3128 	   perhaps competing to be the root filesystem set.  If the
   3129 	   wrong ones get configured, or both attempt to become /,
    3130 	   weird behaviour and/or serious lossage will occur.  Thus we
   3131 	   need to bring them into the fold here, and kick them out at
   3132 	   a later point.
   3133 
   3134 	*/
   3135 
   3136 	clabel1 = cset->ac->clabel;
   3137 	clabel2 = ac->clabel;
   3138 	if ((clabel1->version == clabel2->version) &&
   3139 	    (clabel1->serial_number == clabel2->serial_number) &&
   3140 	    (clabel1->num_rows == clabel2->num_rows) &&
   3141 	    (clabel1->num_columns == clabel2->num_columns) &&
   3142 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   3143 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   3144 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   3145 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   3146 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   3147 	    (clabel1->blockSize == clabel2->blockSize) &&
   3148 	    (clabel1->numBlocks == clabel2->numBlocks) &&
   3149 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   3150 	    (clabel1->root_partition == clabel2->root_partition) &&
   3151 	    (clabel1->last_unit == clabel2->last_unit) &&
   3152 	    (clabel1->config_order == clabel2->config_order)) {
    3153 		/* if it gets here, it almost *has* to be a match */
   3154 	} else {
   3155 		/* it's not consistent with somebody in the set..
   3156 		   punt */
   3157 		return(0);
   3158 	}
   3159 	/* all was fine.. it must fit... */
   3160 	return(1);
   3161 }
   3162 
   3163 int
   3164 rf_have_enough_components(RF_ConfigSet_t *cset)
   3165 {
   3166 	RF_AutoConfig_t *ac;
   3167 	RF_AutoConfig_t *auto_config;
   3168 	RF_ComponentLabel_t *clabel;
   3169 	int c;
   3170 	int num_cols;
   3171 	int num_missing;
   3172 	int mod_counter;
   3173 	int mod_counter_found;
   3174 	int even_pair_failed;
   3175 	char parity_type;
   3176 
   3177 
   3178 	/* check to see that we have enough 'live' components
   3179 	   of this set.  If so, we can configure it if necessary */
   3180 
   3181 	num_cols = cset->ac->clabel->num_columns;
   3182 	parity_type = cset->ac->clabel->parityConfig;
   3183 
   3184 	/* XXX Check for duplicate components!?!?!? */
   3185 
   3186 	/* Determine what the mod_counter is supposed to be for this set. */
   3187 
   3188 	mod_counter_found = 0;
   3189 	mod_counter = 0;
   3190 	ac = cset->ac;
   3191 	while(ac!=NULL) {
   3192 		if (mod_counter_found==0) {
   3193 			mod_counter = ac->clabel->mod_counter;
   3194 			mod_counter_found = 1;
   3195 		} else {
   3196 			if (ac->clabel->mod_counter > mod_counter) {
   3197 				mod_counter = ac->clabel->mod_counter;
   3198 			}
   3199 		}
   3200 		ac = ac->next;
   3201 	}
   3202 
   3203 	num_missing = 0;
   3204 	auto_config = cset->ac;
   3205 
   3206 	even_pair_failed = 0;
   3207 	for(c=0; c<num_cols; c++) {
   3208 		ac = auto_config;
   3209 		while(ac!=NULL) {
   3210 			if ((ac->clabel->column == c) &&
   3211 			    (ac->clabel->mod_counter == mod_counter)) {
   3212 				/* it's this one... */
   3213 #ifdef DEBUG
   3214 				printf("Found: %s at %d\n",
   3215 				       ac->devname,c);
   3216 #endif
   3217 				break;
   3218 			}
   3219 			ac=ac->next;
   3220 		}
   3221 		if (ac==NULL) {
   3222 				/* Didn't find one here! */
   3223 				/* special case for RAID 1, especially
   3224 				   where there are more than 2
   3225 				   components (where RAIDframe treats
   3226 				   things a little differently :( ) */
   3227 			if (parity_type == '1') {
   3228 				if (c%2 == 0) { /* even component */
   3229 					even_pair_failed = 1;
   3230 				} else { /* odd component.  If
   3231 					    we're failed, and
   3232 					    so is the even
   3233 					    component, it's
   3234 					    "Good Night, Charlie" */
   3235 					if (even_pair_failed == 1) {
   3236 						return(0);
   3237 					}
   3238 				}
   3239 			} else {
   3240 				/* normal accounting */
   3241 				num_missing++;
   3242 			}
   3243 		}
   3244 		if ((parity_type == '1') && (c%2 == 1)) {
   3245 				/* Just did an even component, and we didn't
   3246 				   bail.. reset the even_pair_failed flag,
   3247 				   and go on to the next component.... */
   3248 			even_pair_failed = 0;
   3249 		}
   3250 	}
   3251 
   3252 	clabel = cset->ac->clabel;
   3253 
   3254 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
   3255 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
   3256 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
   3257 		/* XXX this needs to be made *much* more general */
   3258 		/* Too many failures */
   3259 		return(0);
   3260 	}
   3261 	/* otherwise, all is well, and we've got enough to take a kick
   3262 	   at autoconfiguring this set */
   3263 	return(1);
   3264 }
   3265 
   3266 void
   3267 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
   3268 			RF_Raid_t *raidPtr)
   3269 {
   3270 	RF_ComponentLabel_t *clabel;
   3271 	int i;
   3272 
   3273 	clabel = ac->clabel;
   3274 
   3275 	/* 1. Fill in the common stuff */
   3276 	config->numRow = clabel->num_rows = 1;
   3277 	config->numCol = clabel->num_columns;
   3278 	config->numSpare = 0; /* XXX should this be set here? */
   3279 	config->sectPerSU = clabel->sectPerSU;
   3280 	config->SUsPerPU = clabel->SUsPerPU;
   3281 	config->SUsPerRU = clabel->SUsPerRU;
   3282 	config->parityConfig = clabel->parityConfig;
   3283 	/* XXX... */
   3284 	strcpy(config->diskQueueType,"fifo");
   3285 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3286 	config->layoutSpecificSize = 0; /* XXX ?? */
   3287 
   3288 	while(ac!=NULL) {
   3289 		/* row/col values will be in range due to the checks
   3290 		   in reasonable_label() */
   3291 		strcpy(config->devnames[0][ac->clabel->column],
   3292 		       ac->devname);
   3293 		ac = ac->next;
   3294 	}
   3295 
    3296 	for(i=0; i<RF_MAXDBGV; i++) {
   3297 		config->debugVars[i][0] = 0;
   3298 	}
   3299 }
   3300 
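/*
 * rf_set_autoconfig() -- set the autoconfigure flag for this RAID set,
 * and push the new value into the component label of every optimal
 * component and every used spare.  Returns the new value.
 */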
   3301 int
   3302 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
   3303 {
   3304 	RF_ComponentLabel_t clabel;
   3305 	struct vnode *vp;
   3306 	dev_t dev;
   3307 	int column;
   3308 	int sparecol;
   3309 
   3310 	raidPtr->autoconfigure = new_value;
   3311 
   3312 	for(column=0; column<raidPtr->numCol; column++) {
   3313 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3314 			dev = raidPtr->Disks[column].dev;
   3315 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3316 			raidread_component_label(dev, vp, &clabel);
   3317 			clabel.autoconfigure = new_value;
   3318 			raidwrite_component_label(dev, vp, &clabel);
   3319 		}
   3320 	}
   3321 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3322 		sparecol = raidPtr->numCol + column;
   3323 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3324 			dev = raidPtr->Disks[sparecol].dev;
   3325 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3326 			raidread_component_label(dev, vp, &clabel);
   3327 			clabel.autoconfigure = new_value;
   3328 			raidwrite_component_label(dev, vp, &clabel);
   3329 		}
   3330 	}
   3331 	return(new_value);
   3332 }
   3333 
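/*
 * rf_set_rootpartition() -- like rf_set_autoconfig(), but update the
 * root_partition flag instead.  Returns the new value.
 */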
   3334 int
   3335 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
   3336 {
   3337 	RF_ComponentLabel_t clabel;
   3338 	struct vnode *vp;
   3339 	dev_t dev;
   3340 	int column;
   3341 	int sparecol;
   3342 
   3343 	raidPtr->root_partition = new_value;
   3344 	for(column=0; column<raidPtr->numCol; column++) {
   3345 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3346 			dev = raidPtr->Disks[column].dev;
   3347 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3348 			raidread_component_label(dev, vp, &clabel);
   3349 			clabel.root_partition = new_value;
   3350 			raidwrite_component_label(dev, vp, &clabel);
   3351 		}
   3352 	}
   3353 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3354 		sparecol = raidPtr->numCol + column;
   3355 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3356 			dev = raidPtr->Disks[sparecol].dev;
   3357 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3358 			raidread_component_label(dev, vp, &clabel);
   3359 			clabel.root_partition = new_value;
   3360 			raidwrite_component_label(dev, vp, &clabel);
   3361 		}
   3362 	}
   3363 	return(new_value);
   3364 }
   3365 
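/*
 * rf_release_all_vps() -- close and release the vnodes that were opened
 * for the components of this config set during the autoconfiguration
 * probe.
 */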
   3366 void
   3367 rf_release_all_vps(RF_ConfigSet_t *cset)
   3368 {
   3369 	RF_AutoConfig_t *ac;
   3370 
   3371 	ac = cset->ac;
   3372 	while(ac!=NULL) {
   3373 		/* Close the vp, and give it back */
   3374 		if (ac->vp) {
   3375 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3376 			VOP_CLOSE(ac->vp, FREAD, NOCRED);
   3377 			vput(ac->vp);
   3378 			ac->vp = NULL;
   3379 		}
   3380 		ac = ac->next;
   3381 	}
   3382 }
   3383 
   3384 
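/*
 * rf_cleanup_config_set() -- free the component labels, the autoconfig
 * structures, and finally the config set itself.
 */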
   3385 void
   3386 rf_cleanup_config_set(RF_ConfigSet_t *cset)
   3387 {
   3388 	RF_AutoConfig_t *ac;
   3389 	RF_AutoConfig_t *next_ac;
   3390 
   3391 	ac = cset->ac;
   3392 	while(ac!=NULL) {
   3393 		next_ac = ac->next;
    3394 		/* free the component label */
    3395 		free(ac->clabel, M_RAIDFRAME);
    3396 		/* free the autoconfig structure */
    3397 		free(ac, M_RAIDFRAME);
    3398 		/* advance to the next entry */
   3399 		ac = next_ac;
   3400 	}
   3401 	/* and, finally, nuke the config set */
   3402 	free(cset, M_RAIDFRAME);
   3403 }
   3404 
   3405 
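/*
 * raid_init_component_label() -- initialize a component label from the
 * current parameters of the RAID set.  The label is marked dirty here;
 * per-component fields (such as the column number) are expected to be
 * filled in by the caller.
 */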
   3406 void
   3407 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
   3408 {
   3409 	/* current version number */
   3410 	clabel->version = RF_COMPONENT_LABEL_VERSION;
   3411 	clabel->serial_number = raidPtr->serial_number;
   3412 	clabel->mod_counter = raidPtr->mod_counter;
   3413 	clabel->num_rows = 1;
   3414 	clabel->num_columns = raidPtr->numCol;
   3415 	clabel->clean = RF_RAID_DIRTY; /* not clean */
   3416 	clabel->status = rf_ds_optimal; /* "It's good!" */
   3417 
   3418 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
   3419 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
   3420 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
   3421 
   3422 	clabel->blockSize = raidPtr->bytesPerSector;
   3423 	clabel->numBlocks = raidPtr->sectorsPerDisk;
   3424 
   3425 	/* XXX not portable */
   3426 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
   3427 	clabel->maxOutstanding = raidPtr->maxOutstanding;
   3428 	clabel->autoconfigure = raidPtr->autoconfigure;
   3429 	clabel->root_partition = raidPtr->root_partition;
   3430 	clabel->last_unit = raidPtr->raidid;
   3431 	clabel->config_order = raidPtr->config_order;
   3432 }
   3433 
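/*
 * rf_auto_config_set() -- configure one autodetected RAID set: pick a
 * unit number (preferring the one recorded in the component labels),
 * build a configuration from the labels, and call rf_Configure().
 * Returns 0 on success, with *unit set to the unit number that was used.
 */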
   3434 int
   3435 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
   3436 {
   3437 	RF_Raid_t *raidPtr;
   3438 	RF_Config_t *config;
   3439 	int raidID;
   3440 	int retcode;
   3441 
   3442 #ifdef DEBUG
   3443 	printf("RAID autoconfigure\n");
   3444 #endif
   3445 
   3446 	retcode = 0;
   3447 	*unit = -1;
   3448 
   3449 	/* 1. Create a config structure */
   3450 
   3451 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
   3452 				       M_RAIDFRAME,
   3453 				       M_NOWAIT);
   3454 	if (config==NULL) {
   3455 		printf("Out of mem!?!?\n");
   3456 				/* XXX do something more intelligent here. */
   3457 		return(1);
   3458 	}
   3459 
   3460 	memset(config, 0, sizeof(RF_Config_t));
   3461 
    3462 	/*
    3463 	   2. Figure out what RAID ID this set is supposed to live at.
    3464 	   See if we can get the same RAID device that it was configured
    3465 	   on last time.
    3466 	*/
   3467 
   3468 	raidID = cset->ac->clabel->last_unit;
   3469 	if ((raidID < 0) || (raidID >= numraid)) {
    3470 		/* Out of range; clamp to the highest valid unit. */
   3471 		raidID = numraid - 1;
   3472 	}
   3473 	if (raidPtrs[raidID]->valid != 0) {
   3474 
    3475 		/*
    3476 		   That unit is already in use; go looking for an alternative.
    3477 		   Search from the highest unit downwards so that raid0 is
    3478 		   only used as a last resort.
    3479 		*/
   3480 
   3481 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
   3482 			if (raidPtrs[raidID]->valid == 0) {
   3483 				/* can use this one! */
   3484 				break;
   3485 			}
   3486 		}
   3487 	}
   3488 
   3489 	if (raidID < 0) {
   3490 		/* punt... */
   3491 		printf("Unable to auto configure this set!\n");
   3492 		printf("(Out of RAID devs!)\n");
   3493 		free(config, M_RAIDFRAME);
   3494 		return(1);
   3495 	}
   3496 
   3497 #ifdef DEBUG
   3498 	printf("Configuring raid%d:\n",raidID);
   3499 #endif
   3500 
   3501 	raidPtr = raidPtrs[raidID];
   3502 
   3503 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
   3504 	raidPtr->raidid = raidID;
   3505 	raidPtr->openings = RAIDOUTSTANDING;
   3506 
   3507 	/* 3. Build the configuration structure */
   3508 	rf_create_configuration(cset->ac, config, raidPtr);
   3509 
   3510 	/* 4. Do the configuration */
   3511 	retcode = rf_Configure(raidPtr, config, cset->ac);
   3512 
   3513 	if (retcode == 0) {
   3514 
   3515 		raidinit(raidPtrs[raidID]);
   3516 
   3517 		rf_markalldirty(raidPtrs[raidID]);
   3518 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
   3519 		if (cset->ac->clabel->root_partition==1) {
   3520 			/* everything configured just fine.  Make a note
   3521 			   that this set is eligible to be root. */
   3522 			cset->rootable = 1;
   3523 			/* XXX do this here? */
   3524 			raidPtrs[raidID]->root_partition = 1;
   3525 		}
   3526 	}
   3527 
   3528 	/* 5. Cleanup */
   3529 	free(config, M_RAIDFRAME);
   3530 
   3531 	*unit = raidID;
   3532 	return(retcode);
   3533 }
   3534 
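/*
 * rf_disk_unbusy() -- charge the completed transfer described by the
 * access descriptor against the disk statistics of the RAID device.
 */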
   3535 void
   3536 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
   3537 {
   3538 	struct buf *bp;
   3539 
   3540 	bp = (struct buf *)desc->bp;
   3541 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
   3542 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
   3543 }
   3544 
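/*
 * rf_pool_init() -- wrap pool_init(9) with the watermark and priming
 * policy used for RAIDframe's private pools: prime the pool with xmin
 * items and keep at least that many on hand, but never cache more than
 * xmax idle items.  A typical (purely illustrative) call might look like:
 *
 *	rf_pool_init(&rf_pools.pda, sizeof(RF_PhysDiskAddr_t),
 *	    "rf_pda_pl", RF_MIN_FREE_PDA, RF_MAX_FREE_PDA);
 *
 * (the pool, type and constant names above are examples only).
 */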
   3545 void
   3546 rf_pool_init(struct pool *p, size_t size, const char *w_chan,
   3547 	     size_t xmin, size_t xmax)
   3548 {
   3549 	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
   3550 	pool_sethiwat(p, xmax);
   3551 	pool_prime(p, xmin);
   3552 	pool_setlowat(p, xmin);
   3553 }
   3554 
   3555 /*
   3556  * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
   3557  * if there is IO pending and if that IO could possibly be done for a
   3558  * given RAID set.  Returns 0 if IO is waiting and can be done, 1
   3559  * otherwise.
   3560  *
   3561  */
   3562 
   3563 int
   3564 rf_buf_queue_check(int raidid)
   3565 {
   3566 	if ((bufq_peek(raid_softc[raidid].buf_queue) != NULL) &&
   3567 	    raidPtrs[raidid]->openings > 0) {
   3568 		/* there is work to do */
   3569 		return 0;
   3570 	}
   3571 	/* default is nothing to do */
   3572 	return 1;
   3573 }
   3574 
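/*
 * rf_getdisksize() -- determine the size of a component.  Try the
 * traditional disklabel partition ioctl (DIOCGPART) first and fall
 * back to wedge information (DIOCGWEDGEINFO); in both cases the
 * rf_protectedSectors reserved by RAIDframe are subtracted from the
 * number of usable blocks.
 */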
   3575 int
   3576 rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
   3577 {
   3578 	struct partinfo dpart;
   3579 	struct dkwedge_info dkw;
   3580 	int error;
   3581 
   3582 	error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred);
   3583 	if (error == 0) {
   3584 		diskPtr->blockSize = dpart.disklab->d_secsize;
   3585 		diskPtr->numBlocks = dpart.part->p_size - rf_protectedSectors;
   3586 		diskPtr->partitionSize = dpart.part->p_size;
   3587 		return 0;
   3588 	}
   3589 
   3590 	error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD, l->l_cred);
   3591 	if (error == 0) {
   3592 		diskPtr->blockSize = 512;	/* XXX */
   3593 		diskPtr->numBlocks = dkw.dkw_size - rf_protectedSectors;
   3594 		diskPtr->partitionSize = dkw.dkw_size;
   3595 		return 0;
   3596 	}
   3597 	return error;
   3598 }
   3599 
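/*
 * raid_match(), raid_attach(), raid_detach() -- autoconf glue for the
 * raid pseudo-device.  match and attach are trivial; detach refuses to
 * proceed while the RAID set is still initialized.
 */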
   3600 static int
   3601 raid_match(device_t self, cfdata_t cfdata, void *aux)
   3602 {
   3603 	return 1;
   3604 }
   3605 
   3606 static void
   3607 raid_attach(device_t parent, device_t self, void *aux)
   3608 {
   3609 
   3610 }
   3611 
   3612 
   3613 static int
   3614 raid_detach(device_t self, int flags)
   3615 {
   3616 	struct raid_softc *rs = device_private(self);
   3617 
   3618 	if (rs->sc_flags & RAIDF_INITED)
   3619 		return EBUSY;
   3620 
   3621 	return 0;
   3622 }
   3623 
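/*
 * rf_set_properties() -- publish a synthetic disk geometry for the RAID
 * device through its properties dictionary.  The geometry is made up:
 * one "track" is a full data stripe and each "cylinder" holds
 * 4 * numCol tracks, presumably to keep the values reasonable for
 * consumers that still expect CHS-style numbers.
 */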
   3624 static void
   3625 rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
   3626 {
   3627 	prop_dictionary_t disk_info, odisk_info, geom;
   3628 	disk_info = prop_dictionary_create();
   3629 	geom = prop_dictionary_create();
   3630 	prop_dictionary_set_uint64(geom, "sectors-per-unit",
   3631 				   raidPtr->totalSectors);
   3632 	prop_dictionary_set_uint32(geom, "sector-size",
   3633 				   raidPtr->bytesPerSector);
   3634 
   3635 	prop_dictionary_set_uint16(geom, "sectors-per-track",
   3636 				   raidPtr->Layout.dataSectorsPerStripe);
   3637 	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
   3638 				   4 * raidPtr->numCol);
   3639 
   3640 	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
   3641 	   raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
   3642 	   (4 * raidPtr->numCol)));
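	/* e.g. a 5-column RAID 5 set with 64-sector stripe units has
	   4 * 64 = 256 data sectors per stripe, so each synthetic
	   cylinder covers 256 * (4 * 5) = 5120 sectors. */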
   3643 
   3644 	prop_dictionary_set(disk_info, "geometry", geom);
   3645 	prop_object_release(geom);
   3646 	prop_dictionary_set(device_properties(rs->sc_dev),
   3647 			    "disk-info", disk_info);
   3648 	odisk_info = rs->sc_dkdev.dk_info;
   3649 	rs->sc_dkdev.dk_info = disk_info;
   3650 	if (odisk_info)
   3651 		prop_object_release(odisk_info);
   3652 }
   3653 
   3654 /*
   3655  * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
   3656  * We end up returning whatever error was returned by the first cache flush
   3657  * that fails.
   3658  */
   3659 
   3660 static int
   3661 rf_sync_component_caches(RF_Raid_t *raidPtr)
   3662 {
   3663 	int c, sparecol;
    3664 	int e, error;
   3665 	int force = 1;
   3666 
   3667 	error = 0;
   3668 	for (c = 0; c < raidPtr->numCol; c++) {
   3669 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   3670 			e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
   3671 					  &force, FWRITE, NOCRED);
   3672 			if (e) {
   3673 				if (e != ENODEV)
   3674 					printf("raid%d: cache flush to component %s failed.\n",
   3675 					       raidPtr->raidid, raidPtr->Disks[c].devname);
   3676 				if (error == 0) {
   3677 					error = e;
   3678 				}
   3679 			}
   3680 		}
   3681 	}
   3682 
    3683 	for (c = 0; c < raidPtr->numSpare; c++) {
   3684 		sparecol = raidPtr->numCol + c;
   3685 		/* Need to ensure that the reconstruct actually completed! */
   3686 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3687 			e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
   3688 					  DIOCCACHESYNC, &force, FWRITE, NOCRED);
   3689 			if (e) {
   3690 				if (e != ENODEV)
   3691 					printf("raid%d: cache flush to component %s failed.\n",
   3692 					       raidPtr->raidid, raidPtr->Disks[sparecol].devname);
   3693 				if (error == 0) {
   3694 					error = e;
   3695 				}
   3696 			}
   3697 		}
   3698 	}
   3699 	return error;
   3700 }
   3701