rf_netbsdkintf.c revision 1.280
      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.280 2011/01/07 19:52:18 christos Exp $	*/
      2 /*-
      3  * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster; Jason R. Thorpe.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     28  * POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 /*
     32  * Copyright (c) 1990, 1993
     33  *      The Regents of the University of California.  All rights reserved.
     34  *
     35  * This code is derived from software contributed to Berkeley by
     36  * the Systems Programming Group of the University of Utah Computer
     37  * Science Department.
     38  *
     39  * Redistribution and use in source and binary forms, with or without
     40  * modification, are permitted provided that the following conditions
     41  * are met:
     42  * 1. Redistributions of source code must retain the above copyright
     43  *    notice, this list of conditions and the following disclaimer.
     44  * 2. Redistributions in binary form must reproduce the above copyright
     45  *    notice, this list of conditions and the following disclaimer in the
     46  *    documentation and/or other materials provided with the distribution.
     47  * 3. Neither the name of the University nor the names of its contributors
     48  *    may be used to endorse or promote products derived from this software
     49  *    without specific prior written permission.
     50  *
     51  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     52  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     53  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     54  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     55  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     56  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     57  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     59  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     60  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     61  * SUCH DAMAGE.
     62  *
     63  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     64  *
     65  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     66  */
     67 
     68 /*
     69  * Copyright (c) 1988 University of Utah.
     70  *
     71  * This code is derived from software contributed to Berkeley by
     72  * the Systems Programming Group of the University of Utah Computer
     73  * Science Department.
     74  *
     75  * Redistribution and use in source and binary forms, with or without
     76  * modification, are permitted provided that the following conditions
     77  * are met:
     78  * 1. Redistributions of source code must retain the above copyright
     79  *    notice, this list of conditions and the following disclaimer.
     80  * 2. Redistributions in binary form must reproduce the above copyright
     81  *    notice, this list of conditions and the following disclaimer in the
     82  *    documentation and/or other materials provided with the distribution.
     83  * 3. All advertising materials mentioning features or use of this software
     84  *    must display the following acknowledgement:
     85  *      This product includes software developed by the University of
     86  *      California, Berkeley and its contributors.
     87  * 4. Neither the name of the University nor the names of its contributors
     88  *    may be used to endorse or promote products derived from this software
     89  *    without specific prior written permission.
     90  *
     91  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     92  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     93  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     94  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     95  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     96  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     97  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     98  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     99  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    100  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    101  * SUCH DAMAGE.
    102  *
    103  * from: Utah $Hdr: cd.c 1.6 90/11/28$
    104  *
    105  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
    106  */
    107 
    108 /*
    109  * Copyright (c) 1995 Carnegie-Mellon University.
    110  * All rights reserved.
    111  *
    112  * Authors: Mark Holland, Jim Zelenka
    113  *
    114  * Permission to use, copy, modify and distribute this software and
    115  * its documentation is hereby granted, provided that both the copyright
    116  * notice and this permission notice appear in all copies of the
    117  * software, derivative works or modified versions, and any portions
    118  * thereof, and that both notices appear in supporting documentation.
    119  *
    120  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
    121  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
    122  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
    123  *
    124  * Carnegie Mellon requests users of this software to return to
    125  *
    126  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
    127  *  School of Computer Science
    128  *  Carnegie Mellon University
    129  *  Pittsburgh PA 15213-3890
    130  *
    131  * any improvements or extensions that they make and grant Carnegie the
    132  * rights to redistribute these changes.
    133  */
    134 
    135 /***********************************************************
    136  *
    137  * rf_kintf.c -- the kernel interface routines for RAIDframe
    138  *
    139  ***********************************************************/
    140 
    141 #include <sys/cdefs.h>
    142 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.280 2011/01/07 19:52:18 christos Exp $");
    143 
    144 #ifdef _KERNEL_OPT
    145 #include "opt_compat_netbsd.h"
    146 #include "opt_raid_autoconfig.h"
    147 #include "raid.h"
    148 #endif
    149 
    150 #include <sys/param.h>
    151 #include <sys/errno.h>
    152 #include <sys/pool.h>
    153 #include <sys/proc.h>
    154 #include <sys/queue.h>
    155 #include <sys/disk.h>
    156 #include <sys/device.h>
    157 #include <sys/stat.h>
    158 #include <sys/ioctl.h>
    159 #include <sys/fcntl.h>
    160 #include <sys/systm.h>
    161 #include <sys/vnode.h>
    162 #include <sys/disklabel.h>
    163 #include <sys/conf.h>
    164 #include <sys/buf.h>
    165 #include <sys/bufq.h>
    166 #include <sys/reboot.h>
    167 #include <sys/kauth.h>
    168 
    169 #include <prop/proplib.h>
    170 
    171 #include <dev/raidframe/raidframevar.h>
    172 #include <dev/raidframe/raidframeio.h>
    173 #include <dev/raidframe/rf_paritymap.h>
    174 
    175 #include "rf_raid.h"
    176 #include "rf_copyback.h"
    177 #include "rf_dag.h"
    178 #include "rf_dagflags.h"
    179 #include "rf_desc.h"
    180 #include "rf_diskqueue.h"
    181 #include "rf_etimer.h"
    182 #include "rf_general.h"
    183 #include "rf_kintf.h"
    184 #include "rf_options.h"
    185 #include "rf_driver.h"
    186 #include "rf_parityscan.h"
    187 #include "rf_threadstuff.h"
    188 
    189 #ifdef COMPAT_50
    190 #include "rf_compat50.h"
    191 #endif
    192 
    193 #ifdef DEBUG
    194 int     rf_kdebug_level = 0;
    195 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    196 #else				/* DEBUG */
    197 #define db1_printf(a) { }
    198 #endif				/* DEBUG */
    199 
    200 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
    201 
    202 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    203 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
    204 
    205 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    206 						 * spare table */
    207 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    208 						 * installation process */
    209 #endif
    210 
    211 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    212 
    213 /* prototypes */
    214 static void KernelWakeupFunc(struct buf *);
    215 static void InitBP(struct buf *, struct vnode *, unsigned,
    216     dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
    217     void *, int, struct proc *);
    218 static void raidinit(RF_Raid_t *);
    219 
    220 void raidattach(int);
    221 static int raid_match(device_t, cfdata_t, void *);
    222 static void raid_attach(device_t, device_t, void *);
    223 static int raid_detach(device_t, int);
    224 
    225 static int raidread_component_area(dev_t, struct vnode *, void *, size_t,
    226     daddr_t, daddr_t);
    227 static int raidwrite_component_area(dev_t, struct vnode *, void *, size_t,
    228     daddr_t, daddr_t, int);
    229 
    230 static int raidwrite_component_label(unsigned,
    231     dev_t, struct vnode *, RF_ComponentLabel_t *);
    232 static int raidread_component_label(unsigned,
    233     dev_t, struct vnode *, RF_ComponentLabel_t *);
    234 
    235 
    236 dev_type_open(raidopen);
    237 dev_type_close(raidclose);
    238 dev_type_read(raidread);
    239 dev_type_write(raidwrite);
    240 dev_type_ioctl(raidioctl);
    241 dev_type_strategy(raidstrategy);
    242 dev_type_dump(raiddump);
    243 dev_type_size(raidsize);
    244 
    245 const struct bdevsw raid_bdevsw = {
    246 	raidopen, raidclose, raidstrategy, raidioctl,
    247 	raiddump, raidsize, D_DISK
    248 };
    249 
    250 const struct cdevsw raid_cdevsw = {
    251 	raidopen, raidclose, raidread, raidwrite, raidioctl,
    252 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    253 };
    254 
    255 static struct dkdriver rf_dkdriver = { raidstrategy, minphys };
    256 
    257 /* XXX Not sure if the following should be replacing the raidPtrs above,
    258    or if it should be used in conjunction with that...
    259 */
    260 
    261 struct raid_softc {
    262 	device_t sc_dev;
    263 	int     sc_flags;	/* flags */
    264 	int     sc_cflags;	/* configuration flags */
    265 	uint64_t sc_size;	/* size of the raid device */
    266 	char    sc_xname[20];	/* XXX external name */
    267 	struct disk sc_dkdev;	/* generic disk device info */
    268 	struct bufq_state *buf_queue;	/* used for the device queue */
    269 };
    270 /* sc_flags */
    271 #define RAIDF_INITED	0x01	/* unit has been initialized */
    272 #define RAIDF_WLABEL	0x02	/* label area is writable */
    273 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    274 #define RAIDF_SHUTDOWN	0x08	/* unit is being shut down */
    275 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    276 #define RAIDF_LOCKED	0x80	/* unit is locked */
    277 
    278 #define	raidunit(x)	DISKUNIT(x)
    279 int numraid = 0;
    280 
    281 extern struct cfdriver raid_cd;
    282 CFATTACH_DECL3_NEW(raid, sizeof(struct raid_softc),
    283     raid_match, raid_attach, raid_detach, NULL, NULL, NULL,
    284     DVF_DETACH_SHUTDOWN);
    285 
    286 /*
    287  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    288  * Be aware that large numbers can allow the driver to consume a lot of
    289  * kernel memory, especially on writes, and in degraded mode reads.
    290  *
    291  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    292  * a single 64K write will typically require 64K for the old data,
    293  * 64K for the old parity, and 64K for the new parity, for a total
    294  * of 192K (if the parity buffer is not re-used immediately).
    295  * Even if it is used immediately, that's still 128K, which when multiplied
    296  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    297  *
    298  * Now in degraded mode, for example, a 64K read on the above setup may
    299  * require data reconstruction, which will require *all* of the 4 remaining
    300  * disks to participate -- 4 * 32K/disk == 128K again.
    301  */
    302 
    303 #ifndef RAIDOUTSTANDING
    304 #define RAIDOUTSTANDING   6
    305 #endif
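        
        /*
         * Illustrative sketch only (hypothetical RF_EXAMPLE_* names, kept
         * under "#if 0" so none of it is ever compiled): the arithmetic
         * from the comment above, worked out for the default depth of 6.
         *
         *   old data + old parity + new parity = 3 * 64K = 192K per write
         *   6 outstanding writes               = 6 * 192K = 1152K of buffers
         *   plus the incoming data itself      = 6 * 64K  = 384K
         */
        #if 0
        #define RF_EXAMPLE_IO_SIZE	(64 * 1024)
        #define RF_EXAMPLE_WRITE_COST	(3 * RF_EXAMPLE_IO_SIZE)
        #define RF_EXAMPLE_PEAK_BUFS	(RAIDOUTSTANDING * RF_EXAMPLE_WRITE_COST)
        #define RF_EXAMPLE_PEAK_TOTAL	(RF_EXAMPLE_PEAK_BUFS + \
        				 RAIDOUTSTANDING * RF_EXAMPLE_IO_SIZE)
        #endif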
    306 
    307 #define RAIDLABELDEV(dev)	\
    308 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
    309 
    310 /* declared here, and made public, for the benefit of KVM stuff.. */
    311 struct raid_softc *raid_softc;
    312 
    313 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
    314 				     struct disklabel *);
    315 static void raidgetdisklabel(dev_t);
    316 static void raidmakedisklabel(struct raid_softc *);
    317 
    318 static int raidlock(struct raid_softc *);
    319 static void raidunlock(struct raid_softc *);
    320 
    321 static int raid_detach_unlocked(struct raid_softc *);
    322 
    323 static void rf_markalldirty(RF_Raid_t *);
    324 static void rf_set_properties(struct raid_softc *, RF_Raid_t *);
    325 
    326 void rf_ReconThread(struct rf_recon_req *);
    327 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    328 void rf_CopybackThread(RF_Raid_t *raidPtr);
    329 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    330 int rf_autoconfig(device_t);
    331 void rf_buildroothack(RF_ConfigSet_t *);
    332 
    333 RF_AutoConfig_t *rf_find_raid_components(void);
    334 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    335 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    336 static int rf_reasonable_label(RF_ComponentLabel_t *);
    337 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    338 int rf_set_autoconfig(RF_Raid_t *, int);
    339 int rf_set_rootpartition(RF_Raid_t *, int);
    340 void rf_release_all_vps(RF_ConfigSet_t *);
    341 void rf_cleanup_config_set(RF_ConfigSet_t *);
    342 int rf_have_enough_components(RF_ConfigSet_t *);
    343 int rf_auto_config_set(RF_ConfigSet_t *, int *);
    344 static void rf_fix_old_label_size(RF_ComponentLabel_t *, uint64_t);
    345 
    346 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
    347 				  allow autoconfig to take place.
    348 				  Note that this is overridden by having
    349 				  RAID_AUTOCONFIG as an option in the
    350 				  kernel config file.  */
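        
        /*
         * For reference, a hedged example of how these knobs are normally
         * wired up in a kernel configuration file (syntax as in GENERIC of
         * this era; the unit count is illustrative):
         *
         *	pseudo-device	raid		8	# RAIDframe disk driver
         *	options 	RAID_AUTOCONFIG		# auto-configure RAID components
         */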
    351 
    352 struct RF_Pools_s rf_pools;
    353 
    354 void
    355 raidattach(int num)
    356 {
    357 	int raidID;
    358 	int i, rc;
    359 
    360 	aprint_debug("raidattach: Asked for %d units\n", num);
    361 
    362 	if (num <= 0) {
    363 #ifdef DIAGNOSTIC
    364 		panic("raidattach: count <= 0");
    365 #endif
    366 		return;
    367 	}
    368 	/* This is where all the initialization stuff gets done. */
    369 
    370 	numraid = num;
    371 
    372 	/* Make some space for requested number of units... */
    373 
    374 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
    375 	if (raidPtrs == NULL) {
    376 		panic("raidPtrs is NULL!!");
    377 	}
    378 
    379 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    380 	rf_mutex_init(&rf_sparet_wait_mutex);
    381 
    382 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
    383 #endif
    384 
    385 	for (i = 0; i < num; i++)
    386 		raidPtrs[i] = NULL;
    387 	rc = rf_BootRaidframe();
    388 	if (rc == 0)
    389 		aprint_verbose("Kernelized RAIDframe activated\n");
    390 	else
    391 		panic("Serious error booting RAID!!");
    392 
    393 	/* put together some data structures like the CCD device does.  This
    394 	 * lets us lock the device and what-not when it gets opened. */
    395 
    396 	raid_softc = (struct raid_softc *)
    397 		malloc(num * sizeof(struct raid_softc),
    398 		       M_RAIDFRAME, M_NOWAIT);
    399 	if (raid_softc == NULL) {
    400 		aprint_error("WARNING: no memory for RAIDframe driver\n");
    401 		return;
    402 	}
    403 
    404 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
    405 
    406 	for (raidID = 0; raidID < num; raidID++) {
    407 		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);
    408 
    409 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
    410 			  (RF_Raid_t *));
    411 		if (raidPtrs[raidID] == NULL) {
    412 			aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID);
    413 			numraid = raidID;
    414 			return;
    415 		}
    416 	}
    417 
    418 	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
    419 		aprint_error("raidattach: config_cfattach_attach failed?\n");
    420 	}
    421 
    422 #ifdef RAID_AUTOCONFIG
    423 	raidautoconfig = 1;
    424 #endif
    425 
    426 	/*
    427 	 * Register a finalizer which will be used to auto-config RAID
    428 	 * sets once all real hardware devices have been found.
    429 	 */
    430 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
    431 		aprint_error("WARNING: unable to register RAIDframe finalizer\n");
    432 }
    433 
    434 int
    435 rf_autoconfig(device_t self)
    436 {
    437 	RF_AutoConfig_t *ac_list;
    438 	RF_ConfigSet_t *config_sets;
    439 
    440 	if (raidautoconfig == 0)
    441 		return (0);
    442 
    443 	/* XXX This code can only be run once. */
    444 	raidautoconfig = 0;
    445 
    446 	/* 1. locate all RAID components on the system */
    447 	aprint_debug("Searching for RAID components...\n");
    448 	ac_list = rf_find_raid_components();
    449 
    450 	/* 2. Sort them into their respective sets. */
    451 	config_sets = rf_create_auto_sets(ac_list);
    452 
    453 	/*
    454 	 * 3. Evaluate each set and configure the valid ones.
    455 	 * This gets done in rf_buildroothack().
    456 	 */
    457 	rf_buildroothack(config_sets);
    458 
    459 	return 1;
    460 }
    461 
    462 void
    463 rf_buildroothack(RF_ConfigSet_t *config_sets)
    464 {
    465 	RF_ConfigSet_t *cset;
    466 	RF_ConfigSet_t *next_cset;
    467 	int retcode;
    468 	int raidID;
    469 	int rootID;
    470 	int col;
    471 	int num_root;
    472 	char *devname;
    473 
    474 	rootID = 0;
    475 	num_root = 0;
    476 	cset = config_sets;
    477 	while (cset != NULL) {
    478 		next_cset = cset->next;
    479 		if (rf_have_enough_components(cset) &&
    480 		    cset->ac->clabel->autoconfigure==1) {
    481 			retcode = rf_auto_config_set(cset,&raidID);
    482 			if (!retcode) {
    483 				aprint_debug("raid%d: configured ok\n", raidID);
    484 				if (cset->rootable) {
    485 					rootID = raidID;
    486 					num_root++;
    487 				}
    488 			} else {
    489 				/* The autoconfig didn't work :( */
    490 				aprint_debug("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
    491 				rf_release_all_vps(cset);
    492 			}
    493 		} else {
    494 			/* we're not autoconfiguring this set...
    495 			   release the associated resources */
    496 			rf_release_all_vps(cset);
    497 		}
    498 		/* cleanup */
    499 		rf_cleanup_config_set(cset);
    500 		cset = next_cset;
    501 	}
    502 
    503 	/* if the user has specified what the root device should be
    504 	   then we don't touch booted_device or boothowto... */
    505 
    506 	if (rootspec != NULL)
    507 		return;
    508 
    509 	/* we found something bootable... */
    510 
    511 	if (num_root == 1) {
    512 		booted_device = raid_softc[rootID].sc_dev;
    513 	} else if (num_root > 1) {
    514 
    515 		/*
    516 		 * Maybe the MD code can help. If it cannot, then
    517 		 * setroot() will discover that we have no
    518 		 * booted_device and will ask the user if nothing was
    519 		 * hardwired in the kernel config file
    520 		 */
    521 
    522 		if (booted_device == NULL)
    523 			cpu_rootconf();
    524 		if (booted_device == NULL)
    525 			return;
    526 
    527 		num_root = 0;
    528 		for (raidID = 0; raidID < numraid; raidID++) {
    529 			if (raidPtrs[raidID]->valid == 0)
    530 				continue;
    531 
    532 			if (raidPtrs[raidID]->root_partition == 0)
    533 				continue;
    534 
    535 			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
    536 				devname = raidPtrs[raidID]->Disks[col].devname;
    537 				devname += sizeof("/dev/") - 1;
    538 				if (strncmp(devname, device_xname(booted_device),
    539 					    strlen(device_xname(booted_device))) != 0)
    540 					continue;
    541 				aprint_debug("raid%d includes boot device %s\n",
    542 				       raidID, devname);
    543 				num_root++;
    544 				rootID = raidID;
    545 			}
    546 		}
    547 
    548 		if (num_root == 1) {
    549 			booted_device = raid_softc[rootID].sc_dev;
    550 		} else {
    551 			/* we can't guess.. require the user to answer... */
    552 			boothowto |= RB_ASKNAME;
    553 		}
    554 	}
    555 }
    556 
    557 
    558 int
    559 raidsize(dev_t dev)
    560 {
    561 	struct raid_softc *rs;
    562 	struct disklabel *lp;
    563 	int     part, unit, omask, size;
    564 
    565 	unit = raidunit(dev);
    566 	if (unit >= numraid)
    567 		return (-1);
    568 	rs = &raid_softc[unit];
    569 
    570 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    571 		return (-1);
    572 
    573 	part = DISKPART(dev);
    574 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
    575 	lp = rs->sc_dkdev.dk_label;
    576 
    577 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
    578 		return (-1);
    579 
    580 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    581 		size = -1;
    582 	else
    583 		size = lp->d_partitions[part].p_size *
    584 		    (lp->d_secsize / DEV_BSIZE);
    585 
    586 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
    587 		return (-1);
    588 
    589 	return (size);
    590 
    591 }
    592 
    593 int
    594 raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
    595 {
    596 	int     unit = raidunit(dev);
    597 	struct raid_softc *rs;
    598 	const struct bdevsw *bdev;
    599 	struct disklabel *lp;
    600 	RF_Raid_t *raidPtr;
    601 	daddr_t offset;
    602 	int     part, c, sparecol, j, scol, dumpto;
    603 	int     error = 0;
    604 
    605 	if (unit >= numraid)
    606 		return (ENXIO);
    607 
    608 	rs = &raid_softc[unit];
    609 	raidPtr = raidPtrs[unit];
    610 
    611 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    612 		return ENXIO;
    613 
    614 	/* we only support dumping to RAID 1 sets */
    615 	if (raidPtr->Layout.numDataCol != 1 ||
    616 	    raidPtr->Layout.numParityCol != 1)
    617 		return EINVAL;
    618 
    619 
    620 	if ((error = raidlock(rs)) != 0)
    621 		return error;
    622 
    623 	if (size % DEV_BSIZE != 0) {
    624 		error = EINVAL;
    625 		goto out;
    626 	}
    627 
    628 	if (blkno + size / DEV_BSIZE > rs->sc_size) {
    629 		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
    630 		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
    631 		    size / DEV_BSIZE, rs->sc_size);
    632 		error = EINVAL;
    633 		goto out;
    634 	}
    635 
    636 	part = DISKPART(dev);
    637 	lp = rs->sc_dkdev.dk_label;
    638 	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;
    639 
    640 	/* figure out what device is alive.. */
    641 
    642 	/*
    643 	   Look for a component to dump to.  The preference for the
    644 	   component to dump to is as follows:
    645 	   1) the master
    646 	   2) a used_spare of the master
    647 	   3) the slave
    648 	   4) a used_spare of the slave
    649 	*/
    650 
    651 	dumpto = -1;
    652 	for (c = 0; c < raidPtr->numCol; c++) {
    653 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
    654 			/* this might be the one */
    655 			dumpto = c;
    656 			break;
    657 		}
    658 	}
    659 
    660 	/*
    661 	   At this point we have possibly selected a live master or a
    662 	   live slave.  We now check to see if there is a spared
    663 	   master (or a spared slave), if we didn't find a live master
    664 	   or a live slave.
    665 	*/
    666 
    667 	for (c = 0; c < raidPtr->numSpare; c++) {
    668 		sparecol = raidPtr->numCol + c;
    669 		if (raidPtr->Disks[sparecol].status ==  rf_ds_used_spare) {
    670 			/* How about this one? */
    671 			scol = -1;
    672 			for(j=0;j<raidPtr->numCol;j++) {
    673 				if (raidPtr->Disks[j].spareCol == sparecol) {
    674 					scol = j;
    675 					break;
    676 				}
    677 			}
    678 			if (scol == 0) {
    679 				/*
    680 				   We must have found a spared master!
    681 				   We'll take that over anything else
    682 				   found so far.  (We couldn't have
    683 				   found a real master before, since
    684 				   this is a used spare, and it's
    685 				   saying that it's replacing the
    686 				   master.)  On reboot (with
    687 				   autoconfiguration turned on)
    688 				   sparecol will become the 1st
    689 				   component (component0) of this set.
    690 				*/
    691 				dumpto = sparecol;
    692 				break;
    693 			} else if (scol != -1) {
    694 				/*
    695 				   Must be a spared slave.  We'll dump
    696 				   to that if we haven't found anything
    697 				   else so far.
    698 				*/
    699 				if (dumpto == -1)
    700 					dumpto = sparecol;
    701 			}
    702 		}
    703 	}
    704 
    705 	if (dumpto == -1) {
    706 		/* we couldn't find any live components to dump to!?!?
    707 		 */
    708 		error = EINVAL;
    709 		goto out;
    710 	}
    711 
    712 	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
    713 
    714 	/*
    715 	   Note that blkno is relative to this particular partition.
    716 	   By adding the offset of this partition in the RAID
    717 	   set, and also adding RF_PROTECTED_SECTORS, we get a
    718 	   value that is relative to the partition used for the
    719 	   underlying component.
    720 	*/
    721 
    722 	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
    723 				blkno + offset, va, size);
    724 
    725 out:
    726 	raidunlock(rs);
    727 
    728 	return error;
    729 }
    730 /* ARGSUSED */
    731 int
    732 raidopen(dev_t dev, int flags, int fmt,
    733     struct lwp *l)
    734 {
    735 	int     unit = raidunit(dev);
    736 	struct raid_softc *rs;
    737 	struct disklabel *lp;
    738 	int     part, pmask;
    739 	int     error = 0;
    740 
    741 	if (unit >= numraid)
    742 		return (ENXIO);
    743 	rs = &raid_softc[unit];
    744 
    745 	if ((error = raidlock(rs)) != 0)
    746 		return (error);
    747 
    748 	if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0) {
    749 		error = EBUSY;
    750 		goto bad;
    751 	}
    752 
    753 	lp = rs->sc_dkdev.dk_label;
    754 
    755 	part = DISKPART(dev);
    756 
    757 	/*
    758 	 * If there are wedges, and this is not RAW_PART, then we
    759 	 * need to fail.
    760 	 */
    761 	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
    762 		error = EBUSY;
    763 		goto bad;
    764 	}
    765 	pmask = (1 << part);
    766 
    767 	if ((rs->sc_flags & RAIDF_INITED) &&
    768 	    (rs->sc_dkdev.dk_openmask == 0))
    769 		raidgetdisklabel(dev);
    770 
    771 	/* make sure that this partition exists */
    772 
    773 	if (part != RAW_PART) {
    774 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
    775 		    ((part >= lp->d_npartitions) ||
    776 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    777 			error = ENXIO;
    778 			goto bad;
    779 		}
    780 	}
    781 	/* Prevent this unit from being unconfigured while open. */
    782 	switch (fmt) {
    783 	case S_IFCHR:
    784 		rs->sc_dkdev.dk_copenmask |= pmask;
    785 		break;
    786 
    787 	case S_IFBLK:
    788 		rs->sc_dkdev.dk_bopenmask |= pmask;
    789 		break;
    790 	}
    791 
    792 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    793 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    794 		/* First one... mark things as dirty... Note that we *MUST*
    795 		 have done a configure before this.  I DO NOT WANT TO BE
    796 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    797 		 THAT THEY BELONG TOGETHER!!!!! */
    798 		/* XXX should check to see if we're only open for reading
    799 		   here... If so, we needn't do this, but then need some
    800 		   other way of keeping track of what's happened.. */
    801 
    802 		rf_markalldirty(raidPtrs[unit]);
    803 	}
    804 
    805 
    806 	rs->sc_dkdev.dk_openmask =
    807 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    808 
    809 bad:
    810 	raidunlock(rs);
    811 
    812 	return (error);
    813 
    814 
    815 }
    816 /* ARGSUSED */
    817 int
    818 raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
    819 {
    820 	int     unit = raidunit(dev);
    821 	struct raid_softc *rs;
    822 	int     error = 0;
    823 	int     part;
    824 
    825 	if (unit >= numraid)
    826 		return (ENXIO);
    827 	rs = &raid_softc[unit];
    828 
    829 	if ((error = raidlock(rs)) != 0)
    830 		return (error);
    831 
    832 	part = DISKPART(dev);
    833 
    834 	/* ...that much closer to allowing unconfiguration... */
    835 	switch (fmt) {
    836 	case S_IFCHR:
    837 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
    838 		break;
    839 
    840 	case S_IFBLK:
    841 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
    842 		break;
    843 	}
    844 	rs->sc_dkdev.dk_openmask =
    845 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    846 
    847 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    848 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    849 		/* Last one... device is not unconfigured yet.
    850 		   Device shutdown has taken care of setting the
    851 		   clean bits if RAIDF_INITED is not set.
    852 		   Mark things as clean... */
    853 
    854 		rf_update_component_labels(raidPtrs[unit],
    855 						 RF_FINAL_COMPONENT_UPDATE);
    856 
    857 		/* If the kernel is shutting down, it will detach
    858 		 * this RAID set soon enough.
    859 		 */
    860 	}
    861 
    862 	raidunlock(rs);
    863 	return (0);
    864 
    865 }
    866 
    867 void
    868 raidstrategy(struct buf *bp)
    869 {
    870 	int s;
    871 
    872 	unsigned int raidID = raidunit(bp->b_dev);
    873 	RF_Raid_t *raidPtr;
    874 	struct raid_softc *rs = &raid_softc[raidID];
    875 	int     wlabel;
    876 
    877 	if ((rs->sc_flags & RAIDF_INITED) ==0) {
    878 		bp->b_error = ENXIO;
    879 		goto done;
    880 	}
    881 	if (raidID >= numraid || !raidPtrs[raidID]) {
    882 		bp->b_error = ENODEV;
    883 		goto done;
    884 	}
    885 	raidPtr = raidPtrs[raidID];
    886 	if (!raidPtr->valid) {
    887 		bp->b_error = ENODEV;
    888 		goto done;
    889 	}
    890 	if (bp->b_bcount == 0) {
    891 		db1_printf(("b_bcount is zero..\n"));
    892 		goto done;
    893 	}
    894 
    895 	/*
    896 	 * Do bounds checking and adjust transfer.  If there's an
    897 	 * error, the bounds check will flag that for us.
    898 	 */
    899 
    900 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
    901 	if (DISKPART(bp->b_dev) == RAW_PART) {
    902 		uint64_t size; /* device size in DEV_BSIZE unit */
    903 
    904 		if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
    905 			size = raidPtr->totalSectors <<
    906 			    (raidPtr->logBytesPerSector - DEV_BSHIFT);
    907 		} else {
    908 			size = raidPtr->totalSectors >>
    909 			    (DEV_BSHIFT - raidPtr->logBytesPerSector);
    910 		}
    911 		if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
    912 			goto done;
    913 		}
    914 	} else {
    915 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
    916 			db1_printf(("Bounds check failed!!:%d %d\n",
    917 				(int) bp->b_blkno, (int) wlabel));
    918 			goto done;
    919 		}
    920 	}
    921 	s = splbio();
    922 
    923 	bp->b_resid = 0;
    924 
    925 	/* stuff it onto our queue */
    926 	bufq_put(rs->buf_queue, bp);
    927 
    928 	/* schedule the IO to happen at the next convenient time */
    929 	wakeup(&(raidPtrs[raidID]->iodone));
    930 
    931 	splx(s);
    932 	return;
    933 
    934 done:
    935 	bp->b_resid = bp->b_bcount;
    936 	biodone(bp);
    937 }
    938 /* ARGSUSED */
    939 int
    940 raidread(dev_t dev, struct uio *uio, int flags)
    941 {
    942 	int     unit = raidunit(dev);
    943 	struct raid_softc *rs;
    944 
    945 	if (unit >= numraid)
    946 		return (ENXIO);
    947 	rs = &raid_softc[unit];
    948 
    949 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    950 		return (ENXIO);
    951 
    952 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    953 
    954 }
    955 /* ARGSUSED */
    956 int
    957 raidwrite(dev_t dev, struct uio *uio, int flags)
    958 {
    959 	int     unit = raidunit(dev);
    960 	struct raid_softc *rs;
    961 
    962 	if (unit >= numraid)
    963 		return (ENXIO);
    964 	rs = &raid_softc[unit];
    965 
    966 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    967 		return (ENXIO);
    968 
    969 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    970 
    971 }
    972 
    973 static int
    974 raid_detach_unlocked(struct raid_softc *rs)
    975 {
    976 	int error;
    977 	RF_Raid_t *raidPtr;
    978 
    979 	raidPtr = raidPtrs[device_unit(rs->sc_dev)];
    980 
    981 	/*
    982 	 * If somebody has a partition mounted, we shouldn't
    983 	 * shutdown.
    984 	 */
    985 	if (rs->sc_dkdev.dk_openmask != 0)
    986 		return EBUSY;
    987 
    988 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    989 		;	/* not initialized: nothing to do */
    990 	else if ((error = rf_Shutdown(raidPtr)) != 0)
    991 		return error;
    992 	else
    993 		rs->sc_flags &= ~(RAIDF_INITED|RAIDF_SHUTDOWN);
    994 
    995 	/* Detach the disk. */
    996 	dkwedge_delall(&rs->sc_dkdev);
    997 	disk_detach(&rs->sc_dkdev);
    998 	disk_destroy(&rs->sc_dkdev);
    999 
   1000 	return 0;
   1001 }
   1002 
   1003 int
   1004 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
   1005 {
   1006 	int     unit = raidunit(dev);
   1007 	int     error = 0;
   1008 	int     part, pmask;
   1009 	cfdata_t cf;
   1010 	struct raid_softc *rs;
   1011 	RF_Config_t *k_cfg, *u_cfg;
   1012 	RF_Raid_t *raidPtr;
   1013 	RF_RaidDisk_t *diskPtr;
   1014 	RF_AccTotals_t *totals;
   1015 	RF_DeviceConfig_t *d_cfg, **ucfgp;
   1016 	u_char *specific_buf;
   1017 	int retcode = 0;
   1018 	int column;
   1019 /*	int raidid; */
   1020 	struct rf_recon_req *rrcopy, *rr;
   1021 	RF_ComponentLabel_t *clabel;
   1022 	RF_ComponentLabel_t *ci_label;
   1023 	RF_ComponentLabel_t **clabel_ptr;
   1024 	RF_SingleComponent_t *sparePtr,*componentPtr;
   1025 	RF_SingleComponent_t component;
   1026 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
   1027 	int i, j, d;
   1028 #ifdef __HAVE_OLD_DISKLABEL
   1029 	struct disklabel newlabel;
   1030 #endif
   1031 	struct dkwedge_info *dkw;
   1032 
   1033 	if (unit >= numraid)
   1034 		return (ENXIO);
   1035 	rs = &raid_softc[unit];
   1036 	raidPtr = raidPtrs[unit];
   1037 
   1038 	db1_printf(("raidioctl: %d %d %d %lu\n", (int) dev,
   1039 		(int) DISKPART(dev), (int) unit, cmd));
   1040 
   1041 	/* Must be open for writes for these commands... */
   1042 	switch (cmd) {
   1043 #ifdef DIOCGSECTORSIZE
   1044 	case DIOCGSECTORSIZE:
   1045 		*(u_int *)data = raidPtr->bytesPerSector;
   1046 		return 0;
   1047 	case DIOCGMEDIASIZE:
   1048 		*(off_t *)data =
   1049 		    (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
   1050 		return 0;
   1051 #endif
   1052 	case DIOCSDINFO:
   1053 	case DIOCWDINFO:
   1054 #ifdef __HAVE_OLD_DISKLABEL
   1055 	case ODIOCWDINFO:
   1056 	case ODIOCSDINFO:
   1057 #endif
   1058 	case DIOCWLABEL:
   1059 	case DIOCAWEDGE:
   1060 	case DIOCDWEDGE:
   1061 		if ((flag & FWRITE) == 0)
   1062 			return (EBADF);
   1063 	}
   1064 
   1065 	/* Must be initialized for these... */
   1066 	switch (cmd) {
   1067 	case DIOCGDINFO:
   1068 	case DIOCSDINFO:
   1069 	case DIOCWDINFO:
   1070 #ifdef __HAVE_OLD_DISKLABEL
   1071 	case ODIOCGDINFO:
   1072 	case ODIOCWDINFO:
   1073 	case ODIOCSDINFO:
   1074 	case ODIOCGDEFLABEL:
   1075 #endif
   1076 	case DIOCGPART:
   1077 	case DIOCWLABEL:
   1078 	case DIOCGDEFLABEL:
   1079 	case DIOCAWEDGE:
   1080 	case DIOCDWEDGE:
   1081 	case DIOCLWEDGES:
   1082 	case DIOCCACHESYNC:
   1083 	case RAIDFRAME_SHUTDOWN:
   1084 	case RAIDFRAME_REWRITEPARITY:
   1085 	case RAIDFRAME_GET_INFO:
   1086 	case RAIDFRAME_RESET_ACCTOTALS:
   1087 	case RAIDFRAME_GET_ACCTOTALS:
   1088 	case RAIDFRAME_KEEP_ACCTOTALS:
   1089 	case RAIDFRAME_GET_SIZE:
   1090 	case RAIDFRAME_FAIL_DISK:
   1091 	case RAIDFRAME_COPYBACK:
   1092 	case RAIDFRAME_CHECK_RECON_STATUS:
   1093 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1094 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1095 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1096 	case RAIDFRAME_ADD_HOT_SPARE:
   1097 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1098 	case RAIDFRAME_INIT_LABELS:
   1099 	case RAIDFRAME_REBUILD_IN_PLACE:
   1100 	case RAIDFRAME_CHECK_PARITY:
   1101 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1102 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1103 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1104 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1105 	case RAIDFRAME_SET_AUTOCONFIG:
   1106 	case RAIDFRAME_SET_ROOT:
   1107 	case RAIDFRAME_DELETE_COMPONENT:
   1108 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1109 	case RAIDFRAME_PARITYMAP_STATUS:
   1110 	case RAIDFRAME_PARITYMAP_GET_DISABLE:
   1111 	case RAIDFRAME_PARITYMAP_SET_DISABLE:
   1112 	case RAIDFRAME_PARITYMAP_SET_PARAMS:
   1113 		if ((rs->sc_flags & RAIDF_INITED) == 0)
   1114 			return (ENXIO);
   1115 	}
   1116 
   1117 	switch (cmd) {
   1118 #ifdef COMPAT_50
   1119 	case RAIDFRAME_GET_INFO50:
   1120 		return rf_get_info50(raidPtr, data);
   1121 
   1122 	case RAIDFRAME_CONFIGURE50:
   1123 		if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
   1124 			return retcode;
   1125 		goto config;
   1126 #endif
   1127 		/* configure the system */
   1128 	case RAIDFRAME_CONFIGURE:
   1129 
   1130 		if (raidPtr->valid) {
   1131 			/* There is a valid RAID set running on this unit! */
   1132 			printf("raid%d: Device already configured!\n",unit);
   1133 			return(EINVAL);
   1134 		}
   1135 
   1136 		/* copy-in the configuration information */
   1137 		/* data points to a pointer to the configuration structure */
   1138 
   1139 		u_cfg = *((RF_Config_t **) data);
   1140 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
   1141 		if (k_cfg == NULL) {
   1142 			return (ENOMEM);
   1143 		}
   1144 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
   1145 		if (retcode) {
   1146 			RF_Free(k_cfg, sizeof(RF_Config_t));
   1147 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
   1148 				retcode));
   1149 			return (retcode);
   1150 		}
   1151 		goto config;
   1152 	config:
   1153 		/* allocate a buffer for the layout-specific data, and copy it
   1154 		 * in */
   1155 		if (k_cfg->layoutSpecificSize) {
   1156 			if (k_cfg->layoutSpecificSize > 10000) {
   1157 				/* sanity check */
   1158 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1159 				return (EINVAL);
   1160 			}
   1161 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
   1162 			    (u_char *));
   1163 			if (specific_buf == NULL) {
   1164 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1165 				return (ENOMEM);
   1166 			}
   1167 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
   1168 			    k_cfg->layoutSpecificSize);
   1169 			if (retcode) {
   1170 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1171 				RF_Free(specific_buf,
   1172 					k_cfg->layoutSpecificSize);
   1173 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
   1174 					retcode));
   1175 				return (retcode);
   1176 			}
   1177 		} else
   1178 			specific_buf = NULL;
   1179 		k_cfg->layoutSpecific = specific_buf;
   1180 
   1181 		/* should do some kind of sanity check on the configuration.
   1182 		 * Store the sum of all the bytes in the last byte? */
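        
        		/*
        		 * A minimal sketch of that byte-sum idea (purely
        		 * hypothetical, kept under "#if 0" and not wired to
        		 * anything): userland would store the sum of all
        		 * preceding bytes in the structure's last byte.
        		 */
        #if 0
        		{
        			u_char cfg_sum = 0, *cfg_p = (u_char *)k_cfg;
        			size_t cfg_i;
        
        			for (cfg_i = 0; cfg_i < sizeof(RF_Config_t) - 1; cfg_i++)
        				cfg_sum += cfg_p[cfg_i];
        			if (cfg_sum != cfg_p[sizeof(RF_Config_t) - 1]) {
        				if (k_cfg->layoutSpecificSize)
        					RF_Free(specific_buf,
        					    k_cfg->layoutSpecificSize);
        				RF_Free(k_cfg, sizeof(RF_Config_t));
        				return (EINVAL);
        			}
        		}
        #endif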
   1183 
   1184 		/* configure the system */
   1185 
   1186 		/*
   1187 		 * Clear the entire RAID descriptor, just to make sure
   1188 		 *  there is no stale data left in the case of a
   1189 		 *  reconfiguration
   1190 		 */
   1191 		memset(raidPtr, 0, sizeof(*raidPtr));
   1192 		raidPtr->raidid = unit;
   1193 
   1194 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
   1195 
   1196 		if (retcode == 0) {
   1197 
   1198 			/* allow this many simultaneous IO's to
   1199 			   this RAID device */
   1200 			raidPtr->openings = RAIDOUTSTANDING;
   1201 
   1202 			raidinit(raidPtr);
   1203 			rf_markalldirty(raidPtr);
   1204 		}
   1205 		/* free the buffers.  No return code here. */
   1206 		if (k_cfg->layoutSpecificSize) {
   1207 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
   1208 		}
   1209 		RF_Free(k_cfg, sizeof(RF_Config_t));
   1210 
   1211 		return (retcode);
   1212 
   1213 		/* shutdown the system */
   1214 	case RAIDFRAME_SHUTDOWN:
   1215 
   1216 		part = DISKPART(dev);
   1217 		pmask = (1 << part);
   1218 
   1219 		if ((error = raidlock(rs)) != 0)
   1220 			return (error);
   1221 
   1222 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
   1223 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
   1224 			(rs->sc_dkdev.dk_copenmask & pmask)))
   1225 			retcode = EBUSY;
   1226 		else {
   1227 			rs->sc_flags |= RAIDF_SHUTDOWN;
   1228 			rs->sc_dkdev.dk_copenmask &= ~pmask;
   1229 			rs->sc_dkdev.dk_bopenmask &= ~pmask;
   1230 			rs->sc_dkdev.dk_openmask &= ~pmask;
   1231 			retcode = 0;
   1232 		}
   1233 
   1234 		raidunlock(rs);
   1235 
   1236 		if (retcode != 0)
   1237 			return retcode;
   1238 
   1239 		/* free the pseudo device attach bits */
   1240 
   1241 		cf = device_cfdata(rs->sc_dev);
   1242 		if ((retcode = config_detach(rs->sc_dev, DETACH_QUIET)) == 0)
   1243 			free(cf, M_RAIDFRAME);
   1244 
   1245 		return (retcode);
   1246 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1247 		clabel_ptr = (RF_ComponentLabel_t **) data;
   1248 		/* need to read the component label for the disk indicated
   1249 		   by row,column in clabel */
   1250 
   1251 		/*
   1252 		 * Perhaps there should be an option to skip the in-core
   1253 		 * copy and hit the disk, as with disklabel(8).
   1254 		 */
   1255 		RF_Malloc(clabel, sizeof(*clabel), (RF_ComponentLabel_t *));
   1256 
   1257 		retcode = copyin(*clabel_ptr, clabel, sizeof(*clabel));
   1258 
   1259 		if (retcode) {
   1260 			RF_Free(clabel, sizeof(*clabel));
   1261 			return retcode;
   1262 		}
   1263 
   1264 		clabel->row = 0; /* Don't allow looking at anything else.*/
   1265 
   1266 		column = clabel->column;
   1267 
   1268 		if ((column < 0) || (column >= raidPtr->numCol +
   1269 		    raidPtr->numSpare)) {
   1270 			RF_Free(clabel, sizeof(*clabel));
   1271 			return EINVAL;
   1272 		}
   1273 
   1274 		RF_Free(clabel, sizeof(*clabel));
   1275 
   1276 		clabel = raidget_component_label(raidPtr, column);
   1277 
   1278 		return copyout(clabel, *clabel_ptr, sizeof(**clabel_ptr));
   1279 
   1280 #if 0
   1281 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1282 		clabel = (RF_ComponentLabel_t *) data;
   1283 
   1284 		/* XXX check the label for valid stuff... */
   1285 		/* Note that some things *should not* get modified --
   1286 		   the user should be re-initing the labels instead of
   1287 		   trying to patch things.
   1288 		   */
   1289 
   1290 		raidid = raidPtr->raidid;
   1291 #ifdef DEBUG
   1292 		printf("raid%d: Got component label:\n", raidid);
   1293 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1294 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1295 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1296 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1297 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1298 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1299 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1300 #endif
   1301 		clabel->row = 0;
   1302 		column = clabel->column;
   1303 
   1304 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1305 			return(EINVAL);
   1306 		}
   1307 
   1308 		/* XXX this isn't allowed to do anything for now :-) */
   1309 
   1310 		/* XXX and before it is, we need to fill in the rest
   1311 		   of the fields!?!?!?! */
   1312 		memcpy(raidget_component_label(raidPtr, column),
   1313 		    clabel, sizeof(*clabel));
   1314 		raidflush_component_label(raidPtr, column);
   1315 		return (0);
   1316 #endif
   1317 
   1318 	case RAIDFRAME_INIT_LABELS:
   1319 		clabel = (RF_ComponentLabel_t *) data;
   1320 		/*
   1321 		   we only want the serial number from
   1322 		   the above.  We get all the rest of the information
   1323 		   from the config that was used to create this RAID
   1324 		   set.
   1325 		   */
   1326 
   1327 		raidPtr->serial_number = clabel->serial_number;
   1328 
   1329 		for(column=0;column<raidPtr->numCol;column++) {
   1330 			diskPtr = &raidPtr->Disks[column];
   1331 			if (!RF_DEAD_DISK(diskPtr->status)) {
   1332 				ci_label = raidget_component_label(raidPtr,
   1333 				    column);
   1334 				/* Zeroing this is important. */
   1335 				memset(ci_label, 0, sizeof(*ci_label));
   1336 				raid_init_component_label(raidPtr, ci_label);
   1337 				ci_label->serial_number =
   1338 				    raidPtr->serial_number;
   1339 				ci_label->row = 0; /* we don't pretend to support more */
   1340 				ci_label->partitionSize =
   1341 				    diskPtr->partitionSize;
   1342 				ci_label->column = column;
   1343 				raidflush_component_label(raidPtr, column);
   1344 			}
   1345 			/* XXXjld what about the spares? */
   1346 		}
   1347 
   1348 		return (retcode);
   1349 	case RAIDFRAME_SET_AUTOCONFIG:
   1350 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1351 		printf("raid%d: New autoconfig value is: %d\n",
   1352 		       raidPtr->raidid, d);
   1353 		*(int *) data = d;
   1354 		return (retcode);
   1355 
   1356 	case RAIDFRAME_SET_ROOT:
   1357 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1358 		printf("raid%d: New rootpartition value is: %d\n",
   1359 		       raidPtr->raidid, d);
   1360 		*(int *) data = d;
   1361 		return (retcode);
   1362 
   1363 		/* initialize all parity */
   1364 	case RAIDFRAME_REWRITEPARITY:
   1365 
   1366 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1367 			/* Parity for RAID 0 is trivially correct */
   1368 			raidPtr->parity_good = RF_RAID_CLEAN;
   1369 			return(0);
   1370 		}
   1371 
   1372 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1373 			/* Re-write is already in progress! */
   1374 			return(EINVAL);
   1375 		}
   1376 
   1377 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1378 					   rf_RewriteParityThread,
   1379 					   raidPtr,"raid_parity");
   1380 		return (retcode);
   1381 
   1382 
   1383 	case RAIDFRAME_ADD_HOT_SPARE:
   1384 		sparePtr = (RF_SingleComponent_t *) data;
   1385 		memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
   1386 		retcode = rf_add_hot_spare(raidPtr, &component);
   1387 		return(retcode);
   1388 
   1389 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1390 		return(retcode);
   1391 
   1392 	case RAIDFRAME_DELETE_COMPONENT:
   1393 		componentPtr = (RF_SingleComponent_t *)data;
   1394 		memcpy( &component, componentPtr,
   1395 			sizeof(RF_SingleComponent_t));
   1396 		retcode = rf_delete_component(raidPtr, &component);
   1397 		return(retcode);
   1398 
   1399 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1400 		componentPtr = (RF_SingleComponent_t *)data;
   1401 		memcpy( &component, componentPtr,
   1402 			sizeof(RF_SingleComponent_t));
   1403 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1404 		return(retcode);
   1405 
   1406 	case RAIDFRAME_REBUILD_IN_PLACE:
   1407 
   1408 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1409 			/* Can't do this on a RAID 0!! */
   1410 			return(EINVAL);
   1411 		}
   1412 
   1413 		if (raidPtr->recon_in_progress == 1) {
   1414 			/* a reconstruct is already in progress! */
   1415 			return(EINVAL);
   1416 		}
   1417 
   1418 		componentPtr = (RF_SingleComponent_t *) data;
   1419 		memcpy( &component, componentPtr,
   1420 			sizeof(RF_SingleComponent_t));
   1421 		component.row = 0; /* we don't support any more */
   1422 		column = component.column;
   1423 
   1424 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1425 			return(EINVAL);
   1426 		}
   1427 
   1428 		RF_LOCK_MUTEX(raidPtr->mutex);
   1429 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
   1430 		    (raidPtr->numFailures > 0)) {
   1431 			/* XXX 0 above shouldn't be constant!!! */
   1432 			/* some component other than this has failed.
   1433 			   Let's not make things worse than they already
   1434 			   are... */
   1435 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1436 			       raidPtr->raidid);
   1437 			printf("raid%d:     Col: %d   Too many failures.\n",
   1438 			       raidPtr->raidid, column);
   1439 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1440 			return (EINVAL);
   1441 		}
   1442 		if (raidPtr->Disks[column].status ==
   1443 		    rf_ds_reconstructing) {
   1444 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1445 			       raidPtr->raidid);
   1446 			printf("raid%d:    Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
   1447 
   1448 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1449 			return (EINVAL);
   1450 		}
   1451 		if (raidPtr->Disks[column].status == rf_ds_spared) {
   1452 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1453 			return (EINVAL);
   1454 		}
   1455 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1456 
   1457 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1458 		if (rrcopy == NULL)
   1459 			return(ENOMEM);
   1460 
   1461 		rrcopy->raidPtr = (void *) raidPtr;
   1462 		rrcopy->col = column;
   1463 
   1464 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1465 					   rf_ReconstructInPlaceThread,
   1466 					   rrcopy,"raid_reconip");
   1467 		return(retcode);
   1468 
   1469 	case RAIDFRAME_GET_INFO:
   1470 		if (!raidPtr->valid)
   1471 			return (ENODEV);
   1472 		ucfgp = (RF_DeviceConfig_t **) data;
   1473 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1474 			  (RF_DeviceConfig_t *));
   1475 		if (d_cfg == NULL)
   1476 			return (ENOMEM);
   1477 		d_cfg->rows = 1; /* there is only 1 row now */
   1478 		d_cfg->cols = raidPtr->numCol;
   1479 		d_cfg->ndevs = raidPtr->numCol;
   1480 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1481 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1482 			return (ENOMEM);
   1483 		}
   1484 		d_cfg->nspares = raidPtr->numSpare;
   1485 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1486 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1487 			return (ENOMEM);
   1488 		}
   1489 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1490 		d = 0;
   1491 		for (j = 0; j < d_cfg->cols; j++) {
   1492 			d_cfg->devs[d] = raidPtr->Disks[j];
   1493 			d++;
   1494 		}
   1495 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1496 			d_cfg->spares[i] = raidPtr->Disks[j];
   1497 		}
   1498 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1499 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1500 
   1501 		return (retcode);
   1502 
   1503 	case RAIDFRAME_CHECK_PARITY:
   1504 		*(int *) data = raidPtr->parity_good;
   1505 		return (0);
   1506 
   1507 	case RAIDFRAME_PARITYMAP_STATUS:
   1508 		if (rf_paritymap_ineligible(raidPtr))
   1509 			return EINVAL;
   1510 		rf_paritymap_status(raidPtr->parity_map,
   1511 		    (struct rf_pmstat *)data);
   1512 		return 0;
   1513 
   1514 	case RAIDFRAME_PARITYMAP_SET_PARAMS:
   1515 		if (rf_paritymap_ineligible(raidPtr))
   1516 			return EINVAL;
   1517 		if (raidPtr->parity_map == NULL)
   1518 			return ENOENT; /* ??? */
   1519 		if (0 != rf_paritymap_set_params(raidPtr->parity_map,
   1520 			(struct rf_pmparams *)data, 1))
   1521 			return EINVAL;
   1522 		return 0;
   1523 
   1524 	case RAIDFRAME_PARITYMAP_GET_DISABLE:
   1525 		if (rf_paritymap_ineligible(raidPtr))
   1526 			return EINVAL;
   1527 		*(int *) data = rf_paritymap_get_disable(raidPtr);
   1528 		return 0;
   1529 
   1530 	case RAIDFRAME_PARITYMAP_SET_DISABLE:
   1531 		if (rf_paritymap_ineligible(raidPtr))
   1532 			return EINVAL;
   1533 		rf_paritymap_set_disable(raidPtr, *(int *)data);
   1534 		/* XXX should errors be passed up? */
   1535 		return 0;
   1536 
   1537 	case RAIDFRAME_RESET_ACCTOTALS:
   1538 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1539 		return (0);
   1540 
   1541 	case RAIDFRAME_GET_ACCTOTALS:
   1542 		totals = (RF_AccTotals_t *) data;
   1543 		*totals = raidPtr->acc_totals;
   1544 		return (0);
   1545 
   1546 	case RAIDFRAME_KEEP_ACCTOTALS:
   1547 		raidPtr->keep_acc_totals = *(int *)data;
   1548 		return (0);
   1549 
   1550 	case RAIDFRAME_GET_SIZE:
   1551 		*(int *) data = raidPtr->totalSectors;
   1552 		return (0);
   1553 
   1554 		/* fail a disk & optionally start reconstruction */
   1555 	case RAIDFRAME_FAIL_DISK:
   1556 
   1557 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1558 			/* Can't do this on a RAID 0!! */
   1559 			return(EINVAL);
   1560 		}
   1561 
   1562 		rr = (struct rf_recon_req *) data;
   1563 		rr->row = 0;
   1564 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
   1565 			return (EINVAL);
   1566 
   1567 
   1568 		RF_LOCK_MUTEX(raidPtr->mutex);
   1569 		if (raidPtr->status == rf_rs_reconstructing) {
   1570 			/* you can't fail a disk while we're reconstructing! */
   1571 			/* XXX wrong for RAID6 */
   1572 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1573 			return (EINVAL);
   1574 		}
   1575 		if ((raidPtr->Disks[rr->col].status ==
   1576 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1577 			/* some other component has failed.  Let's not make
   1578 			   things worse. XXX wrong for RAID6 */
   1579 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1580 			return (EINVAL);
   1581 		}
   1582 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
   1583 			/* Can't fail a spared disk! */
   1584 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1585 			return (EINVAL);
   1586 		}
   1587 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1588 
   1589 		/* make a copy of the recon request so that we don't rely on
   1590 		 * the user's buffer */
   1591 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1592 		if (rrcopy == NULL)
   1593 			return(ENOMEM);
   1594 		memcpy(rrcopy, rr, sizeof(*rr));
   1595 		rrcopy->raidPtr = (void *) raidPtr;
   1596 
   1597 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1598 					   rf_ReconThread,
   1599 					   rrcopy,"raid_recon");
   1600 		return (0);
   1601 
   1602 		/* invoke a copyback operation after recon on whatever disk
   1603 		 * needs it, if any */
   1604 	case RAIDFRAME_COPYBACK:
   1605 
   1606 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1607 			/* This makes no sense on a RAID 0!! */
   1608 			return(EINVAL);
   1609 		}
   1610 
   1611 		if (raidPtr->copyback_in_progress == 1) {
   1612 			/* Copyback is already in progress! */
   1613 			return(EINVAL);
   1614 		}
   1615 
   1616 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1617 					   rf_CopybackThread,
   1618 					   raidPtr,"raid_copyback");
   1619 		return (retcode);
   1620 
   1621 		/* return the percentage completion of reconstruction */
   1622 	case RAIDFRAME_CHECK_RECON_STATUS:
   1623 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1624 			/* This makes no sense on a RAID 0, so tell the
   1625 			   user it's done. */
   1626 			*(int *) data = 100;
   1627 			return(0);
   1628 		}
   1629 		if (raidPtr->status != rf_rs_reconstructing)
   1630 			*(int *) data = 100;
   1631 		else {
   1632 			if (raidPtr->reconControl->numRUsTotal > 0) {
   1633 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
   1634 			} else {
   1635 				*(int *) data = 0;
   1636 			}
   1637 		}
   1638 		return (0);
   1639 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1640 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1641 		if (raidPtr->status != rf_rs_reconstructing) {
   1642 			progressInfo.remaining = 0;
   1643 			progressInfo.completed = 100;
   1644 			progressInfo.total = 100;
   1645 		} else {
   1646 			progressInfo.total =
   1647 				raidPtr->reconControl->numRUsTotal;
   1648 			progressInfo.completed =
   1649 				raidPtr->reconControl->numRUsComplete;
   1650 			progressInfo.remaining = progressInfo.total -
   1651 				progressInfo.completed;
   1652 		}
   1653 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1654 				  sizeof(RF_ProgressInfo_t));
   1655 		return (retcode);
   1656 
   1657 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1658 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1659 			/* This makes no sense on a RAID 0, so tell the
   1660 			   user it's done. */
   1661 			*(int *) data = 100;
   1662 			return(0);
   1663 		}
   1664 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1665 			*(int *) data = 100 *
   1666 				raidPtr->parity_rewrite_stripes_done /
   1667 				raidPtr->Layout.numStripe;
   1668 		} else {
   1669 			*(int *) data = 100;
   1670 		}
   1671 		return (0);
   1672 
   1673 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1674 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1675 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1676 			progressInfo.total = raidPtr->Layout.numStripe;
   1677 			progressInfo.completed =
   1678 				raidPtr->parity_rewrite_stripes_done;
   1679 			progressInfo.remaining = progressInfo.total -
   1680 				progressInfo.completed;
   1681 		} else {
   1682 			progressInfo.remaining = 0;
   1683 			progressInfo.completed = 100;
   1684 			progressInfo.total = 100;
   1685 		}
   1686 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1687 				  sizeof(RF_ProgressInfo_t));
   1688 		return (retcode);
   1689 
   1690 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1691 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1692 			/* This makes no sense on a RAID 0 */
   1693 			*(int *) data = 100;
   1694 			return(0);
   1695 		}
   1696 		if (raidPtr->copyback_in_progress == 1) {
   1697 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1698 				raidPtr->Layout.numStripe;
   1699 		} else {
   1700 			*(int *) data = 100;
   1701 		}
   1702 		return (0);
   1703 
   1704 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1705 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1706 		if (raidPtr->copyback_in_progress == 1) {
   1707 			progressInfo.total = raidPtr->Layout.numStripe;
   1708 			progressInfo.completed =
   1709 				raidPtr->copyback_stripes_done;
   1710 			progressInfo.remaining = progressInfo.total -
   1711 				progressInfo.completed;
   1712 		} else {
   1713 			progressInfo.remaining = 0;
   1714 			progressInfo.completed = 100;
   1715 			progressInfo.total = 100;
   1716 		}
   1717 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1718 				  sizeof(RF_ProgressInfo_t));
   1719 		return (retcode);
   1720 
   1721 		/* the sparetable daemon calls this to wait for the kernel to
   1722 		 * need a spare table. this ioctl does not return until a
   1723 		 * spare table is needed. XXX -- calling mpsleep here in the
   1724 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1725 		 * -- I should either compute the spare table in the kernel,
   1726 		 * or have a different -- XXX XXX -- interface (a different
   1727 		 * character device) for delivering the table     -- XXX */
   1728 #if 0
   1729 	case RAIDFRAME_SPARET_WAIT:
   1730 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1731 		while (!rf_sparet_wait_queue)
   1732 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
   1733 		waitreq = rf_sparet_wait_queue;
   1734 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1735 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1736 
   1737 		/* structure assignment */
   1738 		*((RF_SparetWait_t *) data) = *waitreq;
   1739 
   1740 		RF_Free(waitreq, sizeof(*waitreq));
   1741 		return (0);
   1742 
   1743 		/* wakes up a process waiting on SPARET_WAIT and puts an error
    1744 		 * code in it that will cause the daemon to exit */
   1745 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1746 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1747 		waitreq->fcol = -1;
   1748 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1749 		waitreq->next = rf_sparet_wait_queue;
   1750 		rf_sparet_wait_queue = waitreq;
   1751 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1752 		wakeup(&rf_sparet_wait_queue);
   1753 		return (0);
   1754 
   1755 		/* used by the spare table daemon to deliver a spare table
   1756 		 * into the kernel */
   1757 	case RAIDFRAME_SEND_SPARET:
   1758 
   1759 		/* install the spare table */
   1760 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1761 
   1762 		/* respond to the requestor.  the return status of the spare
   1763 		 * table installation is passed in the "fcol" field */
   1764 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1765 		waitreq->fcol = retcode;
   1766 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1767 		waitreq->next = rf_sparet_resp_queue;
   1768 		rf_sparet_resp_queue = waitreq;
   1769 		wakeup(&rf_sparet_resp_queue);
   1770 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1771 
   1772 		return (retcode);
   1773 #endif
   1774 
   1775 	default:
   1776 		break; /* fall through to the os-specific code below */
   1777 
   1778 	}
   1779 
   1780 	if (!raidPtr->valid)
   1781 		return (EINVAL);
   1782 
   1783 	/*
   1784 	 * Add support for "regular" device ioctls here.
   1785 	 */
   1786 
   1787 	error = disk_ioctl(&rs->sc_dkdev, cmd, data, flag, l);
   1788 	if (error != EPASSTHROUGH)
   1789 		return (error);
   1790 
   1791 	switch (cmd) {
   1792 	case DIOCGDINFO:
   1793 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
   1794 		break;
   1795 #ifdef __HAVE_OLD_DISKLABEL
   1796 	case ODIOCGDINFO:
   1797 		newlabel = *(rs->sc_dkdev.dk_label);
   1798 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1799 			return ENOTTY;
   1800 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1801 		break;
   1802 #endif
   1803 
   1804 	case DIOCGPART:
   1805 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
   1806 		((struct partinfo *) data)->part =
   1807 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1808 		break;
   1809 
   1810 	case DIOCWDINFO:
   1811 	case DIOCSDINFO:
   1812 #ifdef __HAVE_OLD_DISKLABEL
   1813 	case ODIOCWDINFO:
   1814 	case ODIOCSDINFO:
   1815 #endif
   1816 	{
   1817 		struct disklabel *lp;
   1818 #ifdef __HAVE_OLD_DISKLABEL
   1819 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1820 			memset(&newlabel, 0, sizeof newlabel);
   1821 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1822 			lp = &newlabel;
   1823 		} else
   1824 #endif
   1825 		lp = (struct disklabel *)data;
   1826 
   1827 		if ((error = raidlock(rs)) != 0)
   1828 			return (error);
   1829 
   1830 		rs->sc_flags |= RAIDF_LABELLING;
   1831 
   1832 		error = setdisklabel(rs->sc_dkdev.dk_label,
   1833 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
   1834 		if (error == 0) {
   1835 			if (cmd == DIOCWDINFO
   1836 #ifdef __HAVE_OLD_DISKLABEL
   1837 			    || cmd == ODIOCWDINFO
   1838 #endif
   1839 			   )
   1840 				error = writedisklabel(RAIDLABELDEV(dev),
   1841 				    raidstrategy, rs->sc_dkdev.dk_label,
   1842 				    rs->sc_dkdev.dk_cpulabel);
   1843 		}
   1844 		rs->sc_flags &= ~RAIDF_LABELLING;
   1845 
   1846 		raidunlock(rs);
   1847 
   1848 		if (error)
   1849 			return (error);
   1850 		break;
   1851 	}
   1852 
   1853 	case DIOCWLABEL:
   1854 		if (*(int *) data != 0)
   1855 			rs->sc_flags |= RAIDF_WLABEL;
   1856 		else
   1857 			rs->sc_flags &= ~RAIDF_WLABEL;
   1858 		break;
   1859 
   1860 	case DIOCGDEFLABEL:
   1861 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
   1862 		break;
   1863 
   1864 #ifdef __HAVE_OLD_DISKLABEL
   1865 	case ODIOCGDEFLABEL:
   1866 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
   1867 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1868 			return ENOTTY;
   1869 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1870 		break;
   1871 #endif
   1872 
   1873 	case DIOCAWEDGE:
   1874 	case DIOCDWEDGE:
   1875 	    	dkw = (void *)data;
   1876 
   1877 		/* If the ioctl happens here, the parent is us. */
   1878 		(void)strcpy(dkw->dkw_parent, rs->sc_xname);
   1879 		return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
   1880 
   1881 	case DIOCLWEDGES:
   1882 		return dkwedge_list(&rs->sc_dkdev,
   1883 		    (struct dkwedge_list *)data, l);
   1884 	case DIOCCACHESYNC:
   1885 		return rf_sync_component_caches(raidPtr);
   1886 	default:
   1887 		retcode = ENOTTY;
   1888 	}
   1889 	return (retcode);
   1890 
   1891 }
   1892 
   1893 
   1894 /* raidinit -- complete the rest of the initialization for the
   1895    RAIDframe device.  */
   1896 
   1897 
   1898 static void
   1899 raidinit(RF_Raid_t *raidPtr)
   1900 {
   1901 	cfdata_t cf;
   1902 	struct raid_softc *rs;
   1903 	int     unit;
   1904 
   1905 	unit = raidPtr->raidid;
   1906 
   1907 	rs = &raid_softc[unit];
   1908 
   1909 	/* XXX should check return code first... */
   1910 	rs->sc_flags |= RAIDF_INITED;
   1911 
   1912 	/* XXX doesn't check bounds. */
   1913 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
   1914 
   1915 	/* attach the pseudo device */
   1916 	cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
   1917 	cf->cf_name = raid_cd.cd_name;
   1918 	cf->cf_atname = raid_cd.cd_name;
   1919 	cf->cf_unit = unit;
   1920 	cf->cf_fstate = FSTATE_STAR;
   1921 
   1922 	rs->sc_dev = config_attach_pseudo(cf);
   1923 
   1924 	if (rs->sc_dev == NULL) {
   1925 		printf("raid%d: config_attach_pseudo failed\n",
   1926 		    raidPtr->raidid);
   1927 		rs->sc_flags &= ~RAIDF_INITED;
   1928 		free(cf, M_RAIDFRAME);
   1929 		return;
   1930 	}
   1931 
   1932 	/* disk_attach actually creates space for the CPU disklabel, among
   1933 	 * other things, so it's critical to call this *BEFORE* we try putzing
   1934 	 * with disklabels. */
   1935 
   1936 	disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
   1937 	disk_attach(&rs->sc_dkdev);
   1938 	disk_blocksize(&rs->sc_dkdev, raidPtr->bytesPerSector);
   1939 
   1940 	/* XXX There may be a weird interaction here between this, and
   1941 	 * protectedSectors, as used in RAIDframe.  */
   1942 
   1943 	rs->sc_size = raidPtr->totalSectors;
   1944 
   1945 	dkwedge_discover(&rs->sc_dkdev);
   1946 
   1947 	rf_set_properties(rs, raidPtr);
   1948 
   1949 }
   1950 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1951 /* wake up the daemon & tell it to get us a spare table
   1952  * XXX
   1953  * the entries in the queues should be tagged with the raidPtr
   1954  * so that in the extremely rare case that two recons happen at once,
    1955  * we know for which device we're requesting a spare table
   1956  * XXX
   1957  *
   1958  * XXX This code is not currently used. GO
   1959  */
   1960 int
   1961 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
   1962 {
   1963 	int     retcode;
   1964 
   1965 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1966 	req->next = rf_sparet_wait_queue;
   1967 	rf_sparet_wait_queue = req;
   1968 	wakeup(&rf_sparet_wait_queue);
   1969 
   1970 	/* mpsleep unlocks the mutex */
   1971 	while (!rf_sparet_resp_queue) {
   1972 		tsleep(&rf_sparet_resp_queue, PRIBIO,
   1973 		    "raidframe getsparetable", 0);
   1974 	}
   1975 	req = rf_sparet_resp_queue;
   1976 	rf_sparet_resp_queue = req->next;
   1977 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1978 
   1979 	retcode = req->fcol;
   1980 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
   1981 					 * alloc'd */
   1982 	return (retcode);
   1983 }
   1984 #endif
   1985 
   1986 /* a wrapper around rf_DoAccess that extracts appropriate info from the
   1987  * bp & passes it down.
    1988  * Any calls originating in the kernel must use non-blocking I/O.
    1989  * Do some extra sanity checking to return "appropriate" error values for
    1990  * certain conditions (to make some standard utilities work).
   1991  *
   1992  * Formerly known as: rf_DoAccessKernel
   1993  */
   1994 void
   1995 raidstart(RF_Raid_t *raidPtr)
   1996 {
   1997 	RF_SectorCount_t num_blocks, pb, sum;
   1998 	RF_RaidAddr_t raid_addr;
   1999 	struct partition *pp;
   2000 	daddr_t blocknum;
   2001 	int     unit;
   2002 	struct raid_softc *rs;
   2003 	int     do_async;
   2004 	struct buf *bp;
   2005 	int rc;
   2006 
   2007 	unit = raidPtr->raidid;
   2008 	rs = &raid_softc[unit];
   2009 
   2010 	/* quick check to see if anything has died recently */
   2011 	RF_LOCK_MUTEX(raidPtr->mutex);
   2012 	if (raidPtr->numNewFailures > 0) {
   2013 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   2014 		rf_update_component_labels(raidPtr,
   2015 					   RF_NORMAL_COMPONENT_UPDATE);
   2016 		RF_LOCK_MUTEX(raidPtr->mutex);
   2017 		raidPtr->numNewFailures--;
   2018 	}
   2019 
   2020 	/* Check to see if we're at the limit... */
   2021 	while (raidPtr->openings > 0) {
   2022 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   2023 
   2024 		/* get the next item, if any, from the queue */
   2025 		if ((bp = bufq_get(rs->buf_queue)) == NULL) {
   2026 			/* nothing more to do */
   2027 			return;
   2028 		}
   2029 
   2030 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
   2031 		 * partition.. Need to make it absolute to the underlying
   2032 		 * device.. */
   2033 
   2034 		blocknum = bp->b_blkno << DEV_BSHIFT >> raidPtr->logBytesPerSector;
   2035 		if (DISKPART(bp->b_dev) != RAW_PART) {
   2036 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
   2037 			blocknum += pp->p_offset;
   2038 		}
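		/* For example, assuming the usual DEV_BSIZE of 512
		 * (DEV_BSHIFT == 9) and 2048-byte RAID sectors
		 * (logBytesPerSector == 11), a bp->b_blkno of 64 maps to
		 * RAID sector (64 << 9) >> 11 == 16 before any partition
		 * offset is added. */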
   2039 
   2040 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
   2041 			    (int) blocknum));
   2042 
   2043 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
   2044 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
   2045 
   2046 		/* *THIS* is where we adjust what block we're going to...
   2047 		 * but DO NOT TOUCH bp->b_blkno!!! */
   2048 		raid_addr = blocknum;
   2049 
   2050 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
   2051 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
   2052 		sum = raid_addr + num_blocks + pb;
   2053 		if (1 || rf_debugKernelAccess) {
   2054 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
   2055 				    (int) raid_addr, (int) sum, (int) num_blocks,
   2056 				    (int) pb, (int) bp->b_resid));
   2057 		}
   2058 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
   2059 		    || (sum < num_blocks) || (sum < pb)) {
   2060 			bp->b_error = ENOSPC;
   2061 			bp->b_resid = bp->b_bcount;
   2062 			biodone(bp);
   2063 			RF_LOCK_MUTEX(raidPtr->mutex);
   2064 			continue;
   2065 		}
   2066 		/*
   2067 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
   2068 		 */
   2069 
   2070 		if (bp->b_bcount & raidPtr->sectorMask) {
   2071 			bp->b_error = EINVAL;
   2072 			bp->b_resid = bp->b_bcount;
   2073 			biodone(bp);
   2074 			RF_LOCK_MUTEX(raidPtr->mutex);
   2075 			continue;
   2076 
   2077 		}
   2078 		db1_printf(("Calling DoAccess..\n"));
   2079 
   2080 
   2081 		RF_LOCK_MUTEX(raidPtr->mutex);
   2082 		raidPtr->openings--;
   2083 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   2084 
   2085 		/*
   2086 		 * Everything is async.
   2087 		 */
   2088 		do_async = 1;
   2089 
   2090 		disk_busy(&rs->sc_dkdev);
   2091 
   2092 		/* XXX we're still at splbio() here... do we *really*
   2093 		   need to be? */
   2094 
   2095 		/* don't ever condition on bp->b_flags & B_WRITE.
   2096 		 * always condition on B_READ instead */
   2097 
   2098 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
   2099 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
   2100 				 do_async, raid_addr, num_blocks,
   2101 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
   2102 
   2103 		if (rc) {
   2104 			bp->b_error = rc;
   2105 			bp->b_resid = bp->b_bcount;
   2106 			biodone(bp);
   2107 			/* continue loop */
   2108 		}
   2109 
   2110 		RF_LOCK_MUTEX(raidPtr->mutex);
   2111 	}
   2112 	RF_UNLOCK_MUTEX(raidPtr->mutex);
   2113 }
   2114 
   2115 
   2116 
   2117 
   2118 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   2119 
   2120 int
   2121 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
   2122 {
   2123 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   2124 	struct buf *bp;
   2125 
   2126 	req->queue = queue;
   2127 	bp = req->bp;
   2128 
   2129 	switch (req->type) {
   2130 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   2131 		/* XXX need to do something extra here.. */
   2132 		/* I'm leaving this in, as I've never actually seen it used,
   2133 		 * and I'd like folks to report it... GO */
    2134 		printf("WAKEUP CALLED\n");
   2135 		queue->numOutstanding++;
   2136 
   2137 		bp->b_flags = 0;
   2138 		bp->b_private = req;
   2139 
   2140 		KernelWakeupFunc(bp);
   2141 		break;
   2142 
   2143 	case RF_IO_TYPE_READ:
   2144 	case RF_IO_TYPE_WRITE:
   2145 #if RF_ACC_TRACE > 0
   2146 		if (req->tracerec) {
   2147 			RF_ETIMER_START(req->tracerec->timer);
   2148 		}
   2149 #endif
   2150 		InitBP(bp, queue->rf_cinfo->ci_vp,
   2151 		    op, queue->rf_cinfo->ci_dev,
   2152 		    req->sectorOffset, req->numSector,
   2153 		    req->buf, KernelWakeupFunc, (void *) req,
   2154 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   2155 
   2156 		if (rf_debugKernelAccess) {
   2157 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   2158 				(long) bp->b_blkno));
   2159 		}
   2160 		queue->numOutstanding++;
   2161 		queue->last_deq_sector = req->sectorOffset;
   2162 		/* acc wouldn't have been let in if there were any pending
   2163 		 * reqs at any other priority */
   2164 		queue->curPriority = req->priority;
   2165 
   2166 		db1_printf(("Going for %c to unit %d col %d\n",
   2167 			    req->type, queue->raidPtr->raidid,
   2168 			    queue->col));
   2169 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   2170 			(int) req->sectorOffset, (int) req->numSector,
   2171 			(int) (req->numSector <<
   2172 			    queue->raidPtr->logBytesPerSector),
   2173 			(int) queue->raidPtr->logBytesPerSector));
   2174 
   2175 		/*
   2176 		 * XXX: drop lock here since this can block at
   2177 		 * least with backing SCSI devices.  Retake it
   2178 		 * to minimize fuss with calling interfaces.
   2179 		 */
   2180 
   2181 		RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
   2182 		bdev_strategy(bp);
   2183 		RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
   2184 		break;
   2185 
   2186 	default:
   2187 		panic("bad req->type in rf_DispatchKernelIO");
   2188 	}
   2189 	db1_printf(("Exiting from DispatchKernelIO\n"));
   2190 
   2191 	return (0);
   2192 }
    2193 /* this is the callback function associated with an I/O invoked from
   2194    kernel code.
   2195  */
   2196 static void
   2197 KernelWakeupFunc(struct buf *bp)
   2198 {
   2199 	RF_DiskQueueData_t *req = NULL;
   2200 	RF_DiskQueue_t *queue;
   2201 	int s;
   2202 
   2203 	s = splbio();
   2204 	db1_printf(("recovering the request queue:\n"));
   2205 	req = bp->b_private;
   2206 
   2207 	queue = (RF_DiskQueue_t *) req->queue;
   2208 
   2209 #if RF_ACC_TRACE > 0
   2210 	if (req->tracerec) {
   2211 		RF_ETIMER_STOP(req->tracerec->timer);
   2212 		RF_ETIMER_EVAL(req->tracerec->timer);
   2213 		RF_LOCK_MUTEX(rf_tracing_mutex);
   2214 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   2215 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   2216 		req->tracerec->num_phys_ios++;
   2217 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
   2218 	}
   2219 #endif
   2220 
   2221 	/* XXX Ok, let's get aggressive... If b_error is set, let's go
   2222 	 * ballistic, and mark the component as hosed... */
   2223 
   2224 	if (bp->b_error != 0) {
   2225 		/* Mark the disk as dead */
   2226 		/* but only mark it once... */
   2227 		/* and only if it wouldn't leave this RAID set
   2228 		   completely broken */
   2229 		if (((queue->raidPtr->Disks[queue->col].status ==
   2230 		      rf_ds_optimal) ||
   2231 		     (queue->raidPtr->Disks[queue->col].status ==
   2232 		      rf_ds_used_spare)) &&
   2233 		     (queue->raidPtr->numFailures <
   2234 		      queue->raidPtr->Layout.map->faultsTolerated)) {
   2235 			printf("raid%d: IO Error.  Marking %s as failed.\n",
   2236 			       queue->raidPtr->raidid,
   2237 			       queue->raidPtr->Disks[queue->col].devname);
   2238 			queue->raidPtr->Disks[queue->col].status =
   2239 			    rf_ds_failed;
   2240 			queue->raidPtr->status = rf_rs_degraded;
   2241 			queue->raidPtr->numFailures++;
   2242 			queue->raidPtr->numNewFailures++;
   2243 		} else {	/* Disk is already dead... */
   2244 			/* printf("Disk already marked as dead!\n"); */
   2245 		}
   2246 
   2247 	}
   2248 
   2249 	/* Fill in the error value */
   2250 
   2251 	req->error = bp->b_error;
   2252 
   2253 	simple_lock(&queue->raidPtr->iodone_lock);
   2254 
   2255 	/* Drop this one on the "finished" queue... */
   2256 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
   2257 
   2258 	/* Let the raidio thread know there is work to be done. */
   2259 	wakeup(&(queue->raidPtr->iodone));
   2260 
   2261 	simple_unlock(&queue->raidPtr->iodone_lock);
   2262 
   2263 	splx(s);
   2264 }
   2265 
   2266 
   2267 
   2268 /*
   2269  * initialize a buf structure for doing an I/O in the kernel.
   2270  */
   2271 static void
   2272 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
   2273        RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
   2274        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
   2275        struct proc *b_proc)
   2276 {
   2277 	/* bp->b_flags       = B_PHYS | rw_flag; */
   2278 	bp->b_flags = rw_flag;	/* XXX need B_PHYS here too??? */
   2279 	bp->b_oflags = 0;
   2280 	bp->b_cflags = 0;
   2281 	bp->b_bcount = numSect << logBytesPerSector;
   2282 	bp->b_bufsize = bp->b_bcount;
   2283 	bp->b_error = 0;
   2284 	bp->b_dev = dev;
   2285 	bp->b_data = bf;
   2286 	bp->b_blkno = startSect << logBytesPerSector >> DEV_BSHIFT;
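	/* b_blkno is in DEV_BSIZE units; e.g. assuming the usual 512-byte
	 * DEV_BSIZE, RAID sector 16 on a 2048-byte-sector set becomes
	 * b_blkno (16 << 11) >> 9 == 64. */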
   2287 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
   2288 	if (bp->b_bcount == 0) {
   2289 		panic("bp->b_bcount is zero in InitBP!!");
   2290 	}
   2291 	bp->b_proc = b_proc;
   2292 	bp->b_iodone = cbFunc;
   2293 	bp->b_private = cbArg;
   2294 }
   2295 
   2296 static void
   2297 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
   2298 		    struct disklabel *lp)
   2299 {
   2300 	memset(lp, 0, sizeof(*lp));
   2301 
   2302 	/* fabricate a label... */
   2303 	lp->d_secperunit = raidPtr->totalSectors;
   2304 	lp->d_secsize = raidPtr->bytesPerSector;
   2305 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
   2306 	lp->d_ntracks = 4 * raidPtr->numCol;
   2307 	lp->d_ncylinders = raidPtr->totalSectors /
   2308 		(lp->d_nsectors * lp->d_ntracks);
   2309 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
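	/* The geometry above is purely nominal.  As a hypothetical example,
	 * a 5-column set with 128 data sectors per stripe and 1000000 total
	 * sectors would get d_nsectors == 128, d_ntracks == 20,
	 * d_secpercyl == 2560 and d_ncylinders == 390. */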
   2310 
   2311 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
   2312 	lp->d_type = DTYPE_RAID;
   2313 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   2314 	lp->d_rpm = 3600;
   2315 	lp->d_interleave = 1;
   2316 	lp->d_flags = 0;
   2317 
   2318 	lp->d_partitions[RAW_PART].p_offset = 0;
   2319 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
   2320 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   2321 	lp->d_npartitions = RAW_PART + 1;
   2322 
   2323 	lp->d_magic = DISKMAGIC;
   2324 	lp->d_magic2 = DISKMAGIC;
   2325 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
   2326 
   2327 }
   2328 /*
   2329  * Read the disklabel from the raid device.  If one is not present, fake one
   2330  * up.
   2331  */
   2332 static void
   2333 raidgetdisklabel(dev_t dev)
   2334 {
   2335 	int     unit = raidunit(dev);
   2336 	struct raid_softc *rs = &raid_softc[unit];
   2337 	const char   *errstring;
   2338 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2339 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
   2340 	RF_Raid_t *raidPtr;
   2341 
   2342 	db1_printf(("Getting the disklabel...\n"));
   2343 
   2344 	memset(clp, 0, sizeof(*clp));
   2345 
   2346 	raidPtr = raidPtrs[unit];
   2347 
   2348 	raidgetdefaultlabel(raidPtr, rs, lp);
   2349 
   2350 	/*
   2351 	 * Call the generic disklabel extraction routine.
   2352 	 */
   2353 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
   2354 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
   2355 	if (errstring)
   2356 		raidmakedisklabel(rs);
   2357 	else {
   2358 		int     i;
   2359 		struct partition *pp;
   2360 
   2361 		/*
   2362 		 * Sanity check whether the found disklabel is valid.
   2363 		 *
    2364 		 * This is necessary since the total size of the raid device
    2365 		 * may vary when the interleave is changed even though exactly
    2366 		 * the same components are used, and an old disklabel may be
    2367 		 * used if one is found.
   2368 		 */
   2369 		if (lp->d_secperunit != rs->sc_size)
   2370 			printf("raid%d: WARNING: %s: "
   2371 			    "total sector size in disklabel (%" PRIu32 ") != "
   2372 			    "the size of raid (%" PRIu64 ")\n", unit, rs->sc_xname,
   2373 			    lp->d_secperunit, rs->sc_size);
   2374 		for (i = 0; i < lp->d_npartitions; i++) {
   2375 			pp = &lp->d_partitions[i];
   2376 			if (pp->p_offset + pp->p_size > rs->sc_size)
   2377 				printf("raid%d: WARNING: %s: end of partition `%c' "
   2378 				       "exceeds the size of raid (%" PRIu64 ")\n",
   2379 				       unit, rs->sc_xname, 'a' + i, rs->sc_size);
   2380 		}
   2381 	}
   2382 
   2383 }
   2384 /*
   2385  * Take care of things one might want to take care of in the event
   2386  * that a disklabel isn't present.
   2387  */
   2388 static void
   2389 raidmakedisklabel(struct raid_softc *rs)
   2390 {
   2391 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2392 	db1_printf(("Making a label..\n"));
   2393 
   2394 	/*
   2395 	 * For historical reasons, if there's no disklabel present
   2396 	 * the raw partition must be marked FS_BSDFFS.
   2397 	 */
   2398 
   2399 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
   2400 
   2401 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
   2402 
   2403 	lp->d_checksum = dkcksum(lp);
   2404 }
   2405 /*
   2406  * Wait interruptibly for an exclusive lock.
   2407  *
   2408  * XXX
   2409  * Several drivers do this; it should be abstracted and made MP-safe.
   2410  * (Hmm... where have we seen this warning before :->  GO )
   2411  */
   2412 static int
   2413 raidlock(struct raid_softc *rs)
   2414 {
   2415 	int     error;
   2416 
   2417 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2418 		rs->sc_flags |= RAIDF_WANTED;
   2419 		if ((error =
   2420 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
   2421 			return (error);
   2422 	}
   2423 	rs->sc_flags |= RAIDF_LOCKED;
   2424 	return (0);
   2425 }
   2426 /*
   2427  * Unlock and wake up any waiters.
   2428  */
   2429 static void
   2430 raidunlock(struct raid_softc *rs)
   2431 {
   2432 
   2433 	rs->sc_flags &= ~RAIDF_LOCKED;
   2434 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2435 		rs->sc_flags &= ~RAIDF_WANTED;
   2436 		wakeup(rs);
   2437 	}
   2438 }
   2439 
   2440 
   2441 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2442 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
   2443 #define RF_PARITY_MAP_SIZE   RF_PARITYMAP_NBYTE
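/*
 * Together these constants and the helpers below describe the reserved
 * region at the front of each component.  As a sketch, assuming 512-byte
 * sectors:
 *
 *	component label: byte offset 16384, max(secsize, 1024) = 1024 bytes
 *	parity map:	 byte offset 16384 + 1024 = 17408,
 *			 max(secsize, RF_PARITYMAP_NBYTE) bytes
 *
 * Larger sectors simply round each region up to one full sector.
 */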
   2444 
   2445 static daddr_t
   2446 rf_component_info_offset(void)
   2447 {
   2448 
   2449 	return RF_COMPONENT_INFO_OFFSET;
   2450 }
   2451 
   2452 static daddr_t
   2453 rf_component_info_size(unsigned secsize)
   2454 {
   2455 	daddr_t info_size;
   2456 
   2457 	KASSERT(secsize);
   2458 	if (secsize > RF_COMPONENT_INFO_SIZE)
   2459 		info_size = secsize;
   2460 	else
   2461 		info_size = RF_COMPONENT_INFO_SIZE;
   2462 
   2463 	return info_size;
   2464 }
   2465 
   2466 static daddr_t
   2467 rf_parity_map_offset(RF_Raid_t *raidPtr)
   2468 {
   2469 	daddr_t map_offset;
   2470 
   2471 	KASSERT(raidPtr->bytesPerSector);
   2472 	if (raidPtr->bytesPerSector > RF_COMPONENT_INFO_SIZE)
   2473 		map_offset = raidPtr->bytesPerSector;
   2474 	else
   2475 		map_offset = RF_COMPONENT_INFO_SIZE;
   2476 	map_offset += rf_component_info_offset();
   2477 
   2478 	return map_offset;
   2479 }
   2480 
   2481 static daddr_t
   2482 rf_parity_map_size(RF_Raid_t *raidPtr)
   2483 {
   2484 	daddr_t map_size;
   2485 
   2486 	if (raidPtr->bytesPerSector > RF_PARITY_MAP_SIZE)
   2487 		map_size = raidPtr->bytesPerSector;
   2488 	else
   2489 		map_size = RF_PARITY_MAP_SIZE;
   2490 
   2491 	return map_size;
   2492 }
   2493 
   2494 int
   2495 raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
   2496 {
   2497 	RF_ComponentLabel_t *clabel;
   2498 
   2499 	clabel = raidget_component_label(raidPtr, col);
   2500 	clabel->clean = RF_RAID_CLEAN;
   2501 	raidflush_component_label(raidPtr, col);
   2502 	return(0);
   2503 }
   2504 
   2505 
   2506 int
   2507 raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
   2508 {
   2509 	RF_ComponentLabel_t *clabel;
   2510 
   2511 	clabel = raidget_component_label(raidPtr, col);
   2512 	clabel->clean = RF_RAID_DIRTY;
   2513 	raidflush_component_label(raidPtr, col);
   2514 	return(0);
   2515 }
   2516 
   2517 int
   2518 raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
   2519 {
   2520 	KASSERT(raidPtr->bytesPerSector);
   2521 	return raidread_component_label(raidPtr->bytesPerSector,
   2522 	    raidPtr->Disks[col].dev,
   2523 	    raidPtr->raid_cinfo[col].ci_vp,
   2524 	    &raidPtr->raid_cinfo[col].ci_label);
   2525 }
   2526 
   2527 RF_ComponentLabel_t *
   2528 raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
   2529 {
   2530 	return &raidPtr->raid_cinfo[col].ci_label;
   2531 }
   2532 
   2533 int
   2534 raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
   2535 {
   2536 	RF_ComponentLabel_t *label;
   2537 
   2538 	label = &raidPtr->raid_cinfo[col].ci_label;
   2539 	label->mod_counter = raidPtr->mod_counter;
   2540 #ifndef RF_NO_PARITY_MAP
   2541 	label->parity_map_modcount = label->mod_counter;
   2542 #endif
   2543 	return raidwrite_component_label(raidPtr->bytesPerSector,
   2544 	    raidPtr->Disks[col].dev,
   2545 	    raidPtr->raid_cinfo[col].ci_vp, label);
   2546 }
   2547 
   2548 
   2549 static int
   2550 raidread_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
   2551     RF_ComponentLabel_t *clabel)
   2552 {
   2553 	return raidread_component_area(dev, b_vp, clabel,
   2554 	    sizeof(RF_ComponentLabel_t),
   2555 	    rf_component_info_offset(),
   2556 	    rf_component_info_size(secsize));
   2557 }
   2558 
   2559 /* ARGSUSED */
   2560 static int
   2561 raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
   2562     size_t msize, daddr_t offset, daddr_t dsize)
   2563 {
   2564 	struct buf *bp;
   2565 	const struct bdevsw *bdev;
   2566 	int error;
   2567 
   2568 	/* XXX should probably ensure that we don't try to do this if
   2569 	   someone has changed rf_protected_sectors. */
   2570 
   2571 	if (b_vp == NULL) {
   2572 		/* For whatever reason, this component is not valid.
   2573 		   Don't try to read a component label from it. */
   2574 		return(EINVAL);
   2575 	}
   2576 
   2577 	/* get a block of the appropriate size... */
   2578 	bp = geteblk((int)dsize);
   2579 	bp->b_dev = dev;
   2580 
   2581 	/* get our ducks in a row for the read */
   2582 	bp->b_blkno = offset / DEV_BSIZE;
   2583 	bp->b_bcount = dsize;
   2584 	bp->b_flags |= B_READ;
   2585  	bp->b_resid = dsize;
   2586 
   2587 	bdev = bdevsw_lookup(bp->b_dev);
   2588 	if (bdev == NULL)
   2589 		return (ENXIO);
   2590 	(*bdev->d_strategy)(bp);
   2591 
   2592 	error = biowait(bp);
   2593 
   2594 	if (!error) {
   2595 		memcpy(data, bp->b_data, msize);
   2596 	}
   2597 
   2598 	brelse(bp, 0);
   2599 	return(error);
   2600 }
   2601 
   2602 
   2603 static int
   2604 raidwrite_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
   2605     RF_ComponentLabel_t *clabel)
   2606 {
   2607 	return raidwrite_component_area(dev, b_vp, clabel,
   2608 	    sizeof(RF_ComponentLabel_t),
   2609 	    rf_component_info_offset(),
   2610 	    rf_component_info_size(secsize), 0);
   2611 }
   2612 
   2613 /* ARGSUSED */
   2614 static int
   2615 raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
   2616     size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
   2617 {
   2618 	struct buf *bp;
   2619 	const struct bdevsw *bdev;
   2620 	int error;
   2621 
   2622 	/* get a block of the appropriate size... */
   2623 	bp = geteblk((int)dsize);
   2624 	bp->b_dev = dev;
   2625 
   2626 	/* get our ducks in a row for the write */
   2627 	bp->b_blkno = offset / DEV_BSIZE;
   2628 	bp->b_bcount = dsize;
   2629 	bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
   2630  	bp->b_resid = dsize;
   2631 
   2632 	memset(bp->b_data, 0, dsize);
   2633 	memcpy(bp->b_data, data, msize);
   2634 
   2635 	bdev = bdevsw_lookup(bp->b_dev);
   2636 	if (bdev == NULL)
   2637 		return (ENXIO);
   2638 	(*bdev->d_strategy)(bp);
   2639 	if (asyncp)
   2640 		return 0;
   2641 	error = biowait(bp);
   2642 	brelse(bp, 0);
   2643 	if (error) {
   2644 #if 1
   2645 		printf("Failed to write RAID component info!\n");
   2646 #endif
   2647 	}
   2648 
   2649 	return(error);
   2650 }
   2651 
   2652 void
   2653 rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
   2654 {
   2655 	int c;
   2656 
   2657 	for (c = 0; c < raidPtr->numCol; c++) {
   2658 		/* Skip dead disks. */
   2659 		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
   2660 			continue;
   2661 		/* XXXjld: what if an error occurs here? */
   2662 		raidwrite_component_area(raidPtr->Disks[c].dev,
   2663 		    raidPtr->raid_cinfo[c].ci_vp, map,
   2664 		    RF_PARITYMAP_NBYTE,
   2665 		    rf_parity_map_offset(raidPtr),
   2666 		    rf_parity_map_size(raidPtr), 0);
   2667 	}
   2668 }
   2669 
   2670 void
   2671 rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
   2672 {
   2673 	struct rf_paritymap_ondisk tmp;
   2674 	int c,first;
   2675 
   2676 	first=1;
   2677 	for (c = 0; c < raidPtr->numCol; c++) {
   2678 		/* Skip dead disks. */
   2679 		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
   2680 			continue;
   2681 		raidread_component_area(raidPtr->Disks[c].dev,
   2682 		    raidPtr->raid_cinfo[c].ci_vp, &tmp,
   2683 		    RF_PARITYMAP_NBYTE,
   2684 		    rf_parity_map_offset(raidPtr),
   2685 		    rf_parity_map_size(raidPtr));
   2686 		if (first) {
   2687 			memcpy(map, &tmp, sizeof(*map));
   2688 			first = 0;
   2689 		} else {
   2690 			rf_paritymap_merge(map, &tmp);
   2691 		}
   2692 	}
   2693 }
   2694 
   2695 void
   2696 rf_markalldirty(RF_Raid_t *raidPtr)
   2697 {
   2698 	RF_ComponentLabel_t *clabel;
   2699 	int sparecol;
   2700 	int c;
   2701 	int j;
   2702 	int scol = -1;
   2703 
   2704 	raidPtr->mod_counter++;
   2705 	for (c = 0; c < raidPtr->numCol; c++) {
   2706 		/* we don't want to touch (at all) a disk that has
   2707 		   failed */
   2708 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
   2709 			clabel = raidget_component_label(raidPtr, c);
   2710 			if (clabel->status == rf_ds_spared) {
   2711 				/* XXX do something special...
   2712 				   but whatever you do, don't
   2713 				   try to access it!! */
   2714 			} else {
   2715 				raidmarkdirty(raidPtr, c);
   2716 			}
   2717 		}
   2718 	}
   2719 
   2720 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2721 		sparecol = raidPtr->numCol + c;
   2722 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2723 			/*
   2724 
   2725 			   we claim this disk is "optimal" if it's
   2726 			   rf_ds_used_spare, as that means it should be
   2727 			   directly substitutable for the disk it replaced.
   2728 			   We note that too...
   2729 
   2730 			 */
   2731 
   2732 			for(j=0;j<raidPtr->numCol;j++) {
   2733 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2734 					scol = j;
   2735 					break;
   2736 				}
   2737 			}
   2738 
   2739 			clabel = raidget_component_label(raidPtr, sparecol);
   2740 			/* make sure status is noted */
   2741 
   2742 			raid_init_component_label(raidPtr, clabel);
   2743 
   2744 			clabel->row = 0;
   2745 			clabel->column = scol;
   2746 			/* Note: we *don't* change status from rf_ds_used_spare
   2747 			   to rf_ds_optimal */
   2748 			/* clabel.status = rf_ds_optimal; */
   2749 
   2750 			raidmarkdirty(raidPtr, sparecol);
   2751 		}
   2752 	}
   2753 }
   2754 
   2755 
   2756 void
   2757 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
   2758 {
   2759 	RF_ComponentLabel_t *clabel;
   2760 	int sparecol;
   2761 	int c;
   2762 	int j;
   2763 	int scol;
   2764 
   2765 	scol = -1;
   2766 
   2767 	/* XXX should do extra checks to make sure things really are clean,
   2768 	   rather than blindly setting the clean bit... */
   2769 
   2770 	raidPtr->mod_counter++;
   2771 
   2772 	for (c = 0; c < raidPtr->numCol; c++) {
   2773 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   2774 			clabel = raidget_component_label(raidPtr, c);
   2775 			/* make sure status is noted */
   2776 			clabel->status = rf_ds_optimal;
   2777 
   2778 			/* note what unit we are configured as */
   2779 			clabel->last_unit = raidPtr->raidid;
   2780 
   2781 			raidflush_component_label(raidPtr, c);
   2782 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2783 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2784 					raidmarkclean(raidPtr, c);
   2785 				}
   2786 			}
   2787 		}
   2788 		/* else we don't touch it.. */
   2789 	}
   2790 
   2791 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2792 		sparecol = raidPtr->numCol + c;
   2793 		/* Need to ensure that the reconstruct actually completed! */
   2794 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2795 			/*
   2796 
   2797 			   we claim this disk is "optimal" if it's
   2798 			   rf_ds_used_spare, as that means it should be
   2799 			   directly substitutable for the disk it replaced.
   2800 			   We note that too...
   2801 
   2802 			 */
   2803 
   2804 			for(j=0;j<raidPtr->numCol;j++) {
   2805 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2806 					scol = j;
   2807 					break;
   2808 				}
   2809 			}
   2810 
   2811 			/* XXX shouldn't *really* need this... */
   2812 			clabel = raidget_component_label(raidPtr, sparecol);
   2813 			/* make sure status is noted */
   2814 
   2815 			raid_init_component_label(raidPtr, clabel);
   2816 
   2817 			clabel->column = scol;
   2818 			clabel->status = rf_ds_optimal;
   2819 			clabel->last_unit = raidPtr->raidid;
   2820 
   2821 			raidflush_component_label(raidPtr, sparecol);
   2822 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2823 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2824 					raidmarkclean(raidPtr, sparecol);
   2825 				}
   2826 			}
   2827 		}
   2828 	}
   2829 }
   2830 
   2831 void
   2832 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
   2833 {
   2834 
   2835 	if (vp != NULL) {
   2836 		if (auto_configured == 1) {
   2837 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2838 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2839 			vput(vp);
   2840 
   2841 		} else {
   2842 			(void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
   2843 		}
   2844 	}
   2845 }
   2846 
   2847 
   2848 void
   2849 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
   2850 {
   2851 	int r,c;
   2852 	struct vnode *vp;
   2853 	int acd;
   2854 
   2855 
   2856 	/* We take this opportunity to close the vnodes like we should.. */
   2857 
   2858 	for (c = 0; c < raidPtr->numCol; c++) {
   2859 		vp = raidPtr->raid_cinfo[c].ci_vp;
   2860 		acd = raidPtr->Disks[c].auto_configured;
   2861 		rf_close_component(raidPtr, vp, acd);
   2862 		raidPtr->raid_cinfo[c].ci_vp = NULL;
   2863 		raidPtr->Disks[c].auto_configured = 0;
   2864 	}
   2865 
   2866 	for (r = 0; r < raidPtr->numSpare; r++) {
   2867 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
   2868 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
   2869 		rf_close_component(raidPtr, vp, acd);
   2870 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
   2871 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
   2872 	}
   2873 }
   2874 
   2875 
   2876 void
   2877 rf_ReconThread(struct rf_recon_req *req)
   2878 {
   2879 	int     s;
   2880 	RF_Raid_t *raidPtr;
   2881 
   2882 	s = splbio();
   2883 	raidPtr = (RF_Raid_t *) req->raidPtr;
   2884 	raidPtr->recon_in_progress = 1;
   2885 
   2886 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
   2887 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
   2888 
   2889 	RF_Free(req, sizeof(*req));
   2890 
   2891 	raidPtr->recon_in_progress = 0;
   2892 	splx(s);
   2893 
   2894 	/* That's all... */
   2895 	kthread_exit(0);	/* does not return */
   2896 }
   2897 
   2898 void
   2899 rf_RewriteParityThread(RF_Raid_t *raidPtr)
   2900 {
   2901 	int retcode;
   2902 	int s;
   2903 
   2904 	raidPtr->parity_rewrite_stripes_done = 0;
   2905 	raidPtr->parity_rewrite_in_progress = 1;
   2906 	s = splbio();
   2907 	retcode = rf_RewriteParity(raidPtr);
   2908 	splx(s);
   2909 	if (retcode) {
   2910 		printf("raid%d: Error re-writing parity (%d)!\n",
   2911 		    raidPtr->raidid, retcode);
   2912 	} else {
   2913 		/* set the clean bit!  If we shutdown correctly,
   2914 		   the clean bit on each component label will get
   2915 		   set */
   2916 		raidPtr->parity_good = RF_RAID_CLEAN;
   2917 	}
   2918 	raidPtr->parity_rewrite_in_progress = 0;
   2919 
   2920 	/* Anyone waiting for us to stop?  If so, inform them... */
   2921 	if (raidPtr->waitShutdown) {
   2922 		wakeup(&raidPtr->parity_rewrite_in_progress);
   2923 	}
   2924 
   2925 	/* That's all... */
   2926 	kthread_exit(0);	/* does not return */
   2927 }
   2928 
   2929 
   2930 void
   2931 rf_CopybackThread(RF_Raid_t *raidPtr)
   2932 {
   2933 	int s;
   2934 
   2935 	raidPtr->copyback_in_progress = 1;
   2936 	s = splbio();
   2937 	rf_CopybackReconstructedData(raidPtr);
   2938 	splx(s);
   2939 	raidPtr->copyback_in_progress = 0;
   2940 
   2941 	/* That's all... */
   2942 	kthread_exit(0);	/* does not return */
   2943 }
   2944 
   2945 
   2946 void
   2947 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
   2948 {
   2949 	int s;
   2950 	RF_Raid_t *raidPtr;
   2951 
   2952 	s = splbio();
   2953 	raidPtr = req->raidPtr;
   2954 	raidPtr->recon_in_progress = 1;
   2955 	rf_ReconstructInPlace(raidPtr, req->col);
   2956 	RF_Free(req, sizeof(*req));
   2957 	raidPtr->recon_in_progress = 0;
   2958 	splx(s);
   2959 
   2960 	/* That's all... */
   2961 	kthread_exit(0);	/* does not return */
   2962 }
   2963 
   2964 static RF_AutoConfig_t *
   2965 rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
   2966     const char *cname, RF_SectorCount_t size, uint64_t numsecs,
   2967     unsigned secsize)
   2968 {
   2969 	int good_one = 0;
   2970 	RF_ComponentLabel_t *clabel;
   2971 	RF_AutoConfig_t *ac;
   2972 
   2973 	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
   2974 	if (clabel == NULL) {
   2975 oomem:
   2976 		    while(ac_list) {
   2977 			    ac = ac_list;
   2978 			    if (ac->clabel)
   2979 				    free(ac->clabel, M_RAIDFRAME);
   2980 			    ac_list = ac_list->next;
   2981 			    free(ac, M_RAIDFRAME);
   2982 		    }
   2983 		    printf("RAID auto config: out of memory!\n");
   2984 		    return NULL; /* XXX probably should panic? */
   2985 	}
   2986 
   2987 	if (!raidread_component_label(secsize, dev, vp, clabel)) {
   2988 		/* Got the label.  Does it look reasonable? */
   2989 		if (rf_reasonable_label(clabel) &&
   2990 		    (clabel->partitionSize <= size)) {
   2991 			rf_fix_old_label_size(clabel, numsecs);
   2992 #ifdef DEBUG
   2993 			printf("Component on: %s: %llu\n",
   2994 				cname, (unsigned long long)size);
   2995 			rf_print_component_label(clabel);
   2996 #endif
   2997 			/* if it's reasonable, add it, else ignore it. */
   2998 			ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
   2999 				M_NOWAIT);
   3000 			if (ac == NULL) {
   3001 				free(clabel, M_RAIDFRAME);
   3002 				goto oomem;
   3003 			}
   3004 			strlcpy(ac->devname, cname, sizeof(ac->devname));
   3005 			ac->dev = dev;
   3006 			ac->vp = vp;
   3007 			ac->clabel = clabel;
   3008 			ac->next = ac_list;
   3009 			ac_list = ac;
   3010 			good_one = 1;
   3011 		}
   3012 	}
   3013 	if (!good_one) {
   3014 		/* cleanup */
   3015 		free(clabel, M_RAIDFRAME);
   3016 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   3017 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   3018 		vput(vp);
   3019 	}
   3020 	return ac_list;
   3021 }
   3022 
   3023 RF_AutoConfig_t *
   3024 rf_find_raid_components(void)
   3025 {
   3026 	struct vnode *vp;
   3027 	struct disklabel label;
   3028 	device_t dv;
   3029 	deviter_t di;
   3030 	dev_t dev;
   3031 	int bmajor, bminor, wedge;
   3032 	int error;
   3033 	int i;
   3034 	RF_AutoConfig_t *ac_list;
   3035 	uint64_t numsecs;
   3036 	unsigned secsize;
   3037 
   3038 	RF_ASSERT(raidPtr->bytesPerSector < rf_component_info_offset());
   3039 
   3040 	/* initialize the AutoConfig list */
   3041 	ac_list = NULL;
   3042 
   3043 	/* we begin by trolling through *all* the devices on the system */
   3044 
   3045 	for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
   3046 	     dv = deviter_next(&di)) {
   3047 
   3048 		/* we are only interested in disks... */
   3049 		if (device_class(dv) != DV_DISK)
   3050 			continue;
   3051 
   3052 		/* we don't care about floppies... */
   3053 		if (device_is_a(dv, "fd")) {
   3054 			continue;
   3055 		}
   3056 
   3057 		/* we don't care about CD's... */
   3058 		if (device_is_a(dv, "cd")) {
   3059 			continue;
   3060 		}
   3061 
   3062 		/* we don't care about md's... */
   3063 		if (device_is_a(dv, "md")) {
   3064 			continue;
   3065 		}
   3066 
   3067 		/* hdfd is the Atari/Hades floppy driver */
   3068 		if (device_is_a(dv, "hdfd")) {
   3069 			continue;
   3070 		}
   3071 
   3072 		/* fdisa is the Atari/Milan floppy driver */
   3073 		if (device_is_a(dv, "fdisa")) {
   3074 			continue;
   3075 		}
   3076 
   3077 		/* need to find the device_name_to_block_device_major stuff */
   3078 		bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
   3079 
   3080 		/* get a vnode for the raw partition of this disk */
   3081 
   3082 		wedge = device_is_a(dv, "dk");
   3083 		bminor = minor(device_unit(dv));
   3084 		dev = wedge ? makedev(bmajor, bminor) :
   3085 		    MAKEDISKDEV(bmajor, bminor, RAW_PART);
   3086 		if (bdevvp(dev, &vp))
   3087 			panic("RAID can't alloc vnode");
   3088 
   3089 		error = VOP_OPEN(vp, FREAD, NOCRED);
   3090 
   3091 		if (error) {
   3092 			/* "Who cares."  Continue looking
    3093 			   for something that exists */
   3094 			vput(vp);
   3095 			continue;
   3096 		}
   3097 
   3098 		error = getdisksize(vp, &numsecs, &secsize);
   3099 		if (error) {
   3100 			vput(vp);
   3101 			continue;
   3102 		}
   3103 		if (wedge) {
   3104 			struct dkwedge_info dkw;
   3105 			error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
   3106 			    NOCRED);
   3107 			if (error) {
   3108 				printf("RAIDframe: can't get wedge info for "
   3109 				    "dev %s (%d)\n", device_xname(dv), error);
   3110 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   3111 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   3112 				vput(vp);
   3113 				continue;
   3114 			}
   3115 
   3116 			if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
   3117 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   3118 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   3119 				vput(vp);
   3120 				continue;
   3121 			}
   3122 
   3123 			ac_list = rf_get_component(ac_list, dev, vp,
   3124 			    device_xname(dv), dkw.dkw_size, numsecs, secsize);
   3125 			continue;
   3126 		}
   3127 
   3128 		/* Ok, the disk exists.  Go get the disklabel. */
   3129 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
   3130 		if (error) {
   3131 			/*
   3132 			 * XXX can't happen - open() would
   3133 			 * have errored out (or faked up one)
   3134 			 */
   3135 			if (error != ENOTTY)
   3136 				printf("RAIDframe: can't get label for dev "
   3137 				    "%s (%d)\n", device_xname(dv), error);
   3138 		}
   3139 
   3140 		/* don't need this any more.  We'll allocate it again
   3141 		   a little later if we really do... */
   3142 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   3143 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   3144 		vput(vp);
   3145 
   3146 		if (error)
   3147 			continue;
   3148 
   3149 		for (i = 0; i < label.d_npartitions; i++) {
   3150 			char cname[sizeof(ac_list->devname)];
   3151 
   3152 			/* We only support partitions marked as RAID */
   3153 			if (label.d_partitions[i].p_fstype != FS_RAID)
   3154 				continue;
   3155 
   3156 			dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
   3157 			if (bdevvp(dev, &vp))
   3158 				panic("RAID can't alloc vnode");
   3159 
   3160 			error = VOP_OPEN(vp, FREAD, NOCRED);
   3161 			if (error) {
   3162 				/* Whatever... */
   3163 				vput(vp);
   3164 				continue;
   3165 			}
   3166 			snprintf(cname, sizeof(cname), "%s%c",
   3167 			    device_xname(dv), 'a' + i);
   3168 			ac_list = rf_get_component(ac_list, dev, vp, cname,
   3169 				label.d_partitions[i].p_size, numsecs, secsize);
   3170 		}
   3171 	}
   3172 	deviter_release(&di);
   3173 	return ac_list;
   3174 }
   3175 
   3176 
   3177 static int
   3178 rf_reasonable_label(RF_ComponentLabel_t *clabel)
   3179 {
   3180 
   3181 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   3182 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   3183 	    ((clabel->clean == RF_RAID_CLEAN) ||
   3184 	     (clabel->clean == RF_RAID_DIRTY)) &&
   3185 	    clabel->row >=0 &&
   3186 	    clabel->column >= 0 &&
   3187 	    clabel->num_rows > 0 &&
   3188 	    clabel->num_columns > 0 &&
   3189 	    clabel->row < clabel->num_rows &&
   3190 	    clabel->column < clabel->num_columns &&
   3191 	    clabel->blockSize > 0 &&
   3192 	    clabel->numBlocks > 0) {
   3193 		/* label looks reasonable enough... */
   3194 		return(1);
   3195 	}
   3196 	return(0);
   3197 }
   3198 
   3199 
   3200 /*
   3201  * For reasons yet unknown, some old component labels have garbage in
   3202  * the newer numBlocksHi region, and this causes lossage.  Since those
   3203  * disks will also have numsecs set to less than 32 bits of sectors,
    3204  * we can determine when this corruption has occurred, and fix it.
   3205  */
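/* (numBlocksHi holds bits 32-63 of the component size, as shown in
 * rf_print_component_label() below, so a nonzero value on a component
 * with fewer than 2^32 sectors can only be stale garbage.) */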
   3206 static void
   3207 rf_fix_old_label_size(RF_ComponentLabel_t *clabel, uint64_t numsecs)
   3208 {
   3209 
   3210 	if (clabel->numBlocksHi && numsecs < ((uint64_t)1 << 32)) {
   3211 		printf("WARNING: total sectors < 32 bits, yet numBlocksHi set\n"
   3212 		       "WARNING: resetting numBlocksHi to zero.\n");
   3213 		clabel->numBlocksHi = 0;
   3214 	}
   3215 }
   3216 
   3217 
   3218 #ifdef DEBUG
   3219 void
   3220 rf_print_component_label(RF_ComponentLabel_t *clabel)
   3221 {
   3222 	uint64_t numBlocks = clabel->numBlocks;
   3223 
   3224 	numBlocks |= (uint64_t)clabel->numBlocksHi << 32;
   3225 
   3226 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
   3227 	       clabel->row, clabel->column,
   3228 	       clabel->num_rows, clabel->num_columns);
   3229 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
   3230 	       clabel->version, clabel->serial_number,
   3231 	       clabel->mod_counter);
   3232 	printf("   Clean: %s Status: %d\n",
   3233 	       clabel->clean ? "Yes" : "No", clabel->status);
   3234 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
   3235 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
   3236 	printf("   RAID Level: %c  blocksize: %d numBlocks: %"PRIu64"\n",
   3237 	       (char) clabel->parityConfig, clabel->blockSize, numBlocks);
   3238 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
   3239 	printf("   Contains root partition: %s\n",
   3240 	       clabel->root_partition ? "Yes" : "No");
   3241 	printf("   Last configured as: raid%d\n", clabel->last_unit);
   3242 #if 0
   3243 	   printf("   Config order: %d\n", clabel->config_order);
   3244 #endif
   3245 
   3246 }
   3247 #endif
   3248 
   3249 RF_ConfigSet_t *
   3250 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
   3251 {
   3252 	RF_AutoConfig_t *ac;
   3253 	RF_ConfigSet_t *config_sets;
   3254 	RF_ConfigSet_t *cset;
   3255 	RF_AutoConfig_t *ac_next;
   3256 
   3257 
   3258 	config_sets = NULL;
   3259 
   3260 	/* Go through the AutoConfig list, and figure out which components
   3261 	   belong to what sets.  */
   3262 	ac = ac_list;
   3263 	while(ac!=NULL) {
   3264 		/* we're going to putz with ac->next, so save it here
   3265 		   for use at the end of the loop */
   3266 		ac_next = ac->next;
   3267 
   3268 		if (config_sets == NULL) {
   3269 			/* will need at least this one... */
   3270 			config_sets = (RF_ConfigSet_t *)
   3271 				malloc(sizeof(RF_ConfigSet_t),
   3272 				       M_RAIDFRAME, M_NOWAIT);
   3273 			if (config_sets == NULL) {
   3274 				panic("rf_create_auto_sets: No memory!");
   3275 			}
   3276 			/* this one is easy :) */
   3277 			config_sets->ac = ac;
   3278 			config_sets->next = NULL;
   3279 			config_sets->rootable = 0;
   3280 			ac->next = NULL;
   3281 		} else {
   3282 			/* which set does this component fit into? */
   3283 			cset = config_sets;
   3284 			while(cset!=NULL) {
   3285 				if (rf_does_it_fit(cset, ac)) {
   3286 					/* looks like it matches... */
   3287 					ac->next = cset->ac;
   3288 					cset->ac = ac;
   3289 					break;
   3290 				}
   3291 				cset = cset->next;
   3292 			}
   3293 			if (cset==NULL) {
   3294 				/* didn't find a match above... new set..*/
   3295 				cset = (RF_ConfigSet_t *)
   3296 					malloc(sizeof(RF_ConfigSet_t),
   3297 					       M_RAIDFRAME, M_NOWAIT);
   3298 				if (cset == NULL) {
   3299 					panic("rf_create_auto_sets: No memory!");
   3300 				}
   3301 				cset->ac = ac;
   3302 				ac->next = NULL;
   3303 				cset->next = config_sets;
   3304 				cset->rootable = 0;
   3305 				config_sets = cset;
   3306 			}
   3307 		}
   3308 		ac = ac_next;
   3309 	}
   3310 
   3311 
   3312 	return(config_sets);
   3313 }
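/*
 * Illustrative (non-compiled) sketch of how the helpers in this file fit
 * together at autoconfiguration time: the probed components arrive as an
 * RF_AutoConfig_t list, rf_create_auto_sets() groups them into sets, and
 * each viable set is handed to rf_auto_config_set().  The real driver
 * loop lives elsewhere in this file; names such as ac_list below are
 * placeholders.
 */
#if 0
	RF_ConfigSet_t *cset, *next_cset;
	int unit;

	cset = rf_create_auto_sets(ac_list);
	while (cset != NULL) {
		next_cset = cset->next;
		if (rf_have_enough_components(cset) &&
		    cset->ac->clabel->autoconfigure == 1) {
			if (rf_auto_config_set(cset, &unit) != 0) {
				/* configuration failed; give the
				   component vnodes back */
				rf_release_all_vps(cset);
			}
		} else {
			/* not enough live components, or the set is not
			   marked for autoconfiguration */
			rf_release_all_vps(cset);
		}
		/* the label/autoconfig structures are no longer needed */
		rf_cleanup_config_set(cset);
		cset = next_cset;
	}
#endif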
   3314 
   3315 static int
   3316 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
   3317 {
   3318 	RF_ComponentLabel_t *clabel1, *clabel2;
   3319 
   3320 	/* If this one matches the *first* one in the set, that's good
   3321 	   enough, since the other members of the set would have been
   3322 	   through here too... */
   3323 	/* note that we are not checking partitionSize here..
   3324 
   3325 	   Note that we are also not checking the mod_counters here.
    3326 	   If everything else matches except the mod_counter, that's
   3327 	   good enough for this test.  We will deal with the mod_counters
   3328 	   a little later in the autoconfiguration process.
   3329 
   3330 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   3331 
   3332 	   The reason we don't check for this is that failed disks
   3333 	   will have lower modification counts.  If those disks are
   3334 	   not added to the set they used to belong to, then they will
   3335 	   form their own set, which may result in 2 different sets,
   3336 	   for example, competing to be configured at raid0, and
   3337 	   perhaps competing to be the root filesystem set.  If the
   3338 	   wrong ones get configured, or both attempt to become /,
    3339 	   weird behaviour and/or serious lossage will occur.  Thus we
   3340 	   need to bring them into the fold here, and kick them out at
   3341 	   a later point.
   3342 
   3343 	*/
   3344 
   3345 	clabel1 = cset->ac->clabel;
   3346 	clabel2 = ac->clabel;
   3347 	if ((clabel1->version == clabel2->version) &&
   3348 	    (clabel1->serial_number == clabel2->serial_number) &&
   3349 	    (clabel1->num_rows == clabel2->num_rows) &&
   3350 	    (clabel1->num_columns == clabel2->num_columns) &&
   3351 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   3352 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   3353 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   3354 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   3355 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   3356 	    (clabel1->blockSize == clabel2->blockSize) &&
   3357 	    (clabel1->numBlocks == clabel2->numBlocks) &&
   3358 	    (clabel1->numBlocksHi == clabel2->numBlocksHi) &&
   3359 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   3360 	    (clabel1->root_partition == clabel2->root_partition) &&
   3361 	    (clabel1->last_unit == clabel2->last_unit) &&
   3362 	    (clabel1->config_order == clabel2->config_order)) {
    3363 		/* if it gets here, it almost *has* to be a match */
   3364 	} else {
   3365 		/* it's not consistent with somebody in the set..
   3366 		   punt */
   3367 		return(0);
   3368 	}
   3369 	/* all was fine.. it must fit... */
   3370 	return(1);
   3371 }
   3372 
   3373 int
   3374 rf_have_enough_components(RF_ConfigSet_t *cset)
   3375 {
   3376 	RF_AutoConfig_t *ac;
   3377 	RF_AutoConfig_t *auto_config;
   3378 	RF_ComponentLabel_t *clabel;
   3379 	int c;
   3380 	int num_cols;
   3381 	int num_missing;
   3382 	int mod_counter;
   3383 	int mod_counter_found;
   3384 	int even_pair_failed;
   3385 	char parity_type;
   3386 
   3387 
   3388 	/* check to see that we have enough 'live' components
   3389 	   of this set.  If so, we can configure it if necessary */
   3390 
   3391 	num_cols = cset->ac->clabel->num_columns;
   3392 	parity_type = cset->ac->clabel->parityConfig;
   3393 
   3394 	/* XXX Check for duplicate components!?!?!? */
   3395 
   3396 	/* Determine what the mod_counter is supposed to be for this set. */
   3397 
   3398 	mod_counter_found = 0;
   3399 	mod_counter = 0;
   3400 	ac = cset->ac;
   3401 	while(ac!=NULL) {
   3402 		if (mod_counter_found==0) {
   3403 			mod_counter = ac->clabel->mod_counter;
   3404 			mod_counter_found = 1;
   3405 		} else {
   3406 			if (ac->clabel->mod_counter > mod_counter) {
   3407 				mod_counter = ac->clabel->mod_counter;
   3408 			}
   3409 		}
   3410 		ac = ac->next;
   3411 	}
   3412 
   3413 	num_missing = 0;
   3414 	auto_config = cset->ac;
   3415 
   3416 	even_pair_failed = 0;
   3417 	for(c=0; c<num_cols; c++) {
   3418 		ac = auto_config;
   3419 		while(ac!=NULL) {
   3420 			if ((ac->clabel->column == c) &&
   3421 			    (ac->clabel->mod_counter == mod_counter)) {
   3422 				/* it's this one... */
   3423 #ifdef DEBUG
   3424 				printf("Found: %s at %d\n",
   3425 				       ac->devname,c);
   3426 #endif
   3427 				break;
   3428 			}
   3429 			ac=ac->next;
   3430 		}
   3431 		if (ac==NULL) {
   3432 				/* Didn't find one here! */
   3433 				/* special case for RAID 1, especially
   3434 				   where there are more than 2
   3435 				   components (where RAIDframe treats
   3436 				   things a little differently :( ) */
   3437 			if (parity_type == '1') {
   3438 				if (c%2 == 0) { /* even component */
   3439 					even_pair_failed = 1;
   3440 				} else { /* odd component.  If
   3441 					    we're failed, and
   3442 					    so is the even
   3443 					    component, it's
   3444 					    "Good Night, Charlie" */
   3445 					if (even_pair_failed == 1) {
   3446 						return(0);
   3447 					}
   3448 				}
   3449 			} else {
   3450 				/* normal accounting */
   3451 				num_missing++;
   3452 			}
   3453 		}
   3454 		if ((parity_type == '1') && (c%2 == 1)) {
   3455 				/* Just did an even component, and we didn't
   3456 				   bail.. reset the even_pair_failed flag,
   3457 				   and go on to the next component.... */
   3458 			even_pair_failed = 0;
   3459 		}
   3460 	}
   3461 
   3462 	clabel = cset->ac->clabel;
   3463 
   3464 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
   3465 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
   3466 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
   3467 		/* XXX this needs to be made *much* more general */
   3468 		/* Too many failures */
   3469 		return(0);
   3470 	}
   3471 	/* otherwise, all is well, and we've got enough to take a kick
   3472 	   at autoconfiguring this set */
   3473 	return(1);
   3474 }
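/*
 * Worked example of the checks above: a RAID 4 ('4') or RAID 5 ('5')
 * set tolerates one missing (or stale-mod_counter) component and still
 * configures, but is rejected with two; a RAID 0 ('0') set is rejected
 * if anything at all is missing; a RAID 1 ('1') set is rejected only
 * when both members of an even/odd mirror pair are missing.
 */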
   3475 
   3476 void
   3477 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
   3478 			RF_Raid_t *raidPtr)
   3479 {
   3480 	RF_ComponentLabel_t *clabel;
   3481 	int i;
   3482 
   3483 	clabel = ac->clabel;
   3484 
   3485 	/* 1. Fill in the common stuff */
   3486 	config->numRow = clabel->num_rows = 1;
   3487 	config->numCol = clabel->num_columns;
   3488 	config->numSpare = 0; /* XXX should this be set here? */
   3489 	config->sectPerSU = clabel->sectPerSU;
   3490 	config->SUsPerPU = clabel->SUsPerPU;
   3491 	config->SUsPerRU = clabel->SUsPerRU;
   3492 	config->parityConfig = clabel->parityConfig;
   3493 	/* XXX... */
   3494 	strcpy(config->diskQueueType,"fifo");
   3495 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3496 	config->layoutSpecificSize = 0; /* XXX ?? */
   3497 
   3498 	while(ac!=NULL) {
   3499 		/* row/col values will be in range due to the checks
   3500 		   in reasonable_label() */
   3501 		strcpy(config->devnames[0][ac->clabel->column],
   3502 		       ac->devname);
   3503 		ac = ac->next;
   3504 	}
   3505 
   3506 	for(i=0;i<RF_MAXDBGV;i++) {
   3507 		config->debugVars[i][0] = 0;
   3508 	}
   3509 }
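/*
 * For example, a three-component RAID 5 set ends up with numRow = 1,
 * numCol = 3, parityConfig = '5', a "fifo" disk queue, and
 * devnames[0][0..2] filled in from the component labels' column
 * numbers; numSpare is left at 0 here.
 */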
   3510 
   3511 int
   3512 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
   3513 {
   3514 	RF_ComponentLabel_t *clabel;
   3515 	int column;
   3516 	int sparecol;
   3517 
   3518 	raidPtr->autoconfigure = new_value;
   3519 
   3520 	for(column=0; column<raidPtr->numCol; column++) {
   3521 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3522 			clabel = raidget_component_label(raidPtr, column);
   3523 			clabel->autoconfigure = new_value;
   3524 			raidflush_component_label(raidPtr, column);
   3525 		}
   3526 	}
   3527 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3528 		sparecol = raidPtr->numCol + column;
   3529 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3530 			clabel = raidget_component_label(raidPtr, sparecol);
   3531 			clabel->autoconfigure = new_value;
   3532 			raidflush_component_label(raidPtr, sparecol);
   3533 		}
   3534 	}
   3535 	return(new_value);
   3536 }
   3537 
   3538 int
   3539 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
   3540 {
   3541 	RF_ComponentLabel_t *clabel;
   3542 	int column;
   3543 	int sparecol;
   3544 
   3545 	raidPtr->root_partition = new_value;
   3546 	for(column=0; column<raidPtr->numCol; column++) {
   3547 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3548 			clabel = raidget_component_label(raidPtr, column);
   3549 			clabel->root_partition = new_value;
   3550 			raidflush_component_label(raidPtr, column);
   3551 		}
   3552 	}
   3553 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3554 		sparecol = raidPtr->numCol + column;
   3555 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3556 			clabel = raidget_component_label(raidPtr, sparecol);
   3557 			clabel->root_partition = new_value;
   3558 			raidflush_component_label(raidPtr, sparecol);
   3559 		}
   3560 	}
   3561 	return(new_value);
   3562 }
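/*
 * Both setters above update the in-core flag and then push the new
 * value into the label of every optimal component and every in-use
 * spare.  Minimal usage sketch; the real callers (the corresponding
 * ioctl handlers) live elsewhere in this file:
 */
#if 0
	/* mark the set for autoconfiguration and as root-eligible */
	rf_set_autoconfig(raidPtr, 1);
	rf_set_rootpartition(raidPtr, 1);
#endif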
   3563 
   3564 void
   3565 rf_release_all_vps(RF_ConfigSet_t *cset)
   3566 {
   3567 	RF_AutoConfig_t *ac;
   3568 
   3569 	ac = cset->ac;
   3570 	while(ac!=NULL) {
   3571 		/* Close the vp, and give it back */
   3572 		if (ac->vp) {
   3573 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3574 			VOP_CLOSE(ac->vp, FREAD, NOCRED);
   3575 			vput(ac->vp);
   3576 			ac->vp = NULL;
   3577 		}
   3578 		ac = ac->next;
   3579 	}
   3580 }
   3581 
   3582 
   3583 void
   3584 rf_cleanup_config_set(RF_ConfigSet_t *cset)
   3585 {
   3586 	RF_AutoConfig_t *ac;
   3587 	RF_AutoConfig_t *next_ac;
   3588 
   3589 	ac = cset->ac;
   3590 	while(ac!=NULL) {
   3591 		next_ac = ac->next;
   3592 		/* nuke the label */
   3593 		free(ac->clabel, M_RAIDFRAME);
   3594 		/* cleanup the config structure */
   3595 		free(ac, M_RAIDFRAME);
   3596 		/* "next.." */
   3597 		ac = next_ac;
   3598 	}
   3599 	/* and, finally, nuke the config set */
   3600 	free(cset, M_RAIDFRAME);
   3601 }
   3602 
   3603 
   3604 void
   3605 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
   3606 {
   3607 	/* current version number */
   3608 	clabel->version = RF_COMPONENT_LABEL_VERSION;
   3609 	clabel->serial_number = raidPtr->serial_number;
   3610 	clabel->mod_counter = raidPtr->mod_counter;
   3611 
   3612 	clabel->num_rows = 1;
   3613 	clabel->num_columns = raidPtr->numCol;
   3614 	clabel->clean = RF_RAID_DIRTY; /* not clean */
   3615 	clabel->status = rf_ds_optimal; /* "It's good!" */
   3616 
   3617 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
   3618 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
   3619 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
   3620 
   3621 	clabel->blockSize = raidPtr->bytesPerSector;
   3622 	clabel->numBlocks = raidPtr->sectorsPerDisk;
   3623 	clabel->numBlocksHi = raidPtr->sectorsPerDisk >> 32;
   3624 
   3625 	/* XXX not portable */
   3626 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
   3627 	clabel->maxOutstanding = raidPtr->maxOutstanding;
   3628 	clabel->autoconfigure = raidPtr->autoconfigure;
   3629 	clabel->root_partition = raidPtr->root_partition;
   3630 	clabel->last_unit = raidPtr->raidid;
   3631 	clabel->config_order = raidPtr->config_order;
   3632 
   3633 #ifndef RF_NO_PARITY_MAP
   3634 	rf_paritymap_init_label(raidPtr->parity_map, clabel);
   3635 #endif
   3636 }
   3637 
   3638 int
   3639 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
   3640 {
   3641 	RF_Raid_t *raidPtr;
   3642 	RF_Config_t *config;
   3643 	int raidID;
   3644 	int retcode;
   3645 
   3646 #ifdef DEBUG
   3647 	printf("RAID autoconfigure\n");
   3648 #endif
   3649 
   3650 	retcode = 0;
   3651 	*unit = -1;
   3652 
   3653 	/* 1. Create a config structure */
   3654 
   3655 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
   3656 				       M_RAIDFRAME,
   3657 				       M_NOWAIT);
   3658 	if (config==NULL) {
   3659 		printf("Out of mem!?!?\n");
   3660 				/* XXX do something more intelligent here. */
   3661 		return(1);
   3662 	}
   3663 
   3664 	memset(config, 0, sizeof(RF_Config_t));
   3665 
   3666 	/*
   3667 	   2. Figure out what RAID ID this one is supposed to live at
   3668 	   See if we can get the same RAID dev that it was configured
   3669 	   on last time..
   3670 	*/
   3671 
   3672 	raidID = cset->ac->clabel->last_unit;
   3673 	if ((raidID < 0) || (raidID >= numraid)) {
   3674 		/* let's not wander off into lala land. */
   3675 		raidID = numraid - 1;
   3676 	}
   3677 	if (raidPtrs[raidID]->valid != 0) {
   3678 
   3679 		/*
   3680 		   Nope... Go looking for an alternative...
   3681 		   Start high so we don't immediately use raid0 if that's
   3682 		   not taken.
   3683 		*/
   3684 
   3685 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
   3686 			if (raidPtrs[raidID]->valid == 0) {
   3687 				/* can use this one! */
   3688 				break;
   3689 			}
   3690 		}
   3691 	}
   3692 
   3693 	if (raidID < 0) {
   3694 		/* punt... */
   3695 		printf("Unable to auto configure this set!\n");
   3696 		printf("(Out of RAID devs!)\n");
   3697 		free(config, M_RAIDFRAME);
   3698 		return(1);
   3699 	}
   3700 
   3701 #ifdef DEBUG
   3702 	printf("Configuring raid%d:\n",raidID);
   3703 #endif
   3704 
   3705 	raidPtr = raidPtrs[raidID];
   3706 
   3707 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
   3708 	raidPtr->raidid = raidID;
   3709 	raidPtr->openings = RAIDOUTSTANDING;
   3710 
   3711 	/* 3. Build the configuration structure */
   3712 	rf_create_configuration(cset->ac, config, raidPtr);
   3713 
   3714 	/* 4. Do the configuration */
   3715 	retcode = rf_Configure(raidPtr, config, cset->ac);
   3716 
   3717 	if (retcode == 0) {
   3718 
   3719 		raidinit(raidPtrs[raidID]);
   3720 
   3721 		rf_markalldirty(raidPtrs[raidID]);
   3722 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
   3723 		if (cset->ac->clabel->root_partition==1) {
   3724 			/* everything configured just fine.  Make a note
   3725 			   that this set is eligible to be root. */
   3726 			cset->rootable = 1;
   3727 			/* XXX do this here? */
   3728 			raidPtrs[raidID]->root_partition = 1;
   3729 		}
   3730 	}
   3731 
   3732 	/* 5. Cleanup */
   3733 	free(config, M_RAIDFRAME);
   3734 
   3735 	*unit = raidID;
   3736 	return(retcode);
   3737 }
   3738 
   3739 void
   3740 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
   3741 {
   3742 	struct buf *bp;
   3743 
   3744 	bp = (struct buf *)desc->bp;
   3745 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
   3746 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
   3747 }
   3748 
   3749 void
   3750 rf_pool_init(struct pool *p, size_t size, const char *w_chan,
   3751 	     size_t xmin, size_t xmax)
   3752 {
   3753 	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
   3754 	pool_sethiwat(p, xmax);
   3755 	pool_prime(p, xmin);
   3756 	pool_setlowat(p, xmin);
   3757 }
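/*
 * rf_pool_init() wraps pool_init() and seeds the watermarks: the pool
 * is primed with xmin items and xmin/xmax become its low/high water
 * marks.  Hypothetical usage sketch; the pool, wait channel and item
 * type below are made up for illustration:
 */
#if 0
	static struct pool rf_example_pool;

	/* keep roughly 8..32 items of this (made-up) type on hand */
	rf_pool_init(&rf_example_pool, sizeof(struct rf_example_item),
	    "rfexpl", 8, 32);
#endif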
   3758 
   3759 /*
   3760  * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
   3761  * if there is IO pending and if that IO could possibly be done for a
   3762  * given RAID set.  Returns 0 if IO is waiting and can be done, 1
   3763  * otherwise.
   3764  *
   3765  */
   3766 
   3767 int
   3768 rf_buf_queue_check(int raidid)
   3769 {
   3770 	if ((bufq_peek(raid_softc[raidid].buf_queue) != NULL) &&
   3771 	    raidPtrs[raidid]->openings > 0) {
   3772 		/* there is work to do */
   3773 		return 0;
   3774 	}
   3775 	/* default is nothing to do */
   3776 	return 1;
   3777 }
   3778 
   3779 int
   3780 rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
   3781 {
   3782 	uint64_t numsecs;
   3783 	unsigned secsize;
   3784 	int error;
   3785 
   3786 	error = getdisksize(vp, &numsecs, &secsize);
   3787 	if (error == 0) {
   3788 		diskPtr->blockSize = secsize;
   3789 		diskPtr->numBlocks = numsecs - rf_protectedSectors;
   3790 		diskPtr->partitionSize = numsecs;
   3791 		return 0;
   3792 	}
   3793 	return error;
   3794 }
   3795 
   3796 static int
   3797 raid_match(device_t self, cfdata_t cfdata, void *aux)
   3798 {
   3799 	return 1;
   3800 }
   3801 
   3802 static void
   3803 raid_attach(device_t parent, device_t self, void *aux)
   3804 {
   3805 
   3806 }
   3807 
   3808 
   3809 static int
   3810 raid_detach(device_t self, int flags)
   3811 {
   3812 	int error;
   3813 	struct raid_softc *rs = &raid_softc[device_unit(self)];
   3814 
   3815 	if ((error = raidlock(rs)) != 0)
   3816 		return (error);
   3817 
   3818 	error = raid_detach_unlocked(rs);
   3819 
   3820 	raidunlock(rs);
   3821 
   3822 	return error;
   3823 }
   3824 
   3825 static void
   3826 rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
   3827 {
   3828 	prop_dictionary_t disk_info, odisk_info, geom;
   3829 	disk_info = prop_dictionary_create();
   3830 	geom = prop_dictionary_create();
   3831 	prop_dictionary_set_uint64(geom, "sectors-per-unit",
   3832 				   raidPtr->totalSectors);
   3833 	prop_dictionary_set_uint32(geom, "sector-size",
   3834 				   raidPtr->bytesPerSector);
   3835 
   3836 	prop_dictionary_set_uint16(geom, "sectors-per-track",
   3837 				   raidPtr->Layout.dataSectorsPerStripe);
   3838 	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
   3839 				   4 * raidPtr->numCol);
   3840 
   3841 	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
   3842 	   raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
   3843 	   (4 * raidPtr->numCol)));
   3844 
   3845 	prop_dictionary_set(disk_info, "geometry", geom);
   3846 	prop_object_release(geom);
   3847 	prop_dictionary_set(device_properties(rs->sc_dev),
   3848 			    "disk-info", disk_info);
   3849 	odisk_info = rs->sc_dkdev.dk_info;
   3850 	rs->sc_dkdev.dk_info = disk_info;
   3851 	if (odisk_info)
   3852 		prop_object_release(odisk_info);
   3853 }
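/*
 * The geometry synthesized above is artificial: "sectors per track" is
 * the number of data sectors per stripe and "tracks per cylinder" is
 * 4 * numCol.  For example (made-up numbers), a 5-column set with 128
 * data sectors per stripe and 1000000 total sectors would report 128
 * sectors/track, 20 tracks/cylinder and 1000000 / (128 * 20) = 390
 * cylinders (integer division).
 */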
   3854 
   3855 /*
   3856  * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
   3857  * We end up returning whatever error was returned by the first cache flush
   3858  * that fails.
   3859  */
   3860 
   3861 int
   3862 rf_sync_component_caches(RF_Raid_t *raidPtr)
   3863 {
   3864 	int c, sparecol;
   3865 	int e,error;
   3866 	int force = 1;
   3867 
   3868 	error = 0;
   3869 	for (c = 0; c < raidPtr->numCol; c++) {
   3870 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   3871 			e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
   3872 					  &force, FWRITE, NOCRED);
   3873 			if (e) {
   3874 				if (e != ENODEV)
   3875 					printf("raid%d: cache flush to component %s failed.\n",
   3876 					       raidPtr->raidid, raidPtr->Disks[c].devname);
   3877 				if (error == 0) {
   3878 					error = e;
   3879 				}
   3880 			}
   3881 		}
   3882 	}
   3883 
   3884 	for( c = 0; c < raidPtr->numSpare ; c++) {
   3885 		sparecol = raidPtr->numCol + c;
   3886 		/* Need to ensure that the reconstruct actually completed! */
   3887 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3888 			e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
   3889 					  DIOCCACHESYNC, &force, FWRITE, NOCRED);
   3890 			if (e) {
   3891 				if (e != ENODEV)
   3892 					printf("raid%d: cache flush to component %s failed.\n",
   3893 					       raidPtr->raidid, raidPtr->Disks[sparecol].devname);
   3894 				if (error == 0) {
   3895 					error = e;
   3896 				}
   3897 			}
   3898 		}
   3899 	}
   3900 	return error;
   3901 }
   3902