Home | History | Annotate | Line # | Download | only in raidframe
rf_netbsdkintf.c revision 1.336
      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.336 2016/01/04 11:12:40 mlelstv Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1998, 2008-2011 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Greg Oster; Jason R. Thorpe.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1988 University of Utah.
     34  * Copyright (c) 1990, 1993
     35  *      The Regents of the University of California.  All rights reserved.
     36  *
     37  * This code is derived from software contributed to Berkeley by
     38  * the Systems Programming Group of the University of Utah Computer
     39  * Science Department.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     66  *
     67  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     68  */
     69 
     70 /*
     71  * Copyright (c) 1995 Carnegie-Mellon University.
     72  * All rights reserved.
     73  *
     74  * Authors: Mark Holland, Jim Zelenka
     75  *
     76  * Permission to use, copy, modify and distribute this software and
     77  * its documentation is hereby granted, provided that both the copyright
     78  * notice and this permission notice appear in all copies of the
     79  * software, derivative works or modified versions, and any portions
     80  * thereof, and that both notices appear in supporting documentation.
     81  *
     82  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     83  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     84  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     85  *
     86  * Carnegie Mellon requests users of this software to return to
     87  *
     88  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
     89  *  School of Computer Science
     90  *  Carnegie Mellon University
     91  *  Pittsburgh PA 15213-3890
     92  *
     93  * any improvements or extensions that they make and grant Carnegie the
     94  * rights to redistribute these changes.
     95  */
     96 
     97 /***********************************************************
     98  *
     99  * rf_kintf.c -- the kernel interface routines for RAIDframe
    100  *
    101  ***********************************************************/
    102 
    103 #include <sys/cdefs.h>
    104 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.336 2016/01/04 11:12:40 mlelstv Exp $");
    105 
    106 #ifdef _KERNEL_OPT
    107 #include "opt_compat_netbsd.h"
    108 #include "opt_raid_autoconfig.h"
    109 #endif
    110 
    111 #include <sys/param.h>
    112 #include <sys/errno.h>
    113 #include <sys/pool.h>
    114 #include <sys/proc.h>
    115 #include <sys/queue.h>
    116 #include <sys/disk.h>
    117 #include <sys/device.h>
    118 #include <sys/stat.h>
    119 #include <sys/ioctl.h>
    120 #include <sys/fcntl.h>
    121 #include <sys/systm.h>
    122 #include <sys/vnode.h>
    123 #include <sys/disklabel.h>
    124 #include <sys/conf.h>
    125 #include <sys/buf.h>
    126 #include <sys/bufq.h>
    127 #include <sys/reboot.h>
    128 #include <sys/kauth.h>
    129 #include <sys/module.h>
    130 
    131 #include <prop/proplib.h>
    132 
    133 #include <dev/raidframe/raidframevar.h>
    134 #include <dev/raidframe/raidframeio.h>
    135 #include <dev/raidframe/rf_paritymap.h>
    136 
    137 #include "rf_raid.h"
    138 #include "rf_copyback.h"
    139 #include "rf_dag.h"
    140 #include "rf_dagflags.h"
    141 #include "rf_desc.h"
    142 #include "rf_diskqueue.h"
    143 #include "rf_etimer.h"
    144 #include "rf_general.h"
    145 #include "rf_kintf.h"
    146 #include "rf_options.h"
    147 #include "rf_driver.h"
    148 #include "rf_parityscan.h"
    149 #include "rf_threadstuff.h"
    150 
    151 #ifdef COMPAT_50
    152 #include "rf_compat50.h"
    153 #endif
    154 
    155 #include "ioconf.h"
    156 
    157 #ifdef DEBUG
    158 int     rf_kdebug_level = 0;
    159 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    160 #else				/* DEBUG */
    161 #define db1_printf(a) { }
    162 #endif				/* DEBUG */
    163 
    164 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    165 static rf_declare_mutex2(rf_sparet_wait_mutex);
    166 static rf_declare_cond2(rf_sparet_wait_cv);
    167 static rf_declare_cond2(rf_sparet_resp_cv);
    168 
    169 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    170 						 * spare table */
    171 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    172 						 * installation process */
    173 #endif
    174 
    175 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    176 
    177 /* prototypes */
    178 static void KernelWakeupFunc(struct buf *);
    179 static void InitBP(struct buf *, struct vnode *, unsigned,
    180     dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
    181     void *, int, struct proc *);
    182 struct raid_softc;
    183 static void raidinit(struct raid_softc *);
    184 static int raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp);
    185 
    186 static int raid_match(device_t, cfdata_t, void *);
    187 static void raid_attach(device_t, device_t, void *);
    188 static int raid_detach(device_t, int);
    189 
    190 static int raidread_component_area(dev_t, struct vnode *, void *, size_t,
    191     daddr_t, daddr_t);
    192 static int raidwrite_component_area(dev_t, struct vnode *, void *, size_t,
    193     daddr_t, daddr_t, int);
    194 
    195 static int raidwrite_component_label(unsigned,
    196     dev_t, struct vnode *, RF_ComponentLabel_t *);
    197 static int raidread_component_label(unsigned,
    198     dev_t, struct vnode *, RF_ComponentLabel_t *);
    199 
    200 static int raid_diskstart(device_t, struct buf *bp);
    201 static int raid_dumpblocks(device_t, void *, daddr_t, int);
    202 static int raid_lastclose(device_t);
    203 
    204 static dev_type_open(raidopen);
    205 static dev_type_close(raidclose);
    206 static dev_type_read(raidread);
    207 static dev_type_write(raidwrite);
    208 static dev_type_ioctl(raidioctl);
    209 static dev_type_strategy(raidstrategy);
    210 static dev_type_dump(raiddump);
    211 static dev_type_size(raidsize);
    212 
    213 const struct bdevsw raid_bdevsw = {
    214 	.d_open = raidopen,
    215 	.d_close = raidclose,
    216 	.d_strategy = raidstrategy,
    217 	.d_ioctl = raidioctl,
    218 	.d_dump = raiddump,
    219 	.d_psize = raidsize,
    220 	.d_discard = nodiscard,
    221 	.d_flag = D_DISK
    222 };
    223 
    224 const struct cdevsw raid_cdevsw = {
    225 	.d_open = raidopen,
    226 	.d_close = raidclose,
    227 	.d_read = raidread,
    228 	.d_write = raidwrite,
    229 	.d_ioctl = raidioctl,
    230 	.d_stop = nostop,
    231 	.d_tty = notty,
    232 	.d_poll = nopoll,
    233 	.d_mmap = nommap,
    234 	.d_kqfilter = nokqfilter,
    235 	.d_discard = nodiscard,
    236 	.d_flag = D_DISK
    237 };
    238 
    239 static struct dkdriver rf_dkdriver = {
    240 	.d_open = raidopen,
    241 	.d_close = raidclose,
    242 	.d_strategy = raidstrategy,
    243 	.d_diskstart = raid_diskstart,
    244 	.d_dumpblocks = raid_dumpblocks,
    245 	.d_lastclose = raid_lastclose,
    246 	.d_minphys = minphys
    247 };
    248 
/* Per-unit software state for a raid(4) pseudo-device. */
struct raid_softc {
	struct dk_softc sc_dksc;	/* generic disk state (dk(4) framework) */
	int	sc_unit;		/* raid unit number */
	int     sc_flags;	/* flags */
	int     sc_cflags;	/* configuration flags */
	kmutex_t sc_mutex;	/* interlock mutex */
	kcondvar_t sc_cv;	/* and the condvar */
	uint64_t sc_size;	/* size of the raid device */
	char    sc_xname[20];	/* XXX external name */
	RF_Raid_t sc_r;		/* RAIDframe state for this set */
	LIST_ENTRY(raid_softc) sc_link;	/* entry on the global `raids' list */
};
    261 /* sc_flags */
    262 #define RAIDF_INITED	0x01	/* unit has been initialized */
    263 #define RAIDF_WLABEL	0x02	/* label area is writable */
    264 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    265 #define RAIDF_SHUTDOWN	0x08	/* unit is being shutdown */
    266 #define RAIDF_DETACH  	0x10	/* detach after final close */
    267 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    268 #define RAIDF_LOCKED	0x80	/* unit is locked */
    269 
    270 #define	raidunit(x)	DISKUNIT(x)
    271 #define	raidsoftc(dev)	(((struct raid_softc *)device_private(dev))->sc_r.softc)
    272 
    273 extern struct cfdriver raid_cd;
    274 CFATTACH_DECL3_NEW(raid, sizeof(struct raid_softc),
    275     raid_match, raid_attach, raid_detach, NULL, NULL, NULL,
    276     DVF_DETACH_SHUTDOWN);
    277 
    278 /*
    279  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    280  * Be aware that large numbers can allow the driver to consume a lot of
    281  * kernel memory, especially on writes, and in degraded mode reads.
    282  *
    283  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    284  * a single 64K write will typically require 64K for the old data,
    285  * 64K for the old parity, and 64K for the new parity, for a total
    286  * of 192K (if the parity buffer is not re-used immediately).
    287  * Even it if is used immediately, that's still 128K, which when multiplied
    288  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    289  *
    290  * Now in degraded mode, for example, a 64K read on the above setup may
    291  * require data reconstruction, which will require *all* of the 4 remaining
    292  * disks to participate -- 4 * 32K/disk == 128K again.
    293  */
    294 
    295 #ifndef RAIDOUTSTANDING
    296 #define RAIDOUTSTANDING   6
    297 #endif
    298 
    299 #define RAIDLABELDEV(dev)	\
    300 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
    301 
    302 /* declared here, and made public, for the benefit of KVM stuff.. */
    303 
    304 static int raidlock(struct raid_softc *);
    305 static void raidunlock(struct raid_softc *);
    306 
    307 static int raid_detach_unlocked(struct raid_softc *);
    308 
    309 static void rf_markalldirty(RF_Raid_t *);
    310 static void rf_set_geometry(struct raid_softc *, RF_Raid_t *);
    311 
    312 void rf_ReconThread(struct rf_recon_req *);
    313 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    314 void rf_CopybackThread(RF_Raid_t *raidPtr);
    315 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    316 int rf_autoconfig(device_t);
    317 void rf_buildroothack(RF_ConfigSet_t *);
    318 
    319 RF_AutoConfig_t *rf_find_raid_components(void);
    320 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    321 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    322 int rf_reasonable_label(RF_ComponentLabel_t *, uint64_t);
    323 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    324 int rf_set_autoconfig(RF_Raid_t *, int);
    325 int rf_set_rootpartition(RF_Raid_t *, int);
    326 void rf_release_all_vps(RF_ConfigSet_t *);
    327 void rf_cleanup_config_set(RF_ConfigSet_t *);
    328 int rf_have_enough_components(RF_ConfigSet_t *);
    329 struct raid_softc *rf_auto_config_set(RF_ConfigSet_t *);
    330 static void rf_fix_old_label_size(RF_ComponentLabel_t *, uint64_t);
    331 
    332 /*
    333  * Debugging, mostly.  Set to 0 to not allow autoconfig to take place.
    334  * Note that this is overridden by having RAID_AUTOCONFIG as an option
    335  * in the kernel config file.
    336  */
    337 #ifdef RAID_AUTOCONFIG
    338 int raidautoconfig = 1;
    339 #else
    340 int raidautoconfig = 0;
    341 #endif
    342 static bool raidautoconfigdone = false;
    343 
    344 struct RF_Pools_s rf_pools;
    345 
    346 static LIST_HEAD(, raid_softc) raids = LIST_HEAD_INITIALIZER(raids);
    347 static kmutex_t raid_lock;
    348 
/*
 * Allocate and minimally initialize a raid_softc for `unit'.
 * Returns the new softc, or NULL on allocation failure.
 *
 * NOTE(review): kmem_zalloc(9) with KM_SLEEP is documented not to
 * fail on NetBSD, so the NULL branch below looks unreachable -- confirm.
 */
static struct raid_softc *
raidcreate(int unit) {
	struct raid_softc *sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
	if (sc == NULL) {
#ifdef DIAGNOSTIC
		printf("%s: out of memory\n", __func__);
#endif
		return NULL;
	}
	sc->sc_unit = unit;
	cv_init(&sc->sc_cv, "raidunit");	/* wait-channel name */
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_NONE);
	return sc;
}
    363 
    364 static void
    365 raiddestroy(struct raid_softc *sc) {
    366 	cv_destroy(&sc->sc_cv);
    367 	mutex_destroy(&sc->sc_mutex);
    368 	kmem_free(sc, sizeof(*sc));
    369 }
    370 
    371 static struct raid_softc *
    372 raidget(int unit, bool create) {
    373 	struct raid_softc *sc;
    374 	if (unit < 0) {
    375 #ifdef DIAGNOSTIC
    376 		panic("%s: unit %d!", __func__, unit);
    377 #endif
    378 		return NULL;
    379 	}
    380 	mutex_enter(&raid_lock);
    381 	LIST_FOREACH(sc, &raids, sc_link) {
    382 		if (sc->sc_unit == unit) {
    383 			mutex_exit(&raid_lock);
    384 			return sc;
    385 		}
    386 	}
    387 	mutex_exit(&raid_lock);
    388 	if (!create)
    389 		return NULL;
    390 	if ((sc = raidcreate(unit)) == NULL)
    391 		return NULL;
    392 	mutex_enter(&raid_lock);
    393 	LIST_INSERT_HEAD(&raids, sc, sc_link);
    394 	mutex_exit(&raid_lock);
    395 	return sc;
    396 }
    397 
    398 static void
    399 raidput(struct raid_softc *sc) {
    400 	mutex_enter(&raid_lock);
    401 	LIST_REMOVE(sc, sc_link);
    402 	mutex_exit(&raid_lock);
    403 	raiddestroy(sc);
    404 }
    405 
/*
 * Legacy pseudo-device attach hook; `num' is unused.
 */
void
raidattach(int num)
{

	/*
	 * Device attachment and associated initialization now occurs
	 * as part of the module initialization.
	 */
}
    415 
    416 int
    417 rf_autoconfig(device_t self)
    418 {
    419 	RF_AutoConfig_t *ac_list;
    420 	RF_ConfigSet_t *config_sets;
    421 
    422 	if (!raidautoconfig || raidautoconfigdone == true)
    423 		return (0);
    424 
    425 	/* XXX This code can only be run once. */
    426 	raidautoconfigdone = true;
    427 
    428 #ifdef __HAVE_CPU_BOOTCONF
    429 	/*
    430 	 * 0. find the boot device if needed first so we can use it later
    431 	 * this needs to be done before we autoconfigure any raid sets,
    432 	 * because if we use wedges we are not going to be able to open
    433 	 * the boot device later
    434 	 */
    435 	if (booted_device == NULL)
    436 		cpu_bootconf();
    437 #endif
    438 	/* 1. locate all RAID components on the system */
    439 	aprint_debug("Searching for RAID components...\n");
    440 	ac_list = rf_find_raid_components();
    441 
    442 	/* 2. Sort them into their respective sets. */
    443 	config_sets = rf_create_auto_sets(ac_list);
    444 
    445 	/*
    446 	 * 3. Evaluate each set and configure the valid ones.
    447 	 * This gets done in rf_buildroothack().
    448 	 */
    449 	rf_buildroothack(config_sets);
    450 
    451 	return 1;
    452 }
    453 
/*
 * Return 1 if any component of the set `r' lives on device `bdv'
 * (typically the boot device), 0 otherwise.  Wedge components ("dk*")
 * are translated to their parent disk's name before comparison.
 *
 * NOTE(review): the comparison is a prefix match over the boot device
 * name, so a component name with a partition suffix (e.g. "wd0a" vs
 * boot "wd0") matches; this presumably assumes unit numbers never make
 * one device name a strict prefix of another -- confirm.
 */
static int
rf_containsboot(RF_Raid_t *r, device_t bdv) {
	const char *bootname = device_xname(bdv);
	size_t len = strlen(bootname);

	for (int col = 0; col < r->numCol; col++) {
		const char *devname = r->Disks[col].devname;
		devname += sizeof("/dev/") - 1;	/* skip the "/dev/" prefix */
		if (strncmp(devname, "dk", 2) == 0) {
			/* wedge: compare against the parent disk instead */
			const char *parent =
			    dkwedge_get_parent_name(r->Disks[col].dev);
			if (parent != NULL)
				devname = parent;
		}
		if (strncmp(devname, bootname, len) == 0) {
			struct raid_softc *sc = r->softc;
			aprint_debug("raid%d includes boot device %s\n",
			    sc->sc_unit, devname);
			return 1;
		}
	}
	return 0;
}
    477 
    478 void
    479 rf_buildroothack(RF_ConfigSet_t *config_sets)
    480 {
    481 	RF_ConfigSet_t *cset;
    482 	RF_ConfigSet_t *next_cset;
    483 	int num_root;
    484 	struct raid_softc *sc, *rsc;
    485 	struct dk_softc *dksc;
    486 
    487 	sc = rsc = NULL;
    488 	num_root = 0;
    489 	cset = config_sets;
    490 	while (cset != NULL) {
    491 		next_cset = cset->next;
    492 		if (rf_have_enough_components(cset) &&
    493 		    cset->ac->clabel->autoconfigure == 1) {
    494 			sc = rf_auto_config_set(cset);
    495 			if (sc != NULL) {
    496 				aprint_debug("raid%d: configured ok\n",
    497 				    sc->sc_unit);
    498 				if (cset->rootable) {
    499 					rsc = sc;
    500 					num_root++;
    501 				}
    502 			} else {
    503 				/* The autoconfig didn't work :( */
    504 				aprint_debug("Autoconfig failed\n");
    505 				rf_release_all_vps(cset);
    506 			}
    507 		} else {
    508 			/* we're not autoconfiguring this set...
    509 			   release the associated resources */
    510 			rf_release_all_vps(cset);
    511 		}
    512 		/* cleanup */
    513 		rf_cleanup_config_set(cset);
    514 		cset = next_cset;
    515 	}
    516 	dksc = &rsc->sc_dksc;
    517 
    518 	/* if the user has specified what the root device should be
    519 	   then we don't touch booted_device or boothowto... */
    520 
    521 	if (rootspec != NULL)
    522 		return;
    523 
    524 	/* we found something bootable... */
    525 
    526 	/*
    527 	 * XXX: The following code assumes that the root raid
    528 	 * is the first ('a') partition. This is about the best
    529 	 * we can do with a BSD disklabel, but we might be able
    530 	 * to do better with a GPT label, by setting a specified
    531 	 * attribute to indicate the root partition. We can then
    532 	 * stash the partition number in the r->root_partition
    533 	 * high bits (the bottom 2 bits are already used). For
    534 	 * now we just set booted_partition to 0 when we override
    535 	 * root.
    536 	 */
    537 	if (num_root == 1) {
    538 		device_t candidate_root;
    539 		if (dksc->sc_dkdev.dk_nwedges != 0) {
    540 			char cname[sizeof(cset->ac->devname)];
    541 			/* XXX: assume 'a' */
    542 			snprintf(cname, sizeof(cname), "%s%c",
    543 			    device_xname(dksc->sc_dev), 'a');
    544 			candidate_root = dkwedge_find_by_wname(cname);
    545 		} else
    546 			candidate_root = dksc->sc_dev;
    547 		if (booted_device == NULL ||
    548 		    rsc->sc_r.root_partition == 1 ||
    549 		    rf_containsboot(&rsc->sc_r, booted_device)) {
    550 			booted_device = candidate_root;
    551 			booted_partition = 0;	/* XXX assume 'a' */
    552 		}
    553 	} else if (num_root > 1) {
    554 
    555 		/*
    556 		 * Maybe the MD code can help. If it cannot, then
    557 		 * setroot() will discover that we have no
    558 		 * booted_device and will ask the user if nothing was
    559 		 * hardwired in the kernel config file
    560 		 */
    561 		if (booted_device == NULL)
    562 			return;
    563 
    564 		num_root = 0;
    565 		mutex_enter(&raid_lock);
    566 		LIST_FOREACH(sc, &raids, sc_link) {
    567 			RF_Raid_t *r = &sc->sc_r;
    568 			if (r->valid == 0)
    569 				continue;
    570 
    571 			if (r->root_partition == 0)
    572 				continue;
    573 
    574 			if (rf_containsboot(r, booted_device)) {
    575 				num_root++;
    576 				rsc = sc;
    577 				dksc = &rsc->sc_dksc;
    578 			}
    579 		}
    580 		mutex_exit(&raid_lock);
    581 
    582 		if (num_root == 1) {
    583 			booted_device = dksc->sc_dev;
    584 			booted_partition = 0;	/* XXX assume 'a' */
    585 		} else {
    586 			/* we can't guess.. require the user to answer... */
    587 			boothowto |= RB_ASKNAME;
    588 		}
    589 	}
    590 }
    591 
    592 static int
    593 raidsize(dev_t dev)
    594 {
    595 	struct raid_softc *rs;
    596 	struct dk_softc *dksc;
    597 	unsigned int unit;
    598 
    599 	unit = raidunit(dev);
    600 	if ((rs = raidget(unit, false)) == NULL)
    601 		return -1;
    602 	dksc = &rs->sc_dksc;
    603 
    604 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    605 		return -1;
    606 
    607 	return dk_size(dksc, dev);
    608 }
    609 
/*
 * d_dump entry point: crash-dump `size' bytes from `va' starting at
 * block `blkno' of this partition.  Returns 0 or an errno.
 */
static int
raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	unsigned int unit;
	struct raid_softc *rs;
	struct dk_softc *dksc;

	unit = raidunit(dev);
	if ((rs = raidget(unit, false)) == NULL)
		return ENXIO;
	dksc = &rs->sc_dksc;

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return ENODEV;

        /*
           Note that blkno is relative to this particular partition.
           By adding RF_PROTECTED_SECTORS, we get a value that
	   is relative to the partition used for the underlying component.
        */
	blkno += RF_PROTECTED_SECTORS;

	return dk_dump(dksc, dev, blkno, va, size);
}
    634 
/*
 * Dump `nblk' blocks from `va' to block `blkno' of a live component of
 * the set.  Only RAID 1 sets (one data + one parity column) are
 * supported.  Target preference: master, spared master, slave, spared
 * slave.  Returns 0 on success or an errno.
 */
static int
raid_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct raid_softc *rs = raidsoftc(dev);
	const struct bdevsw *bdev;
	RF_Raid_t *raidPtr;
	int     c, sparecol, j, scol, dumpto;
	int     error = 0;

	raidPtr = &rs->sc_r;

	/* we only support dumping to RAID 1 sets */
	if (raidPtr->Layout.numDataCol != 1 ||
	    raidPtr->Layout.numParityCol != 1)
		return EINVAL;

	if ((error = raidlock(rs)) != 0)
		return error;

	/* figure out what device is alive.. */

	/*
	   Look for a component to dump to.  The preference for the
	   component to dump to is as follows:
	   1) the master
	   2) a used_spare of the master
	   3) the slave
	   4) a used_spare of the slave
	*/

	dumpto = -1;
	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			/* this might be the one */
			dumpto = c;
			break;
		}
	}

	/*
	   At this point we have possibly selected a live master or a
	   live slave.  We now check to see if there is a spared
	   master (or a spared slave), if we didn't find a live master
	   or a live slave.
	*/

	for (c = 0; c < raidPtr->numSpare; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status ==  rf_ds_used_spare) {
			/* How about this one? */
			scol = -1;
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}
			if (scol == 0) {
				/*
				   We must have found a spared master!
				   We'll take that over anything else
				   found so far.  (We couldn't have
				   found a real master before, since
				   this is a used spare, and it's
				   saying that it's replacing the
				   master.)  On reboot (with
				   autoconfiguration turned on)
				   sparecol will become the 1st
				   component (component0) of this set.
				*/
				dumpto = sparecol;
				break;
			} else if (scol != -1) {
				/*
				   Must be a spared slave.  We'll dump
				   to that if we haven't found anything
				   else so far.
				*/
				if (dumpto == -1)
					dumpto = sparecol;
			}
		}
	}

	if (dumpto == -1) {
		/* we couldn't find any live components to dump to!?!?
		 */
		error = EINVAL;
		goto out;
	}

	/*
	 * NOTE(review): bdevsw_lookup(9) can return NULL for an invalid
	 * major number; the result is used unchecked here -- presumably
	 * a configured component always has a valid bdevsw, but confirm.
	 */
	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);

	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
				blkno, va, nblk * raidPtr->bytesPerSector);

out:
	raidunlock(rs);

	return error;
}
    736 
    737 /* ARGSUSED */
    738 static int
    739 raidopen(dev_t dev, int flags, int fmt,
    740     struct lwp *l)
    741 {
    742 	int     unit = raidunit(dev);
    743 	struct raid_softc *rs;
    744 	struct dk_softc *dksc;
    745 	int     error = 0;
    746 	int     part, pmask;
    747 
    748 	if ((rs = raidget(unit, true)) == NULL)
    749 		return ENXIO;
    750 	if ((error = raidlock(rs)) != 0)
    751 		return (error);
    752 
    753 	if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0) {
    754 		error = EBUSY;
    755 		goto bad;
    756 	}
    757 
    758 	dksc = &rs->sc_dksc;
    759 
    760 	part = DISKPART(dev);
    761 	pmask = (1 << part);
    762 
    763 	if (!DK_BUSY(dksc, pmask) &&
    764 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    765 		/* First one... mark things as dirty... Note that we *MUST*
    766 		 have done a configure before this.  I DO NOT WANT TO BE
    767 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    768 		 THAT THEY BELONG TOGETHER!!!!! */
    769 		/* XXX should check to see if we're only open for reading
    770 		   here... If so, we needn't do this, but then need some
    771 		   other way of keeping track of what's happened.. */
    772 
    773 		rf_markalldirty(&rs->sc_r);
    774 	}
    775 
    776 	if ((rs->sc_flags & RAIDF_INITED) != 0)
    777 		error = dk_open(dksc, dev, flags, fmt, l);
    778 
    779 bad:
    780 	raidunlock(rs);
    781 
    782 	return (error);
    783 
    784 
    785 }
    786 
    787 static int
    788 raid_lastclose(device_t self)
    789 {
    790 	struct raid_softc *rs = raidsoftc(self);
    791 
    792 	/* Last one... device is not unconfigured yet.
    793 	   Device shutdown has taken care of setting the
    794 	   clean bits if RAIDF_INITED is not set
    795 	   mark things as clean... */
    796 
    797 	rf_update_component_labels(&rs->sc_r,
    798 	    RF_FINAL_COMPONENT_UPDATE);
    799 
    800 	/* pass to unlocked code */
    801 	if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0)
    802 		rs->sc_flags |= RAIDF_DETACH;
    803 
    804 	return 0;
    805 }
    806 
/* ARGSUSED */
/*
 * d_close entry point.  If the unit is configured, close it through
 * dk_close(); a pending detach (flagged by raid_lastclose()) or a
 * shutdown of a never-configured unit is then finished here, outside
 * the unit lock.
 */
static int
raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	struct dk_softc *dksc;
	cfdata_t cf;
	int     error = 0, do_detach = 0, do_put = 0;

	if ((rs = raidget(unit, false)) == NULL)
		return ENXIO;
	dksc = &rs->sc_dksc;

	if ((error = raidlock(rs)) != 0)
		return (error);

	if ((rs->sc_flags & RAIDF_INITED) != 0) {
		error = dk_close(dksc, dev, flags, fmt, l);
		/* raid_lastclose() sets RAIDF_DETACH on the final close */
		if ((rs->sc_flags & RAIDF_DETACH) != 0)
			do_detach = 1;
	} else if ((rs->sc_flags & RAIDF_SHUTDOWN) != 0)
		do_put = 1;

	raidunlock(rs);

	if (do_detach) {
		/* free the pseudo device attach bits */
		cf = device_cfdata(dksc->sc_dev);
		error = config_detach(dksc->sc_dev, 0);
		if (error == 0)
			free(cf, M_RAIDFRAME);
	} else if (do_put) {
		/* never configured: just drop the softc */
		raidput(rs);
	}

	return (error);

}
    846 
/*
 * Poke the RAIDframe I/O thread: signal iodone_cv (under iodone_lock)
 * so that queued work is picked up.
 */
static void
raid_wakeup(RF_Raid_t *raidPtr)
{
	rf_lock_mutex2(raidPtr->iodone_lock);
	rf_signal_cond2(raidPtr->iodone_cv);
	rf_unlock_mutex2(raidPtr->iodone_lock);
}
    854 
    855 static void
    856 raidstrategy(struct buf *bp)
    857 {
    858 	unsigned int unit;
    859 	struct raid_softc *rs;
    860 	struct dk_softc *dksc;
    861 	RF_Raid_t *raidPtr;
    862 
    863 	unit = raidunit(bp->b_dev);
    864 	if ((rs = raidget(unit, false)) == NULL) {
    865 		bp->b_error = ENXIO;
    866 		goto fail;
    867 	}
    868 	if ((rs->sc_flags & RAIDF_INITED) == 0) {
    869 		bp->b_error = ENXIO;
    870 		goto fail;
    871 	}
    872 	dksc = &rs->sc_dksc;
    873 	raidPtr = &rs->sc_r;
    874 
    875 	/* Queue IO only */
    876 	if (dk_strategy_defer(dksc, bp))
    877 		goto done;
    878 
    879 	/* schedule the IO to happen at the next convenient time */
    880 	raid_wakeup(raidPtr);
    881 
    882 done:
    883 	return;
    884 
    885 fail:
    886 	bp->b_resid = bp->b_bcount;
    887 	biodone(bp);
    888 }
    889 
/*
 * dk(4) diskstart callback: issue one buffer to the RAID set.
 *
 * Returns ENODEV if the set is not (or no longer) valid, otherwise
 * the result of raiddoaccess().
 */
static int
raid_diskstart(device_t dev, struct buf *bp)
{
	struct raid_softc *rs = raidsoftc(dev);
	RF_Raid_t *raidPtr;

	raidPtr = &rs->sc_r;
	if (!raidPtr->valid) {
		db1_printf(("raid is not valid..\n"));
		return ENODEV;
	}

	/* XXX residual is cleared up front rather than on completion */
	bp->b_resid = 0;

	return raiddoaccess(raidPtr, bp);
}
    907 
/*
 * Completion handler for a RAIDframe I/O: report the buffer done to
 * the dk(4) layer, return the "opening" (outstanding-I/O slot) this
 * access consumed, and wake the daemon so queued I/O can proceed.
 */
void
raiddone(RF_Raid_t *raidPtr, struct buf *bp)
{
	struct raid_softc *rs;
	struct dk_softc *dksc;

	rs = raidPtr->softc;
	dksc = &rs->sc_dksc;

	dk_done(dksc, bp);

	/* openings is shared state; update it under the set's mutex */
	rf_lock_mutex2(raidPtr->mutex);
	raidPtr->openings++;
	rf_unlock_mutex2(raidPtr->mutex);

	/* schedule more IO */
	raid_wakeup(raidPtr);
}
    926 
    927 /* ARGSUSED */
    928 static int
    929 raidread(dev_t dev, struct uio *uio, int flags)
    930 {
    931 	int     unit = raidunit(dev);
    932 	struct raid_softc *rs;
    933 
    934 	if ((rs = raidget(unit, false)) == NULL)
    935 		return ENXIO;
    936 
    937 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    938 		return (ENXIO);
    939 
    940 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    941 
    942 }
    943 
    944 /* ARGSUSED */
    945 static int
    946 raidwrite(dev_t dev, struct uio *uio, int flags)
    947 {
    948 	int     unit = raidunit(dev);
    949 	struct raid_softc *rs;
    950 
    951 	if ((rs = raidget(unit, false)) == NULL)
    952 		return ENXIO;
    953 
    954 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    955 		return (ENXIO);
    956 
    957 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    958 
    959 }
    960 
/*
 * Tear down a configured RAID set.  Caller must hold the unit lock
 * (hence "_unlocked" refers to this function taking no locks itself).
 *
 * Returns EBUSY if any partition is still open, 0 if the set was not
 * configured (nothing to do), or the error from rf_Shutdown().
 * On success the dk/disk/wedge state is fully detached.
 */
static int
raid_detach_unlocked(struct raid_softc *rs)
{
	struct dk_softc *dksc = &rs->sc_dksc;
	RF_Raid_t *raidPtr;
	int error;

	raidPtr = &rs->sc_r;

	if (DK_BUSY(dksc, 0))
		return EBUSY;

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return 0;

	rs->sc_flags &= ~RAIDF_SHUTDOWN;

	if ((error = rf_Shutdown(raidPtr)) != 0)
		return error;

	rs->sc_flags &= ~RAIDF_INITED;

	/* Kill off any queued buffers */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Detach the disk. */
	dkwedge_delall(&dksc->sc_dkdev);
	disk_detach(&dksc->sc_dkdev);
	disk_destroy(&dksc->sc_dkdev);
	dk_detach(dksc);

	return 0;
}
    995 
    996 static int
    997 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
    998 {
    999 	int     unit = raidunit(dev);
   1000 	int     error = 0;
   1001 	int     part, pmask;
   1002 	struct raid_softc *rs;
   1003 	struct dk_softc *dksc;
   1004 	RF_Config_t *k_cfg, *u_cfg;
   1005 	RF_Raid_t *raidPtr;
   1006 	RF_RaidDisk_t *diskPtr;
   1007 	RF_AccTotals_t *totals;
   1008 	RF_DeviceConfig_t *d_cfg, **ucfgp;
   1009 	u_char *specific_buf;
   1010 	int retcode = 0;
   1011 	int column;
   1012 /*	int raidid; */
   1013 	struct rf_recon_req *rrcopy, *rr;
   1014 	RF_ComponentLabel_t *clabel;
   1015 	RF_ComponentLabel_t *ci_label;
   1016 	RF_ComponentLabel_t **clabel_ptr;
   1017 	RF_SingleComponent_t *sparePtr,*componentPtr;
   1018 	RF_SingleComponent_t component;
   1019 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
   1020 	int i, j, d;
   1021 
   1022 	if ((rs = raidget(unit, false)) == NULL)
   1023 		return ENXIO;
   1024 	dksc = &rs->sc_dksc;
   1025 	raidPtr = &rs->sc_r;
   1026 
   1027 	db1_printf(("raidioctl: %d %d %d %lu\n", (int) dev,
   1028 		(int) DISKPART(dev), (int) unit, cmd));
   1029 
   1030 	/* Must be initialized for these... */
   1031 	switch (cmd) {
   1032 	case RAIDFRAME_REWRITEPARITY:
   1033 	case RAIDFRAME_GET_INFO:
   1034 	case RAIDFRAME_RESET_ACCTOTALS:
   1035 	case RAIDFRAME_GET_ACCTOTALS:
   1036 	case RAIDFRAME_KEEP_ACCTOTALS:
   1037 	case RAIDFRAME_GET_SIZE:
   1038 	case RAIDFRAME_FAIL_DISK:
   1039 	case RAIDFRAME_COPYBACK:
   1040 	case RAIDFRAME_CHECK_RECON_STATUS:
   1041 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1042 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1043 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1044 	case RAIDFRAME_ADD_HOT_SPARE:
   1045 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1046 	case RAIDFRAME_INIT_LABELS:
   1047 	case RAIDFRAME_REBUILD_IN_PLACE:
   1048 	case RAIDFRAME_CHECK_PARITY:
   1049 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1050 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1051 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1052 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1053 	case RAIDFRAME_SET_AUTOCONFIG:
   1054 	case RAIDFRAME_SET_ROOT:
   1055 	case RAIDFRAME_DELETE_COMPONENT:
   1056 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1057 	case RAIDFRAME_PARITYMAP_STATUS:
   1058 	case RAIDFRAME_PARITYMAP_GET_DISABLE:
   1059 	case RAIDFRAME_PARITYMAP_SET_DISABLE:
   1060 	case RAIDFRAME_PARITYMAP_SET_PARAMS:
   1061 		if ((rs->sc_flags & RAIDF_INITED) == 0)
   1062 			return (ENXIO);
   1063 	}
   1064 
   1065 	switch (cmd) {
   1066 #ifdef COMPAT_50
   1067 	case RAIDFRAME_GET_INFO50:
   1068 		return rf_get_info50(raidPtr, data);
   1069 
   1070 	case RAIDFRAME_CONFIGURE50:
   1071 		if ((retcode = rf_config50(raidPtr, unit, data, &k_cfg)) != 0)
   1072 			return retcode;
   1073 		goto config;
   1074 #endif
   1075 		/* configure the system */
   1076 	case RAIDFRAME_CONFIGURE:
   1077 
   1078 		if (raidPtr->valid) {
   1079 			/* There is a valid RAID set running on this unit! */
   1080 			printf("raid%d: Device already configured!\n",unit);
   1081 			return(EINVAL);
   1082 		}
   1083 
   1084 		/* copy-in the configuration information */
   1085 		/* data points to a pointer to the configuration structure */
   1086 
   1087 		u_cfg = *((RF_Config_t **) data);
   1088 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
   1089 		if (k_cfg == NULL) {
   1090 			return (ENOMEM);
   1091 		}
   1092 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
   1093 		if (retcode) {
   1094 			RF_Free(k_cfg, sizeof(RF_Config_t));
   1095 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
   1096 				retcode));
   1097 			goto no_config;
   1098 		}
   1099 		goto config;
   1100 	config:
   1101 		rs->sc_flags &= ~RAIDF_SHUTDOWN;
   1102 
   1103 		/* allocate a buffer for the layout-specific data, and copy it
   1104 		 * in */
   1105 		if (k_cfg->layoutSpecificSize) {
   1106 			if (k_cfg->layoutSpecificSize > 10000) {
   1107 				/* sanity check */
   1108 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1109 				retcode = EINVAL;
   1110 				goto no_config;
   1111 			}
   1112 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
   1113 			    (u_char *));
   1114 			if (specific_buf == NULL) {
   1115 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1116 				retcode = ENOMEM;
   1117 				goto no_config;
   1118 			}
   1119 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
   1120 			    k_cfg->layoutSpecificSize);
   1121 			if (retcode) {
   1122 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1123 				RF_Free(specific_buf,
   1124 					k_cfg->layoutSpecificSize);
   1125 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
   1126 					retcode));
   1127 				goto no_config;
   1128 			}
   1129 		} else
   1130 			specific_buf = NULL;
   1131 		k_cfg->layoutSpecific = specific_buf;
   1132 
   1133 		/* should do some kind of sanity check on the configuration.
   1134 		 * Store the sum of all the bytes in the last byte? */
   1135 
   1136 		/* configure the system */
   1137 
   1138 		/*
   1139 		 * Clear the entire RAID descriptor, just to make sure
   1140 		 *  there is no stale data left in the case of a
   1141 		 *  reconfiguration
   1142 		 */
   1143 		memset(raidPtr, 0, sizeof(*raidPtr));
   1144 		raidPtr->softc = rs;
   1145 		raidPtr->raidid = unit;
   1146 
   1147 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
   1148 
   1149 		if (retcode == 0) {
   1150 
   1151 			/* allow this many simultaneous IO's to
   1152 			   this RAID device */
   1153 			raidPtr->openings = RAIDOUTSTANDING;
   1154 
   1155 			raidinit(rs);
   1156 			raid_wakeup(raidPtr);
   1157 			rf_markalldirty(raidPtr);
   1158 		}
   1159 		/* free the buffers.  No return code here. */
   1160 		if (k_cfg->layoutSpecificSize) {
   1161 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
   1162 		}
   1163 		RF_Free(k_cfg, sizeof(RF_Config_t));
   1164 
   1165 	no_config:
   1166 		/*
   1167 		 * If configuration failed, set sc_flags so that we
   1168 		 * will detach the device when we close it.
   1169 		 */
   1170 		if (retcode != 0)
   1171 			rs->sc_flags |= RAIDF_SHUTDOWN;
   1172 		return (retcode);
   1173 
   1174 		/* shutdown the system */
   1175 	case RAIDFRAME_SHUTDOWN:
   1176 
   1177 		part = DISKPART(dev);
   1178 		pmask = (1 << part);
   1179 
   1180 		if ((error = raidlock(rs)) != 0)
   1181 			return (error);
   1182 
   1183 		if (DK_BUSY(dksc, pmask))
   1184 			retcode = EBUSY;
   1185 		else {
   1186 			/* detach and free on close */
   1187 			rs->sc_flags |= RAIDF_SHUTDOWN;
   1188 			retcode = 0;
   1189 		}
   1190 
   1191 		raidunlock(rs);
   1192 
   1193 		return (retcode);
   1194 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1195 		clabel_ptr = (RF_ComponentLabel_t **) data;
   1196 		/* need to read the component label for the disk indicated
   1197 		   by row,column in clabel */
   1198 
   1199 		/*
   1200 		 * Perhaps there should be an option to skip the in-core
   1201 		 * copy and hit the disk, as with disklabel(8).
   1202 		 */
   1203 		RF_Malloc(clabel, sizeof(*clabel), (RF_ComponentLabel_t *));
   1204 
   1205 		retcode = copyin(*clabel_ptr, clabel, sizeof(*clabel));
   1206 
   1207 		if (retcode) {
   1208 			RF_Free(clabel, sizeof(*clabel));
   1209 			return retcode;
   1210 		}
   1211 
   1212 		clabel->row = 0; /* Don't allow looking at anything else.*/
   1213 
   1214 		column = clabel->column;
   1215 
   1216 		if ((column < 0) || (column >= raidPtr->numCol +
   1217 		    raidPtr->numSpare)) {
   1218 			RF_Free(clabel, sizeof(*clabel));
   1219 			return EINVAL;
   1220 		}
   1221 
   1222 		RF_Free(clabel, sizeof(*clabel));
   1223 
   1224 		clabel = raidget_component_label(raidPtr, column);
   1225 
   1226 		return copyout(clabel, *clabel_ptr, sizeof(**clabel_ptr));
   1227 
   1228 #if 0
   1229 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1230 		clabel = (RF_ComponentLabel_t *) data;
   1231 
   1232 		/* XXX check the label for valid stuff... */
   1233 		/* Note that some things *should not* get modified --
   1234 		   the user should be re-initing the labels instead of
   1235 		   trying to patch things.
   1236 		   */
   1237 
   1238 		raidid = raidPtr->raidid;
   1239 #ifdef DEBUG
   1240 		printf("raid%d: Got component label:\n", raidid);
   1241 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1242 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1243 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1244 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1245 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1246 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1247 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1248 #endif
   1249 		clabel->row = 0;
   1250 		column = clabel->column;
   1251 
   1252 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1253 			return(EINVAL);
   1254 		}
   1255 
   1256 		/* XXX this isn't allowed to do anything for now :-) */
   1257 
   1258 		/* XXX and before it is, we need to fill in the rest
   1259 		   of the fields!?!?!?! */
   1260 		memcpy(raidget_component_label(raidPtr, column),
   1261 		    clabel, sizeof(*clabel));
   1262 		raidflush_component_label(raidPtr, column);
   1263 		return (0);
   1264 #endif
   1265 
   1266 	case RAIDFRAME_INIT_LABELS:
   1267 		clabel = (RF_ComponentLabel_t *) data;
   1268 		/*
   1269 		   we only want the serial number from
   1270 		   the above.  We get all the rest of the information
   1271 		   from the config that was used to create this RAID
   1272 		   set.
   1273 		   */
   1274 
   1275 		raidPtr->serial_number = clabel->serial_number;
   1276 
   1277 		for(column=0;column<raidPtr->numCol;column++) {
   1278 			diskPtr = &raidPtr->Disks[column];
   1279 			if (!RF_DEAD_DISK(diskPtr->status)) {
   1280 				ci_label = raidget_component_label(raidPtr,
   1281 				    column);
   1282 				/* Zeroing this is important. */
   1283 				memset(ci_label, 0, sizeof(*ci_label));
   1284 				raid_init_component_label(raidPtr, ci_label);
   1285 				ci_label->serial_number =
   1286 				    raidPtr->serial_number;
   1287 				ci_label->row = 0; /* we dont' pretend to support more */
   1288 				rf_component_label_set_partitionsize(ci_label,
   1289 				    diskPtr->partitionSize);
   1290 				ci_label->column = column;
   1291 				raidflush_component_label(raidPtr, column);
   1292 			}
   1293 			/* XXXjld what about the spares? */
   1294 		}
   1295 
   1296 		return (retcode);
   1297 	case RAIDFRAME_SET_AUTOCONFIG:
   1298 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1299 		printf("raid%d: New autoconfig value is: %d\n",
   1300 		       raidPtr->raidid, d);
   1301 		*(int *) data = d;
   1302 		return (retcode);
   1303 
   1304 	case RAIDFRAME_SET_ROOT:
   1305 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1306 		printf("raid%d: New rootpartition value is: %d\n",
   1307 		       raidPtr->raidid, d);
   1308 		*(int *) data = d;
   1309 		return (retcode);
   1310 
   1311 		/* initialize all parity */
   1312 	case RAIDFRAME_REWRITEPARITY:
   1313 
   1314 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1315 			/* Parity for RAID 0 is trivially correct */
   1316 			raidPtr->parity_good = RF_RAID_CLEAN;
   1317 			return(0);
   1318 		}
   1319 
   1320 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1321 			/* Re-write is already in progress! */
   1322 			return(EINVAL);
   1323 		}
   1324 
   1325 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1326 					   rf_RewriteParityThread,
   1327 					   raidPtr,"raid_parity");
   1328 		return (retcode);
   1329 
   1330 
   1331 	case RAIDFRAME_ADD_HOT_SPARE:
   1332 		sparePtr = (RF_SingleComponent_t *) data;
   1333 		memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
   1334 		retcode = rf_add_hot_spare(raidPtr, &component);
   1335 		return(retcode);
   1336 
   1337 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1338 		return(retcode);
   1339 
   1340 	case RAIDFRAME_DELETE_COMPONENT:
   1341 		componentPtr = (RF_SingleComponent_t *)data;
   1342 		memcpy( &component, componentPtr,
   1343 			sizeof(RF_SingleComponent_t));
   1344 		retcode = rf_delete_component(raidPtr, &component);
   1345 		return(retcode);
   1346 
   1347 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1348 		componentPtr = (RF_SingleComponent_t *)data;
   1349 		memcpy( &component, componentPtr,
   1350 			sizeof(RF_SingleComponent_t));
   1351 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1352 		return(retcode);
   1353 
   1354 	case RAIDFRAME_REBUILD_IN_PLACE:
   1355 
   1356 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1357 			/* Can't do this on a RAID 0!! */
   1358 			return(EINVAL);
   1359 		}
   1360 
   1361 		if (raidPtr->recon_in_progress == 1) {
   1362 			/* a reconstruct is already in progress! */
   1363 			return(EINVAL);
   1364 		}
   1365 
   1366 		componentPtr = (RF_SingleComponent_t *) data;
   1367 		memcpy( &component, componentPtr,
   1368 			sizeof(RF_SingleComponent_t));
   1369 		component.row = 0; /* we don't support any more */
   1370 		column = component.column;
   1371 
   1372 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1373 			return(EINVAL);
   1374 		}
   1375 
   1376 		rf_lock_mutex2(raidPtr->mutex);
   1377 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
   1378 		    (raidPtr->numFailures > 0)) {
   1379 			/* XXX 0 above shouldn't be constant!!! */
   1380 			/* some component other than this has failed.
   1381 			   Let's not make things worse than they already
   1382 			   are... */
   1383 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1384 			       raidPtr->raidid);
   1385 			printf("raid%d:     Col: %d   Too many failures.\n",
   1386 			       raidPtr->raidid, column);
   1387 			rf_unlock_mutex2(raidPtr->mutex);
   1388 			return (EINVAL);
   1389 		}
   1390 		if (raidPtr->Disks[column].status ==
   1391 		    rf_ds_reconstructing) {
   1392 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1393 			       raidPtr->raidid);
   1394 			printf("raid%d:    Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
   1395 
   1396 			rf_unlock_mutex2(raidPtr->mutex);
   1397 			return (EINVAL);
   1398 		}
   1399 		if (raidPtr->Disks[column].status == rf_ds_spared) {
   1400 			rf_unlock_mutex2(raidPtr->mutex);
   1401 			return (EINVAL);
   1402 		}
   1403 		rf_unlock_mutex2(raidPtr->mutex);
   1404 
   1405 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1406 		if (rrcopy == NULL)
   1407 			return(ENOMEM);
   1408 
   1409 		rrcopy->raidPtr = (void *) raidPtr;
   1410 		rrcopy->col = column;
   1411 
   1412 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1413 					   rf_ReconstructInPlaceThread,
   1414 					   rrcopy,"raid_reconip");
   1415 		return(retcode);
   1416 
   1417 	case RAIDFRAME_GET_INFO:
   1418 		if (!raidPtr->valid)
   1419 			return (ENODEV);
   1420 		ucfgp = (RF_DeviceConfig_t **) data;
   1421 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1422 			  (RF_DeviceConfig_t *));
   1423 		if (d_cfg == NULL)
   1424 			return (ENOMEM);
   1425 		d_cfg->rows = 1; /* there is only 1 row now */
   1426 		d_cfg->cols = raidPtr->numCol;
   1427 		d_cfg->ndevs = raidPtr->numCol;
   1428 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1429 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1430 			return (ENOMEM);
   1431 		}
   1432 		d_cfg->nspares = raidPtr->numSpare;
   1433 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1434 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1435 			return (ENOMEM);
   1436 		}
   1437 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1438 		d = 0;
   1439 		for (j = 0; j < d_cfg->cols; j++) {
   1440 			d_cfg->devs[d] = raidPtr->Disks[j];
   1441 			d++;
   1442 		}
   1443 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1444 			d_cfg->spares[i] = raidPtr->Disks[j];
   1445 			if (d_cfg->spares[i].status == rf_ds_rebuilding_spare) {
   1446 				/* XXX: raidctl(8) expects to see this as a used spare */
   1447 				d_cfg->spares[i].status = rf_ds_used_spare;
   1448 			}
   1449 		}
   1450 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1451 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1452 
   1453 		return (retcode);
   1454 
   1455 	case RAIDFRAME_CHECK_PARITY:
   1456 		*(int *) data = raidPtr->parity_good;
   1457 		return (0);
   1458 
   1459 	case RAIDFRAME_PARITYMAP_STATUS:
   1460 		if (rf_paritymap_ineligible(raidPtr))
   1461 			return EINVAL;
   1462 		rf_paritymap_status(raidPtr->parity_map,
   1463 		    (struct rf_pmstat *)data);
   1464 		return 0;
   1465 
   1466 	case RAIDFRAME_PARITYMAP_SET_PARAMS:
   1467 		if (rf_paritymap_ineligible(raidPtr))
   1468 			return EINVAL;
   1469 		if (raidPtr->parity_map == NULL)
   1470 			return ENOENT; /* ??? */
   1471 		if (0 != rf_paritymap_set_params(raidPtr->parity_map,
   1472 			(struct rf_pmparams *)data, 1))
   1473 			return EINVAL;
   1474 		return 0;
   1475 
   1476 	case RAIDFRAME_PARITYMAP_GET_DISABLE:
   1477 		if (rf_paritymap_ineligible(raidPtr))
   1478 			return EINVAL;
   1479 		*(int *) data = rf_paritymap_get_disable(raidPtr);
   1480 		return 0;
   1481 
   1482 	case RAIDFRAME_PARITYMAP_SET_DISABLE:
   1483 		if (rf_paritymap_ineligible(raidPtr))
   1484 			return EINVAL;
   1485 		rf_paritymap_set_disable(raidPtr, *(int *)data);
   1486 		/* XXX should errors be passed up? */
   1487 		return 0;
   1488 
   1489 	case RAIDFRAME_RESET_ACCTOTALS:
   1490 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1491 		return (0);
   1492 
   1493 	case RAIDFRAME_GET_ACCTOTALS:
   1494 		totals = (RF_AccTotals_t *) data;
   1495 		*totals = raidPtr->acc_totals;
   1496 		return (0);
   1497 
   1498 	case RAIDFRAME_KEEP_ACCTOTALS:
   1499 		raidPtr->keep_acc_totals = *(int *)data;
   1500 		return (0);
   1501 
   1502 	case RAIDFRAME_GET_SIZE:
   1503 		*(int *) data = raidPtr->totalSectors;
   1504 		return (0);
   1505 
   1506 		/* fail a disk & optionally start reconstruction */
   1507 	case RAIDFRAME_FAIL_DISK:
   1508 
   1509 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1510 			/* Can't do this on a RAID 0!! */
   1511 			return(EINVAL);
   1512 		}
   1513 
   1514 		rr = (struct rf_recon_req *) data;
   1515 		rr->row = 0;
   1516 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
   1517 			return (EINVAL);
   1518 
   1519 
   1520 		rf_lock_mutex2(raidPtr->mutex);
   1521 		if (raidPtr->status == rf_rs_reconstructing) {
   1522 			/* you can't fail a disk while we're reconstructing! */
   1523 			/* XXX wrong for RAID6 */
   1524 			rf_unlock_mutex2(raidPtr->mutex);
   1525 			return (EINVAL);
   1526 		}
   1527 		if ((raidPtr->Disks[rr->col].status ==
   1528 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1529 			/* some other component has failed.  Let's not make
   1530 			   things worse. XXX wrong for RAID6 */
   1531 			rf_unlock_mutex2(raidPtr->mutex);
   1532 			return (EINVAL);
   1533 		}
   1534 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
   1535 			/* Can't fail a spared disk! */
   1536 			rf_unlock_mutex2(raidPtr->mutex);
   1537 			return (EINVAL);
   1538 		}
   1539 		rf_unlock_mutex2(raidPtr->mutex);
   1540 
   1541 		/* make a copy of the recon request so that we don't rely on
   1542 		 * the user's buffer */
   1543 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1544 		if (rrcopy == NULL)
   1545 			return(ENOMEM);
   1546 		memcpy(rrcopy, rr, sizeof(*rr));
   1547 		rrcopy->raidPtr = (void *) raidPtr;
   1548 
   1549 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1550 					   rf_ReconThread,
   1551 					   rrcopy,"raid_recon");
   1552 		return (0);
   1553 
   1554 		/* invoke a copyback operation after recon on whatever disk
   1555 		 * needs it, if any */
   1556 	case RAIDFRAME_COPYBACK:
   1557 
   1558 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1559 			/* This makes no sense on a RAID 0!! */
   1560 			return(EINVAL);
   1561 		}
   1562 
   1563 		if (raidPtr->copyback_in_progress == 1) {
   1564 			/* Copyback is already in progress! */
   1565 			return(EINVAL);
   1566 		}
   1567 
   1568 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1569 					   rf_CopybackThread,
   1570 					   raidPtr,"raid_copyback");
   1571 		return (retcode);
   1572 
   1573 		/* return the percentage completion of reconstruction */
   1574 	case RAIDFRAME_CHECK_RECON_STATUS:
   1575 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1576 			/* This makes no sense on a RAID 0, so tell the
   1577 			   user it's done. */
   1578 			*(int *) data = 100;
   1579 			return(0);
   1580 		}
   1581 		if (raidPtr->status != rf_rs_reconstructing)
   1582 			*(int *) data = 100;
   1583 		else {
   1584 			if (raidPtr->reconControl->numRUsTotal > 0) {
   1585 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
   1586 			} else {
   1587 				*(int *) data = 0;
   1588 			}
   1589 		}
   1590 		return (0);
   1591 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1592 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1593 		if (raidPtr->status != rf_rs_reconstructing) {
   1594 			progressInfo.remaining = 0;
   1595 			progressInfo.completed = 100;
   1596 			progressInfo.total = 100;
   1597 		} else {
   1598 			progressInfo.total =
   1599 				raidPtr->reconControl->numRUsTotal;
   1600 			progressInfo.completed =
   1601 				raidPtr->reconControl->numRUsComplete;
   1602 			progressInfo.remaining = progressInfo.total -
   1603 				progressInfo.completed;
   1604 		}
   1605 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1606 				  sizeof(RF_ProgressInfo_t));
   1607 		return (retcode);
   1608 
   1609 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1610 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1611 			/* This makes no sense on a RAID 0, so tell the
   1612 			   user it's done. */
   1613 			*(int *) data = 100;
   1614 			return(0);
   1615 		}
   1616 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1617 			*(int *) data = 100 *
   1618 				raidPtr->parity_rewrite_stripes_done /
   1619 				raidPtr->Layout.numStripe;
   1620 		} else {
   1621 			*(int *) data = 100;
   1622 		}
   1623 		return (0);
   1624 
   1625 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1626 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1627 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1628 			progressInfo.total = raidPtr->Layout.numStripe;
   1629 			progressInfo.completed =
   1630 				raidPtr->parity_rewrite_stripes_done;
   1631 			progressInfo.remaining = progressInfo.total -
   1632 				progressInfo.completed;
   1633 		} else {
   1634 			progressInfo.remaining = 0;
   1635 			progressInfo.completed = 100;
   1636 			progressInfo.total = 100;
   1637 		}
   1638 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1639 				  sizeof(RF_ProgressInfo_t));
   1640 		return (retcode);
   1641 
   1642 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1643 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1644 			/* This makes no sense on a RAID 0 */
   1645 			*(int *) data = 100;
   1646 			return(0);
   1647 		}
   1648 		if (raidPtr->copyback_in_progress == 1) {
   1649 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1650 				raidPtr->Layout.numStripe;
   1651 		} else {
   1652 			*(int *) data = 100;
   1653 		}
   1654 		return (0);
   1655 
   1656 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1657 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1658 		if (raidPtr->copyback_in_progress == 1) {
   1659 			progressInfo.total = raidPtr->Layout.numStripe;
   1660 			progressInfo.completed =
   1661 				raidPtr->copyback_stripes_done;
   1662 			progressInfo.remaining = progressInfo.total -
   1663 				progressInfo.completed;
   1664 		} else {
   1665 			progressInfo.remaining = 0;
   1666 			progressInfo.completed = 100;
   1667 			progressInfo.total = 100;
   1668 		}
   1669 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1670 				  sizeof(RF_ProgressInfo_t));
   1671 		return (retcode);
   1672 
   1673 		/* the sparetable daemon calls this to wait for the kernel to
   1674 		 * need a spare table. this ioctl does not return until a
   1675 		 * spare table is needed. XXX -- calling mpsleep here in the
   1676 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1677 		 * -- I should either compute the spare table in the kernel,
   1678 		 * or have a different -- XXX XXX -- interface (a different
   1679 		 * character device) for delivering the table     -- XXX */
   1680 #if 0
   1681 	case RAIDFRAME_SPARET_WAIT:
   1682 		rf_lock_mutex2(rf_sparet_wait_mutex);
   1683 		while (!rf_sparet_wait_queue)
   1684 			rf_wait_cond2(rf_sparet_wait_cv, rf_sparet_wait_mutex);
   1685 		waitreq = rf_sparet_wait_queue;
   1686 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1687 		rf_unlock_mutex2(rf_sparet_wait_mutex);
   1688 
   1689 		/* structure assignment */
   1690 		*((RF_SparetWait_t *) data) = *waitreq;
   1691 
   1692 		RF_Free(waitreq, sizeof(*waitreq));
   1693 		return (0);
   1694 
   1695 		/* wakes up a process waiting on SPARET_WAIT and puts an error
   1696 		 * code in it that will cause the dameon to exit */
   1697 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1698 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1699 		waitreq->fcol = -1;
   1700 		rf_lock_mutex2(rf_sparet_wait_mutex);
   1701 		waitreq->next = rf_sparet_wait_queue;
   1702 		rf_sparet_wait_queue = waitreq;
   1703 		rf_broadcast_conf2(rf_sparet_wait_cv);
   1704 		rf_unlock_mutex2(rf_sparet_wait_mutex);
   1705 		return (0);
   1706 
   1707 		/* used by the spare table daemon to deliver a spare table
   1708 		 * into the kernel */
   1709 	case RAIDFRAME_SEND_SPARET:
   1710 
   1711 		/* install the spare table */
   1712 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1713 
   1714 		/* respond to the requestor.  the return status of the spare
   1715 		 * table installation is passed in the "fcol" field */
   1716 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1717 		waitreq->fcol = retcode;
   1718 		rf_lock_mutex2(rf_sparet_wait_mutex);
   1719 		waitreq->next = rf_sparet_resp_queue;
   1720 		rf_sparet_resp_queue = waitreq;
   1721 		rf_broadcast_cond2(rf_sparet_resp_cv);
   1722 		rf_unlock_mutex2(rf_sparet_wait_mutex);
   1723 
   1724 		return (retcode);
   1725 #endif
   1726 
   1727 	default:
   1728 		break; /* fall through to the os-specific code below */
   1729 
   1730 	}
   1731 
   1732 	if (!raidPtr->valid)
   1733 		return (EINVAL);
   1734 
   1735 	/*
   1736 	 * Add support for "regular" device ioctls here.
   1737 	 */
   1738 
   1739 	error = dk_ioctl(dksc, dev, cmd, data, flag, l);
   1740 	if (error != EPASSTHROUGH)
   1741 		return (error);
   1742 
   1743 	switch (cmd) {
   1744 	case DIOCCACHESYNC:
   1745 		return rf_sync_component_caches(raidPtr);
   1746 
   1747 	default:
   1748 		retcode = ENOTTY;
   1749 	}
   1750 	return (retcode);
   1751 
   1752 }
   1753 
   1754 
   1755 /* raidinit -- complete the rest of the initialization for the
   1756    RAIDframe device.  */
   1757 
   1758 
static void
raidinit(struct raid_softc *rs)
{
	cfdata_t cf;
	unsigned int unit;
	struct dk_softc *dksc = &rs->sc_dksc;
	RF_Raid_t *raidPtr = &rs->sc_r;
	device_t dev;

	unit = raidPtr->raidid;

	/* XXX doesn't check bounds. */
	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%u", unit);

	/* attach the pseudo device */
	cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
	cf->cf_name = raid_cd.cd_name;
	cf->cf_atname = raid_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	dev = config_attach_pseudo(cf);
	if (dev == NULL) {
		/* NOTE(review): on failure we free cf and bail without
		 * setting RAIDF_INITED; the set remains configured but
		 * has no device node. */
		printf("raid%d: config_attach_pseudo failed\n",
		    raidPtr->raidid);
		free(cf, M_RAIDFRAME);
		return;
	}

	/* provide a backpointer to the real softc */
	raidsoftc(dev) = rs;

	/* disk_attach actually creates space for the CPU disklabel, among
	 * other things, so it's critical to call this *BEFORE* we try putzing
	 * with disklabels. */
	dk_init(dksc, dev, DKTYPE_RAID);
	disk_init(&dksc->sc_dkdev, rs->sc_xname, &rf_dkdriver);

	/* XXX There may be a weird interaction here between this, and
	 * protectedSectors, as used in RAIDframe.  */

	rs->sc_size = raidPtr->totalSectors;

	/* Attach dk and disk subsystems */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);
	rf_set_geometry(rs, raidPtr);

	/* first-come-first-served buffer queue, sorted by raw block */
	bufq_alloc(&dksc->sc_bufq, "fcfs", BUFQ_SORT_RAWBLOCK);

	/* mark unit as usable */
	rs->sc_flags |= RAIDF_INITED;

	/* probe for wedges (GPT partitions etc.) on the new disk */
	dkwedge_discover(&dksc->sc_dkdev);
}
   1814 
   1815 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1816 /* wake up the daemon & tell it to get us a spare table
   1817  * XXX
   1818  * the entries in the queues should be tagged with the raidPtr
   1819  * so that in the extremely rare case that two recons happen at once,
 * we know for which device we're requesting a spare table
   1821  * XXX
   1822  *
   1823  * XXX This code is not currently used. GO
   1824  */
/*
 * Queue a spare-table request for the daemon, wake it, then sleep on
 * the response queue until an answer arrives.  Returns the fcol field
 * of the response entry and frees that entry.
 */
int
rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
{
	int     retcode;

	rf_lock_mutex2(rf_sparet_wait_mutex);
	/* Push our request on the daemon's wait queue and signal it. */
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	rf_broadcast_cond2(rf_sparet_wait_cv);

	/* mpsleep unlocks the mutex */
	while (!rf_sparet_resp_queue) {
		rf_wait_cond2(rf_sparet_resp_cv, rf_sparet_wait_mutex);
	}
	/* Pop the head of the response queue (a different allocation
	 * than the request we queued above). */
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	rf_unlock_mutex2(rf_sparet_wait_mutex);

	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we
					 * alloc'd */
	return (retcode);
}
   1848 #endif
   1849 
   1850 /* a wrapper around rf_DoAccess that extracts appropriate info from the
   1851  * bp & passes it down.
   1852  * any calls originating in the kernel must use non-blocking I/O
   1853  * do some extra sanity checking to return "appropriate" error values for
   1854  * certain conditions (to make some standard utilities work)
   1855  *
   1856  * Formerly known as: rf_DoAccessKernel
   1857  */
void
raidstart(RF_Raid_t *raidPtr)
{
	struct raid_softc *rs;
	struct dk_softc *dksc;

	rs = raidPtr->softc;
	dksc = &rs->sc_dksc;
	/* quick check to see if anything has died recently */
	rf_lock_mutex2(raidPtr->mutex);
	if (raidPtr->numNewFailures > 0) {
		/* Drop the lock around the label update (which takes
		 * other locks), then retake it to adjust the counter. */
		rf_unlock_mutex2(raidPtr->mutex);
		rf_update_component_labels(raidPtr,
					   RF_NORMAL_COMPONENT_UPDATE);
		rf_lock_mutex2(raidPtr->mutex);
		raidPtr->numNewFailures--;
	}
	rf_unlock_mutex2(raidPtr->mutex);

	/* Refuse I/O until raidinit() has marked the unit ready. */
	if ((rs->sc_flags & RAIDF_INITED) == 0) {
		printf("raid%d: raidstart not ready\n", raidPtr->raidid);
		return;
	}

	/* Kick the dk(4) layer to start draining its buffer queue. */
	dk_start(dksc, NULL);
}
   1884 
/*
 * Convert a struct buf into a RAIDframe access and submit it through
 * rf_DoAccess().  Returns EAGAIN when no openings are free (caller
 * retries later), ENOSPC when the request falls outside the array or
 * is not sector-aligned, otherwise rf_DoAccess()'s return value.
 */
static int
raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp)
{
	RF_SectorCount_t num_blocks, pb, sum;
	RF_RaidAddr_t raid_addr;
	daddr_t blocknum;
	int     do_async;
	int rc;

	rf_lock_mutex2(raidPtr->mutex);
	if (raidPtr->openings == 0) {
		rf_unlock_mutex2(raidPtr->mutex);
		return EAGAIN;
	}
	rf_unlock_mutex2(raidPtr->mutex);

	blocknum = bp->b_rawblkno;

	db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
		    (int) blocknum));

	db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
	db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));

	/* *THIS* is where we adjust what block we're going to...
	 * but DO NOT TOUCH bp->b_blkno!!! */
	raid_addr = blocknum;

	/* pb = 1 when the byte count is not a whole number of sectors. */
	num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
	pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
	sum = raid_addr + num_blocks + pb;
	/* NOTE(review): the "1 ||" forces this branch unconditionally --
	 * looks like a debugging leftover; db1_printf may still compile
	 * to nothing. */
	if (1 || rf_debugKernelAccess) {
		db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
			    (int) raid_addr, (int) sum, (int) num_blocks,
			    (int) pb, (int) bp->b_resid));
	}
	/* Reject accesses past the end of the array; the "sum <" tests
	 * catch unsigned wraparound of the addition above. */
	if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
	    || (sum < num_blocks) || (sum < pb)) {
		rc = ENOSPC;
		goto done;
	}
	/*
	 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
	 */

	/* Partial-sector requests are not supported. */
	if (bp->b_bcount & raidPtr->sectorMask) {
		rc = ENOSPC;
		goto done;
	}
	db1_printf(("Calling DoAccess..\n"));


	/* Consume one opening; released elsewhere on I/O completion. */
	rf_lock_mutex2(raidPtr->mutex);
	raidPtr->openings--;
	rf_unlock_mutex2(raidPtr->mutex);

	/*
	 * Everything is async.
	 */
	do_async = 1;

	/* don't ever condition on bp->b_flags & B_WRITE.
	 * always condition on B_READ instead */

	rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
			 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
			 do_async, raid_addr, num_blocks,
			 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);

done:
	return rc;
}
   1957 
   1958 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   1959 
   1960 int
   1961 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
   1962 {
   1963 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   1964 	struct buf *bp;
   1965 
   1966 	req->queue = queue;
   1967 	bp = req->bp;
   1968 
   1969 	switch (req->type) {
   1970 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   1971 		/* XXX need to do something extra here.. */
   1972 		/* I'm leaving this in, as I've never actually seen it used,
   1973 		 * and I'd like folks to report it... GO */
   1974 		printf(("WAKEUP CALLED\n"));
   1975 		queue->numOutstanding++;
   1976 
   1977 		bp->b_flags = 0;
   1978 		bp->b_private = req;
   1979 
   1980 		KernelWakeupFunc(bp);
   1981 		break;
   1982 
   1983 	case RF_IO_TYPE_READ:
   1984 	case RF_IO_TYPE_WRITE:
   1985 #if RF_ACC_TRACE > 0
   1986 		if (req->tracerec) {
   1987 			RF_ETIMER_START(req->tracerec->timer);
   1988 		}
   1989 #endif
   1990 		InitBP(bp, queue->rf_cinfo->ci_vp,
   1991 		    op, queue->rf_cinfo->ci_dev,
   1992 		    req->sectorOffset, req->numSector,
   1993 		    req->buf, KernelWakeupFunc, (void *) req,
   1994 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   1995 
   1996 		if (rf_debugKernelAccess) {
   1997 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   1998 				(long) bp->b_blkno));
   1999 		}
   2000 		queue->numOutstanding++;
   2001 		queue->last_deq_sector = req->sectorOffset;
   2002 		/* acc wouldn't have been let in if there were any pending
   2003 		 * reqs at any other priority */
   2004 		queue->curPriority = req->priority;
   2005 
   2006 		db1_printf(("Going for %c to unit %d col %d\n",
   2007 			    req->type, queue->raidPtr->raidid,
   2008 			    queue->col));
   2009 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   2010 			(int) req->sectorOffset, (int) req->numSector,
   2011 			(int) (req->numSector <<
   2012 			    queue->raidPtr->logBytesPerSector),
   2013 			(int) queue->raidPtr->logBytesPerSector));
   2014 
   2015 		/*
   2016 		 * XXX: drop lock here since this can block at
   2017 		 * least with backing SCSI devices.  Retake it
   2018 		 * to minimize fuss with calling interfaces.
   2019 		 */
   2020 
   2021 		RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
   2022 		bdev_strategy(bp);
   2023 		RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
   2024 		break;
   2025 
   2026 	default:
   2027 		panic("bad req->type in rf_DispatchKernelIO");
   2028 	}
   2029 	db1_printf(("Exiting from DispatchKernelIO\n"));
   2030 
   2031 	return (0);
   2032 }
/* this is the callback function associated with an I/O invoked from
   kernel code.
 */
/*
 * Completion callback for component I/O issued by rf_DispatchKernelIO().
 * Records timing stats, marks the component failed on I/O error (if
 * doing so would not exceed the layout's fault tolerance), and hands
 * the request to the raidio thread via the iodone queue.
 */
static void
KernelWakeupFunc(struct buf *bp)
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;

	db1_printf(("recovering the request queue:\n"));

	/* b_private was set to the request in InitBP()/DispatchKernelIO. */
	req = bp->b_private;

	queue = (RF_DiskQueue_t *) req->queue;

	rf_lock_mutex2(queue->raidPtr->iodone_lock);

#if RF_ACC_TRACE > 0
	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		rf_lock_mutex2(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		rf_unlock_mutex2(rf_tracing_mutex);
	}
#endif

	/* XXX Ok, let's get aggressive... If b_error is set, let's go
	 * ballistic, and mark the component as hosed... */

	if (bp->b_error != 0) {
		/* Mark the disk as dead */
		/* but only mark it once... */
		/* and only if it wouldn't leave this RAID set
		   completely broken */
		if (((queue->raidPtr->Disks[queue->col].status ==
		      rf_ds_optimal) ||
		     (queue->raidPtr->Disks[queue->col].status ==
		      rf_ds_used_spare)) &&
		     (queue->raidPtr->numFailures <
		      queue->raidPtr->Layout.map->faultsTolerated)) {
			printf("raid%d: IO Error (%d). Marking %s as failed.\n",
			       queue->raidPtr->raidid,
			       bp->b_error,
			       queue->raidPtr->Disks[queue->col].devname);
			queue->raidPtr->Disks[queue->col].status =
			    rf_ds_failed;
			queue->raidPtr->status = rf_rs_degraded;
			queue->raidPtr->numFailures++;
			/* numNewFailures triggers a label update in
			 * raidstart(). */
			queue->raidPtr->numNewFailures++;
		} else {	/* Disk is already dead... */
			/* printf("Disk already marked as dead!\n"); */
		}

	}

	/* Fill in the error value */
	req->error = bp->b_error;

	/* Drop this one on the "finished" queue... */
	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);

	/* Let the raidio thread know there is work to be done. */
	rf_signal_cond2(queue->raidPtr->iodone_cv);

	rf_unlock_mutex2(queue->raidPtr->iodone_lock);
}
   2102 
   2103 
   2104 /*
   2105  * initialize a buf structure for doing an I/O in the kernel.
   2106  */
/*
 * Initialize a buf for a component I/O issued from kernel mode.
 * cbFunc/cbArg become b_iodone/b_private, so the callback can recover
 * the RF_DiskQueueData_t on completion.  Panics if the byte count
 * would be zero.
 */
static void
InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
       RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
       void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
       struct proc *b_proc)
{
	/* bp->b_flags       = B_PHYS | rw_flag; */
	bp->b_flags = rw_flag;	/* XXX need B_PHYS here too??? */
	bp->b_oflags = 0;
	bp->b_cflags = 0;
	bp->b_bcount = numSect << logBytesPerSector;
	bp->b_bufsize = bp->b_bcount;
	bp->b_error = 0;
	bp->b_dev = dev;
	bp->b_data = bf;
	/* Convert the sector address to DEV_BSIZE units. */
	bp->b_blkno = startSect << logBytesPerSector >> DEV_BSHIFT;
	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
	if (bp->b_bcount == 0) {
		panic("bp->b_bcount is zero in InitBP!!");
	}
	bp->b_proc = b_proc;
	bp->b_iodone = cbFunc;
	bp->b_private = cbArg;
}
   2131 
   2132 /*
   2133  * Wait interruptibly for an exclusive lock.
   2134  *
   2135  * XXX
   2136  * Several drivers do this; it should be abstracted and made MP-safe.
   2137  * (Hmm... where have we seen this warning before :->  GO )
   2138  */
   2139 static int
   2140 raidlock(struct raid_softc *rs)
   2141 {
   2142 	int     error;
   2143 
   2144 	error = 0;
   2145 	mutex_enter(&rs->sc_mutex);
   2146 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2147 		rs->sc_flags |= RAIDF_WANTED;
   2148 		error = cv_wait_sig(&rs->sc_cv, &rs->sc_mutex);
   2149 		if (error != 0)
   2150 			goto done;
   2151 	}
   2152 	rs->sc_flags |= RAIDF_LOCKED;
   2153 done:
   2154 	mutex_exit(&rs->sc_mutex);
   2155 	return (error);
   2156 }
   2157 /*
   2158  * Unlock and wake up any waiters.
   2159  */
   2160 static void
   2161 raidunlock(struct raid_softc *rs)
   2162 {
   2163 
   2164 	mutex_enter(&rs->sc_mutex);
   2165 	rs->sc_flags &= ~RAIDF_LOCKED;
   2166 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2167 		rs->sc_flags &= ~RAIDF_WANTED;
   2168 		cv_broadcast(&rs->sc_cv);
   2169 	}
   2170 	mutex_exit(&rs->sc_mutex);
   2171 }
   2172 
   2173 
   2174 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2175 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
   2176 #define RF_PARITY_MAP_SIZE   RF_PARITYMAP_NBYTE
   2177 
/* Byte offset of the component label area on each component. */
static daddr_t
rf_component_info_offset(void)
{

	return RF_COMPONENT_INFO_OFFSET;
}
   2184 
   2185 static daddr_t
   2186 rf_component_info_size(unsigned secsize)
   2187 {
   2188 	daddr_t info_size;
   2189 
   2190 	KASSERT(secsize);
   2191 	if (secsize > RF_COMPONENT_INFO_SIZE)
   2192 		info_size = secsize;
   2193 	else
   2194 		info_size = RF_COMPONENT_INFO_SIZE;
   2195 
   2196 	return info_size;
   2197 }
   2198 
   2199 static daddr_t
   2200 rf_parity_map_offset(RF_Raid_t *raidPtr)
   2201 {
   2202 	daddr_t map_offset;
   2203 
   2204 	KASSERT(raidPtr->bytesPerSector);
   2205 	if (raidPtr->bytesPerSector > RF_COMPONENT_INFO_SIZE)
   2206 		map_offset = raidPtr->bytesPerSector;
   2207 	else
   2208 		map_offset = RF_COMPONENT_INFO_SIZE;
   2209 	map_offset += rf_component_info_offset();
   2210 
   2211 	return map_offset;
   2212 }
   2213 
   2214 static daddr_t
   2215 rf_parity_map_size(RF_Raid_t *raidPtr)
   2216 {
   2217 	daddr_t map_size;
   2218 
   2219 	if (raidPtr->bytesPerSector > RF_PARITY_MAP_SIZE)
   2220 		map_size = raidPtr->bytesPerSector;
   2221 	else
   2222 		map_size = RF_PARITY_MAP_SIZE;
   2223 
   2224 	return map_size;
   2225 }
   2226 
   2227 int
   2228 raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
   2229 {
   2230 	RF_ComponentLabel_t *clabel;
   2231 
   2232 	clabel = raidget_component_label(raidPtr, col);
   2233 	clabel->clean = RF_RAID_CLEAN;
   2234 	raidflush_component_label(raidPtr, col);
   2235 	return(0);
   2236 }
   2237 
   2238 
   2239 int
   2240 raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
   2241 {
   2242 	RF_ComponentLabel_t *clabel;
   2243 
   2244 	clabel = raidget_component_label(raidPtr, col);
   2245 	clabel->clean = RF_RAID_DIRTY;
   2246 	raidflush_component_label(raidPtr, col);
   2247 	return(0);
   2248 }
   2249 
/*
 * Read the on-disk component label for column `col' into the in-core
 * copy (raid_cinfo[col].ci_label).  Returns the raidread result.
 */
int
raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	KASSERT(raidPtr->bytesPerSector);
	return raidread_component_label(raidPtr->bytesPerSector,
	    raidPtr->Disks[col].dev,
	    raidPtr->raid_cinfo[col].ci_vp,
	    &raidPtr->raid_cinfo[col].ci_label);
}
   2259 
/* Return a pointer to the in-core component label for column `col'. */
RF_ComponentLabel_t *
raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	return &raidPtr->raid_cinfo[col].ci_label;
}
   2265 
/*
 * Write the in-core component label for column `col' to disk, first
 * stamping it with the set's current mod_counter.
 */
int
raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	RF_ComponentLabel_t *label;

	label = &raidPtr->raid_cinfo[col].ci_label;
	label->mod_counter = raidPtr->mod_counter;
#ifndef RF_NO_PARITY_MAP
	/* keep the parity map's mod counter in sync with the label's */
	label->parity_map_modcount = label->mod_counter;
#endif
	return raidwrite_component_label(raidPtr->bytesPerSector,
	    raidPtr->Disks[col].dev,
	    raidPtr->raid_cinfo[col].ci_vp, label);
}
   2280 
   2281 
/* Read a component label from its fixed on-disk area. */
static int
raidread_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
    RF_ComponentLabel_t *clabel)
{
	return raidread_component_area(dev, b_vp, clabel,
	    sizeof(RF_ComponentLabel_t),
	    rf_component_info_offset(),
	    rf_component_info_size(secsize));
}
   2291 
   2292 /* ARGSUSED */
/*
 * Read `dsize' bytes at byte offset `offset' from the raw component
 * and copy the first `msize' bytes into `data'.  Returns 0 on success
 * or an errno from biowait(); EINVAL if the component has no vnode.
 */
/* ARGSUSED */
static int
raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
    size_t msize, daddr_t offset, daddr_t dsize)
{
	struct buf *bp;
	int error;

	/* XXX should probably ensure that we don't try to do this if
	   someone has changed rf_protected_sectors. */

	if (b_vp == NULL) {
		/* For whatever reason, this component is not valid.
		   Don't try to read a component label from it. */
		return(EINVAL);
	}

	/* get a block of the appropriate size... */
	bp = geteblk((int)dsize);
	bp->b_dev = dev;

	/* get our ducks in a row for the read */
	bp->b_blkno = offset / DEV_BSIZE;
	bp->b_bcount = dsize;
	bp->b_flags |= B_READ;
 	bp->b_resid = dsize;

	/* synchronous read: issue, then wait for completion */
	bdev_strategy(bp);
	error = biowait(bp);

	if (!error) {
		memcpy(data, bp->b_data, msize);
	}

	brelse(bp, 0);
	return(error);
}
   2329 
   2330 
/* Write a component label to its fixed on-disk area (synchronously). */
static int
raidwrite_component_label(unsigned secsize, dev_t dev, struct vnode *b_vp,
    RF_ComponentLabel_t *clabel)
{
	return raidwrite_component_area(dev, b_vp, clabel,
	    sizeof(RF_ComponentLabel_t),
	    rf_component_info_offset(),
	    rf_component_info_size(secsize), 0);
}
   2340 
   2341 /* ARGSUSED */
/*
 * Write `msize' bytes from `data' (zero-padded to `dsize') at byte
 * offset `offset' on the raw component.  When `asyncp' is set the
 * write is issued B_ASYNC and 0 is returned immediately; otherwise
 * returns the biowait() result.
 */
/* ARGSUSED */
static int
raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
    size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
{
	struct buf *bp;
	int error;

	/* get a block of the appropriate size... */
	bp = geteblk((int)dsize);
	bp->b_dev = dev;

	/* get our ducks in a row for the write */
	bp->b_blkno = offset / DEV_BSIZE;
	bp->b_bcount = dsize;
	bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
 	bp->b_resid = dsize;

	/* zero-fill, then copy in the payload */
	memset(bp->b_data, 0, dsize);
	memcpy(bp->b_data, data, msize);

	bdev_strategy(bp);
	if (asyncp)
		/* NOTE(review): the async path skips biowait/brelse here --
		 * presumably the buffer is released at completion; verify. */
		return 0;
	error = biowait(bp);
	brelse(bp, 0);
	if (error) {
#if 1
		printf("Failed to write RAID component info!\n");
#endif
	}

	return(error);
}
   2375 
/*
 * Write the on-disk parity map to every live component of the set.
 * Write errors are currently ignored (see XXXjld below).
 */
void
rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
{
	int c;

	for (c = 0; c < raidPtr->numCol; c++) {
		/* Skip dead disks. */
		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
			continue;
		/* XXXjld: what if an error occurs here? */
		raidwrite_component_area(raidPtr->Disks[c].dev,
		    raidPtr->raid_cinfo[c].ci_vp, map,
		    RF_PARITYMAP_NBYTE,
		    rf_parity_map_offset(raidPtr),
		    rf_parity_map_size(raidPtr), 0);
	}
}
   2393 
/*
 * Read the parity map from every live component and merge them into
 * `*map' via rf_paritymap_merge().  NOTE(review): if every disk is
 * dead, `*map' is left untouched.
 */
void
rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
{
	struct rf_paritymap_ondisk tmp;
	int c,first;

	first=1;
	for (c = 0; c < raidPtr->numCol; c++) {
		/* Skip dead disks. */
		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
			continue;
		raidread_component_area(raidPtr->Disks[c].dev,
		    raidPtr->raid_cinfo[c].ci_vp, &tmp,
		    RF_PARITYMAP_NBYTE,
		    rf_parity_map_offset(raidPtr),
		    rf_parity_map_size(raidPtr));
		if (first) {
			/* first live component seeds the result */
			memcpy(map, &tmp, sizeof(*map));
			first = 0;
		} else {
			rf_paritymap_merge(map, &tmp);
		}
	}
}
   2418 
/*
 * Bump the set's mod counter and mark every live component (and every
 * in-use spare) dirty on disk.
 */
void
rf_markalldirty(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t *clabel;
	int sparecol;
	int c;
	int j;
	int scol = -1;

	raidPtr->mod_counter++;
	for (c = 0; c < raidPtr->numCol; c++) {
		/* we don't want to touch (at all) a disk that has
		   failed */
		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
			clabel = raidget_component_label(raidPtr, c);
			if (clabel->status == rf_ds_spared) {
				/* XXX do something special...
				   but whatever you do, don't
				   try to access it!! */
			} else {
				raidmarkdirty(raidPtr, c);
			}
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* find which column this spare stands in for */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}
			/* NOTE(review): if no column references this spare,
			   scol keeps its previous value (initially -1). */

			clabel = raidget_component_label(raidPtr, sparecol);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, clabel);

			clabel->row = 0;
			clabel->column = scol;
			/* Note: we *don't* change status from rf_ds_used_spare
			   to rf_ds_optimal */
			/* clabel.status = rf_ds_optimal; */

			raidmarkdirty(raidPtr, sparecol);
		}
	}
}
   2478 
   2479 
/*
 * Rewrite the component labels of all optimal components and in-use
 * spares with the current mod counter and unit number.  When `final'
 * is RF_FINAL_COMPONENT_UPDATE and parity is known good, the labels
 * are additionally marked clean.
 */
void
rf_update_component_labels(RF_Raid_t *raidPtr, int final)
{
	RF_ComponentLabel_t *clabel;
	int sparecol;
	int c;
	int j;
	int scol;

	scol = -1;

	/* XXX should do extra checks to make sure things really are clean,
	   rather than blindly setting the clean bit... */

	raidPtr->mod_counter++;

	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			clabel = raidget_component_label(raidPtr, c);
			/* make sure status is noted */
			clabel->status = rf_ds_optimal;

			/* note what unit we are configured as */
			clabel->last_unit = raidPtr->raidid;

			raidflush_component_label(raidPtr, c);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(raidPtr, c);
				}
			}
		}
		/* else we don't touch it.. */
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		/* Need to ensure that the reconstruct actually completed! */
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			/* find which column this spare stands in for */
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			/* XXX shouldn't *really* need this... */
			clabel = raidget_component_label(raidPtr, sparecol);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, clabel);

			clabel->column = scol;
			clabel->status = rf_ds_optimal;
			clabel->last_unit = raidPtr->raidid;

			raidflush_component_label(raidPtr, sparecol);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(raidPtr, sparecol);
				}
			}
		}
	}
}
   2554 
   2555 void
   2556 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
   2557 {
   2558 
   2559 	if (vp != NULL) {
   2560 		if (auto_configured == 1) {
   2561 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2562 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2563 			vput(vp);
   2564 
   2565 		} else {
   2566 			(void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
   2567 		}
   2568 	}
   2569 }
   2570 
   2571 
   2572 void
   2573 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
   2574 {
   2575 	int r,c;
   2576 	struct vnode *vp;
   2577 	int acd;
   2578 
   2579 
   2580 	/* We take this opportunity to close the vnodes like we should.. */
   2581 
   2582 	for (c = 0; c < raidPtr->numCol; c++) {
   2583 		vp = raidPtr->raid_cinfo[c].ci_vp;
   2584 		acd = raidPtr->Disks[c].auto_configured;
   2585 		rf_close_component(raidPtr, vp, acd);
   2586 		raidPtr->raid_cinfo[c].ci_vp = NULL;
   2587 		raidPtr->Disks[c].auto_configured = 0;
   2588 	}
   2589 
   2590 	for (r = 0; r < raidPtr->numSpare; r++) {
   2591 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
   2592 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
   2593 		rf_close_component(raidPtr, vp, acd);
   2594 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
   2595 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
   2596 	}
   2597 }
   2598 
   2599 
/*
 * Kernel thread body: mark req->col as failed via rf_FailDisk()
 * (reconstructing onto a spare when RF_FDFLAGS_RECON is set), free
 * the request, and exit the thread.
 */
void
rf_ReconThread(struct rf_recon_req *req)
{
	int     s;
	RF_Raid_t *raidPtr;

	s = splbio();
	raidPtr = (RF_Raid_t *) req->raidPtr;
	raidPtr->recon_in_progress = 1;

	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));

	/* the request is ours to free once rf_FailDisk() returns */
	RF_Free(req, sizeof(*req));

	raidPtr->recon_in_progress = 0;
	splx(s);

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
   2621 
/*
 * Kernel thread body: rewrite all parity.  On success, mark parity
 * good; then notify any shutdown waiter and exit the thread.
 */
void
rf_RewriteParityThread(RF_Raid_t *raidPtr)
{
	int retcode;
	int s;

	raidPtr->parity_rewrite_stripes_done = 0;
	raidPtr->parity_rewrite_in_progress = 1;
	s = splbio();
	retcode = rf_RewriteParity(raidPtr);
	splx(s);
	if (retcode) {
		printf("raid%d: Error re-writing parity (%d)!\n",
		    raidPtr->raidid, retcode);
	} else {
		/* set the clean bit!  If we shutdown correctly,
		   the clean bit on each component label will get
		   set */
		raidPtr->parity_good = RF_RAID_CLEAN;
	}
	raidPtr->parity_rewrite_in_progress = 0;

	/* Anyone waiting for us to stop?  If so, inform them... */
	if (raidPtr->waitShutdown) {
		wakeup(&raidPtr->parity_rewrite_in_progress);
	}

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
   2652 
   2653 
/*
 * Kernel thread body: copy reconstructed data back from spares to
 * replaced components, then exit the thread.
 */
void
rf_CopybackThread(RF_Raid_t *raidPtr)
{
	int s;

	raidPtr->copyback_in_progress = 1;
	s = splbio();
	rf_CopybackReconstructedData(raidPtr);
	splx(s);
	raidPtr->copyback_in_progress = 0;

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
   2668 
   2669 
/*
 * Kernel thread body: reconstruct req->col in place (onto the same
 * component), free the request, and exit the thread.
 */
void
rf_ReconstructInPlaceThread(struct rf_recon_req *req)
{
	int s;
	RF_Raid_t *raidPtr;

	s = splbio();
	raidPtr = req->raidPtr;
	raidPtr->recon_in_progress = 1;
	rf_ReconstructInPlace(raidPtr, req->col);
	/* the request is ours to free once reconstruction returns */
	RF_Free(req, sizeof(*req));
	raidPtr->recon_in_progress = 0;
	splx(s);

	/* That's all... */
	kthread_exit(0);	/* does not return */
}
   2687 
/*
 * Try to read a RAIDframe component label from `vp'.  If the label
 * looks reasonable, prepend a new RF_AutoConfig_t to `ac_list' and
 * return the new list head.  If the component is not usable, close
 * the vnode and return the list unchanged.  On allocation failure the
 * entire list is torn down and NULL is returned.
 */
static RF_AutoConfig_t *
rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
    const char *cname, RF_SectorCount_t size, uint64_t numsecs,
    unsigned secsize)
{
	int good_one = 0;
	RF_ComponentLabel_t *clabel;
	RF_AutoConfig_t *ac;

	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
	if (clabel == NULL) {
		/* oomem is also entered by a backward goto from the
		 * second allocation failure below. */
oomem:
		    while(ac_list) {
			    ac = ac_list;
			    if (ac->clabel)
				    free(ac->clabel, M_RAIDFRAME);
			    ac_list = ac_list->next;
			    free(ac, M_RAIDFRAME);
		    }
		    printf("RAID auto config: out of memory!\n");
		    return NULL; /* XXX probably should panic? */
	}

	if (!raidread_component_label(secsize, dev, vp, clabel)) {
		/* Got the label.  Does it look reasonable? */
		if (rf_reasonable_label(clabel, numsecs) &&
		    (rf_component_label_partitionsize(clabel) <= size)) {
#ifdef DEBUG
			printf("Component on: %s: %llu\n",
				cname, (unsigned long long)size);
			rf_print_component_label(clabel);
#endif
			/* if it's reasonable, add it, else ignore it. */
			ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
				M_NOWAIT);
			if (ac == NULL) {
				free(clabel, M_RAIDFRAME);
				goto oomem;
			}
			strlcpy(ac->devname, cname, sizeof(ac->devname));
			ac->dev = dev;
			ac->vp = vp;
			ac->clabel = clabel;
			/* prepend to the list */
			ac->next = ac_list;
			ac_list = ac;
			good_one = 1;
		}
	}
	if (!good_one) {
		/* cleanup: ownership of vp stays with us, so close it */
		free(clabel, M_RAIDFRAME);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
		vput(vp);
	}
	return ac_list;
}
   2745 
   2746 RF_AutoConfig_t *
   2747 rf_find_raid_components(void)
   2748 {
   2749 	struct vnode *vp;
   2750 	struct disklabel label;
   2751 	device_t dv;
   2752 	deviter_t di;
   2753 	dev_t dev;
   2754 	int bmajor, bminor, wedge, rf_part_found;
   2755 	int error;
   2756 	int i;
   2757 	RF_AutoConfig_t *ac_list;
   2758 	uint64_t numsecs;
   2759 	unsigned secsize;
   2760 	int dowedges;
   2761 
   2762 	/* initialize the AutoConfig list */
   2763 	ac_list = NULL;
   2764 
   2765 	/*
   2766 	 * we begin by trolling through *all* the devices on the system *twice*
   2767 	 * first we scan for wedges, second for other devices. This avoids
   2768 	 * using a raw partition instead of a wedge that covers the whole disk
   2769 	 */
   2770 
   2771 	for (dowedges=1; dowedges>=0; --dowedges) {
   2772 		for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
   2773 		     dv = deviter_next(&di)) {
   2774 
   2775 			/* we are only interested in disks... */
   2776 			if (device_class(dv) != DV_DISK)
   2777 				continue;
   2778 
   2779 			/* we don't care about floppies... */
   2780 			if (device_is_a(dv, "fd")) {
   2781 				continue;
   2782 			}
   2783 
   2784 			/* we don't care about CD's... */
   2785 			if (device_is_a(dv, "cd")) {
   2786 				continue;
   2787 			}
   2788 
   2789 			/* we don't care about md's... */
   2790 			if (device_is_a(dv, "md")) {
   2791 				continue;
   2792 			}
   2793 
   2794 			/* hdfd is the Atari/Hades floppy driver */
   2795 			if (device_is_a(dv, "hdfd")) {
   2796 				continue;
   2797 			}
   2798 
   2799 			/* fdisa is the Atari/Milan floppy driver */
   2800 			if (device_is_a(dv, "fdisa")) {
   2801 				continue;
   2802 			}
   2803 
   2804 			/* are we in the wedges pass ? */
   2805 			wedge = device_is_a(dv, "dk");
   2806 			if (wedge != dowedges) {
   2807 				continue;
   2808 			}
   2809 
   2810 			/* need to find the device_name_to_block_device_major stuff */
   2811 			bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
   2812 
   2813 			rf_part_found = 0; /*No raid partition as yet*/
   2814 
   2815 			/* get a vnode for the raw partition of this disk */
   2816 			bminor = minor(device_unit(dv));
   2817 			dev = wedge ? makedev(bmajor, bminor) :
   2818 			    MAKEDISKDEV(bmajor, bminor, RAW_PART);
   2819 			if (bdevvp(dev, &vp))
   2820 				panic("RAID can't alloc vnode");
   2821 
   2822 			error = VOP_OPEN(vp, FREAD | FSILENT, NOCRED);
   2823 
   2824 			if (error) {
   2825 				/* "Who cares."  Continue looking
   2826 				   for something that exists*/
   2827 				vput(vp);
   2828 				continue;
   2829 			}
   2830 
   2831 			error = getdisksize(vp, &numsecs, &secsize);
   2832 			if (error) {
   2833 				printf("RAIDframe: can't get disk size for "
   2834 				    "dev %s (%d)\n", device_xname(dv), error);
   2835 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2836 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2837 				vput(vp);
   2838 				continue;
   2839 			}
   2840 			if (wedge) {
   2841 				struct dkwedge_info dkw;
   2842 				error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
   2843 				    NOCRED);
   2844 				if (error) {
   2845 					printf("RAIDframe: can't get wedge info for "
   2846 					    "dev %s (%d)\n", device_xname(dv), error);
   2847 					vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2848 					VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2849 					vput(vp);
   2850 					continue;
   2851 				}
   2852 
   2853 				if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
   2854 					vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2855 					VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2856 					vput(vp);
   2857 					continue;
   2858 				}
   2859 
   2860 				ac_list = rf_get_component(ac_list, dev, vp,
   2861 				    device_xname(dv), dkw.dkw_size, numsecs, secsize);
   2862 				rf_part_found = 1; /*There is a raid component on this disk*/
   2863 				continue;
   2864 			}
   2865 
   2866 			/* Ok, the disk exists.  Go get the disklabel. */
   2867 			error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
   2868 			if (error) {
   2869 				/*
   2870 				 * XXX can't happen - open() would
   2871 				 * have errored out (or faked up one)
   2872 				 */
   2873 				if (error != ENOTTY)
   2874 					printf("RAIDframe: can't get label for dev "
   2875 					    "%s (%d)\n", device_xname(dv), error);
   2876 			}
   2877 
   2878 			/* don't need this any more.  We'll allocate it again
   2879 			   a little later if we really do... */
   2880 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2881 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2882 			vput(vp);
   2883 
   2884 			if (error)
   2885 				continue;
   2886 
   2887 			rf_part_found = 0; /*No raid partitions yet*/
   2888 			for (i = 0; i < label.d_npartitions; i++) {
   2889 				char cname[sizeof(ac_list->devname)];
   2890 
   2891 				/* We only support partitions marked as RAID */
   2892 				if (label.d_partitions[i].p_fstype != FS_RAID)
   2893 					continue;
   2894 
   2895 				dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
   2896 				if (bdevvp(dev, &vp))
   2897 					panic("RAID can't alloc vnode");
   2898 
   2899 				error = VOP_OPEN(vp, FREAD, NOCRED);
   2900 				if (error) {
   2901 					/* Whatever... */
   2902 					vput(vp);
   2903 					continue;
   2904 				}
   2905 				snprintf(cname, sizeof(cname), "%s%c",
   2906 				    device_xname(dv), 'a' + i);
   2907 				ac_list = rf_get_component(ac_list, dev, vp, cname,
   2908 					label.d_partitions[i].p_size, numsecs, secsize);
   2909 				rf_part_found = 1; /*There is at least one raid partition on this disk*/
   2910 			}
   2911 
   2912 			/*
   2913 			 *If there is no raid component on this disk, either in a
   2914 			 *disklabel or inside a wedge, check the raw partition as well,
   2915 			 *as it is possible to configure raid components on raw disk
   2916 			 *devices.
   2917 			 */
   2918 
   2919 			if (!rf_part_found) {
   2920 				char cname[sizeof(ac_list->devname)];
   2921 
   2922 				dev = MAKEDISKDEV(bmajor, device_unit(dv), RAW_PART);
   2923 				if (bdevvp(dev, &vp))
   2924 					panic("RAID can't alloc vnode");
   2925 
   2926 				error = VOP_OPEN(vp, FREAD, NOCRED);
   2927 				if (error) {
   2928 					/* Whatever... */
   2929 					vput(vp);
   2930 					continue;
   2931 				}
   2932 				snprintf(cname, sizeof(cname), "%s%c",
   2933 				    device_xname(dv), 'a' + RAW_PART);
   2934 				ac_list = rf_get_component(ac_list, dev, vp, cname,
   2935 					label.d_partitions[RAW_PART].p_size, numsecs, secsize);
   2936 			}
   2937 		}
   2938 		deviter_release(&di);
   2939 	}
   2940 	return ac_list;
   2941 }
   2942 
   2943 
   2944 int
   2945 rf_reasonable_label(RF_ComponentLabel_t *clabel, uint64_t numsecs)
   2946 {
   2947 
   2948 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   2949 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   2950 	    ((clabel->clean == RF_RAID_CLEAN) ||
   2951 	     (clabel->clean == RF_RAID_DIRTY)) &&
   2952 	    clabel->row >=0 &&
   2953 	    clabel->column >= 0 &&
   2954 	    clabel->num_rows > 0 &&
   2955 	    clabel->num_columns > 0 &&
   2956 	    clabel->row < clabel->num_rows &&
   2957 	    clabel->column < clabel->num_columns &&
   2958 	    clabel->blockSize > 0 &&
   2959 	    /*
   2960 	     * numBlocksHi may contain garbage, but it is ok since
   2961 	     * the type is unsigned.  If it is really garbage,
   2962 	     * rf_fix_old_label_size() will fix it.
   2963 	     */
   2964 	    rf_component_label_numblocks(clabel) > 0) {
   2965 		/*
   2966 		 * label looks reasonable enough...
   2967 		 * let's make sure it has no old garbage.
   2968 		 */
   2969 		if (numsecs)
   2970 			rf_fix_old_label_size(clabel, numsecs);
   2971 		return(1);
   2972 	}
   2973 	return(0);
   2974 }
   2975 
   2976 
   2977 /*
   2978  * For reasons yet unknown, some old component labels have garbage in
   2979  * the newer numBlocksHi region, and this causes lossage.  Since those
   2980  * disks will also have numsecs set to less than 32 bits of sectors,
   2981  * we can determine when this corruption has occurred, and fix it.
   2982  *
   2983  * The exact same problem, with the same unknown reason, happens to
   2984  * the partitionSizeHi member as well.
   2985  */
   2986 static void
   2987 rf_fix_old_label_size(RF_ComponentLabel_t *clabel, uint64_t numsecs)
   2988 {
   2989 
   2990 	if (numsecs < ((uint64_t)1 << 32)) {
   2991 		if (clabel->numBlocksHi) {
   2992 			printf("WARNING: total sectors < 32 bits, yet "
   2993 			       "numBlocksHi set\n"
   2994 			       "WARNING: resetting numBlocksHi to zero.\n");
   2995 			clabel->numBlocksHi = 0;
   2996 		}
   2997 
   2998 		if (clabel->partitionSizeHi) {
   2999 			printf("WARNING: total sectors < 32 bits, yet "
   3000 			       "partitionSizeHi set\n"
   3001 			       "WARNING: resetting partitionSizeHi to zero.\n");
   3002 			clabel->partitionSizeHi = 0;
   3003 		}
   3004 	}
   3005 }
   3006 
   3007 
   3008 #ifdef DEBUG
/*
 * rf_print_component_label: dump a component label to the console in
 * human-readable form.  Debug builds only (DEBUG).
 */
void
rf_print_component_label(RF_ComponentLabel_t *clabel)
{
	uint64_t numBlocks;
	/* names for root_partition values 0..2; 3 is out of range */
	static const char *rp[] = {
	    "No", "Force", "Soft", "*invalid*"
	};


	numBlocks = rf_component_label_numblocks(clabel);

	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
	       clabel->row, clabel->column,
	       clabel->num_rows, clabel->num_columns);
	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
	       clabel->version, clabel->serial_number,
	       clabel->mod_counter);
	printf("   Clean: %s Status: %d\n",
	       clabel->clean ? "Yes" : "No", clabel->status);
	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
	printf("   RAID Level: %c  blocksize: %d numBlocks: %"PRIu64"\n",
	       (char) clabel->parityConfig, clabel->blockSize, numBlocks);
	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
	/* & 3 clamps the index into rp[]; out-of-range prints "*invalid*" */
	printf("   Root partition: %s\n", rp[clabel->root_partition & 3]);
	printf("   Last configured as: raid%d\n", clabel->last_unit);
#if 0
	   printf("   Config order: %d\n", clabel->config_order);
#endif

}
   3040 #endif
   3041 
   3042 RF_ConfigSet_t *
   3043 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
   3044 {
   3045 	RF_AutoConfig_t *ac;
   3046 	RF_ConfigSet_t *config_sets;
   3047 	RF_ConfigSet_t *cset;
   3048 	RF_AutoConfig_t *ac_next;
   3049 
   3050 
   3051 	config_sets = NULL;
   3052 
   3053 	/* Go through the AutoConfig list, and figure out which components
   3054 	   belong to what sets.  */
   3055 	ac = ac_list;
   3056 	while(ac!=NULL) {
   3057 		/* we're going to putz with ac->next, so save it here
   3058 		   for use at the end of the loop */
   3059 		ac_next = ac->next;
   3060 
   3061 		if (config_sets == NULL) {
   3062 			/* will need at least this one... */
   3063 			config_sets = (RF_ConfigSet_t *)
   3064 				malloc(sizeof(RF_ConfigSet_t),
   3065 				       M_RAIDFRAME, M_NOWAIT);
   3066 			if (config_sets == NULL) {
   3067 				panic("rf_create_auto_sets: No memory!");
   3068 			}
   3069 			/* this one is easy :) */
   3070 			config_sets->ac = ac;
   3071 			config_sets->next = NULL;
   3072 			config_sets->rootable = 0;
   3073 			ac->next = NULL;
   3074 		} else {
   3075 			/* which set does this component fit into? */
   3076 			cset = config_sets;
   3077 			while(cset!=NULL) {
   3078 				if (rf_does_it_fit(cset, ac)) {
   3079 					/* looks like it matches... */
   3080 					ac->next = cset->ac;
   3081 					cset->ac = ac;
   3082 					break;
   3083 				}
   3084 				cset = cset->next;
   3085 			}
   3086 			if (cset==NULL) {
   3087 				/* didn't find a match above... new set..*/
   3088 				cset = (RF_ConfigSet_t *)
   3089 					malloc(sizeof(RF_ConfigSet_t),
   3090 					       M_RAIDFRAME, M_NOWAIT);
   3091 				if (cset == NULL) {
   3092 					panic("rf_create_auto_sets: No memory!");
   3093 				}
   3094 				cset->ac = ac;
   3095 				ac->next = NULL;
   3096 				cset->next = config_sets;
   3097 				cset->rootable = 0;
   3098 				config_sets = cset;
   3099 			}
   3100 		}
   3101 		ac = ac_next;
   3102 	}
   3103 
   3104 
   3105 	return(config_sets);
   3106 }
   3107 
   3108 static int
   3109 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
   3110 {
   3111 	RF_ComponentLabel_t *clabel1, *clabel2;
   3112 
   3113 	/* If this one matches the *first* one in the set, that's good
   3114 	   enough, since the other members of the set would have been
   3115 	   through here too... */
   3116 	/* note that we are not checking partitionSize here..
   3117 
   3118 	   Note that we are also not checking the mod_counters here.
   3119 	   If everything else matches except the mod_counter, that's
   3120 	   good enough for this test.  We will deal with the mod_counters
   3121 	   a little later in the autoconfiguration process.
   3122 
   3123 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   3124 
   3125 	   The reason we don't check for this is that failed disks
   3126 	   will have lower modification counts.  If those disks are
   3127 	   not added to the set they used to belong to, then they will
   3128 	   form their own set, which may result in 2 different sets,
   3129 	   for example, competing to be configured at raid0, and
   3130 	   perhaps competing to be the root filesystem set.  If the
   3131 	   wrong ones get configured, or both attempt to become /,
   3132 	   weird behaviour and or serious lossage will occur.  Thus we
   3133 	   need to bring them into the fold here, and kick them out at
   3134 	   a later point.
   3135 
   3136 	*/
   3137 
   3138 	clabel1 = cset->ac->clabel;
   3139 	clabel2 = ac->clabel;
   3140 	if ((clabel1->version == clabel2->version) &&
   3141 	    (clabel1->serial_number == clabel2->serial_number) &&
   3142 	    (clabel1->num_rows == clabel2->num_rows) &&
   3143 	    (clabel1->num_columns == clabel2->num_columns) &&
   3144 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   3145 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   3146 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   3147 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   3148 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   3149 	    (clabel1->blockSize == clabel2->blockSize) &&
   3150 	    rf_component_label_numblocks(clabel1) ==
   3151 	    rf_component_label_numblocks(clabel2) &&
   3152 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   3153 	    (clabel1->root_partition == clabel2->root_partition) &&
   3154 	    (clabel1->last_unit == clabel2->last_unit) &&
   3155 	    (clabel1->config_order == clabel2->config_order)) {
   3156 		/* if it get's here, it almost *has* to be a match */
   3157 	} else {
   3158 		/* it's not consistent with somebody in the set..
   3159 		   punt */
   3160 		return(0);
   3161 	}
   3162 	/* all was fine.. it must fit... */
   3163 	return(1);
   3164 }
   3165 
/*
 * rf_have_enough_components: decide whether a config set has enough
 * live components (at the set's newest mod_counter) to be configured.
 * Returns 1 if configurable, 0 if too many components are missing for
 * the set's RAID level.
 */
int
rf_have_enough_components(RF_ConfigSet_t *cset)
{
	RF_AutoConfig_t *ac;
	RF_AutoConfig_t *auto_config;
	RF_ComponentLabel_t *clabel;
	int c;
	int num_cols;
	int num_missing;
	int mod_counter;
	int mod_counter_found;
	int even_pair_failed;
	char parity_type;


	/* check to see that we have enough 'live' components
	   of this set.  If so, we can configure it if necessary */

	num_cols = cset->ac->clabel->num_columns;
	parity_type = cset->ac->clabel->parityConfig;

	/* XXX Check for duplicate components!?!?!? */

	/* Determine what the mod_counter is supposed to be for this set. */

	/* the set's mod_counter is the highest one found on any member */
	mod_counter_found = 0;
	mod_counter = 0;
	ac = cset->ac;
	while(ac!=NULL) {
		if (mod_counter_found==0) {
			mod_counter = ac->clabel->mod_counter;
			mod_counter_found = 1;
		} else {
			if (ac->clabel->mod_counter > mod_counter) {
				mod_counter = ac->clabel->mod_counter;
			}
		}
		ac = ac->next;
	}

	num_missing = 0;
	auto_config = cset->ac;

	even_pair_failed = 0;
	/* for each column, look for a member at the set's mod_counter */
	for(c=0; c<num_cols; c++) {
		ac = auto_config;
		while(ac!=NULL) {
			if ((ac->clabel->column == c) &&
			    (ac->clabel->mod_counter == mod_counter)) {
				/* it's this one... */
#ifdef DEBUG
				printf("Found: %s at %d\n",
				       ac->devname,c);
#endif
				break;
			}
			ac=ac->next;
		}
		if (ac==NULL) {
				/* Didn't find one here! */
				/* special case for RAID 1, especially
				   where there are more than 2
				   components (where RAIDframe treats
				   things a little differently :( ) */
			if (parity_type == '1') {
				if (c%2 == 0) { /* even component */
					even_pair_failed = 1;
				} else { /* odd component.  If
					    we're failed, and
					    so is the even
					    component, it's
					    "Good Night, Charlie" */
					if (even_pair_failed == 1) {
						return(0);
					}
				}
			} else {
				/* normal accounting */
				num_missing++;
			}
		}
		if ((parity_type == '1') && (c%2 == 1)) {
				/* Just did an even component, and we didn't
				   bail.. reset the even_pair_failed flag,
				   and go on to the next component.... */
			even_pair_failed = 0;
		}
	}

	clabel = cset->ac->clabel;

	/* RAID 0 tolerates no missing members; RAID 4/5 tolerate one */
	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
		/* XXX this needs to be made *much* more general */
		/* Too many failures */
		return(0);
	}
	/* otherwise, all is well, and we've got enough to take a kick
	   at autoconfiguring this set */
	return(1);
}
   3268 
   3269 void
   3270 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
   3271 			RF_Raid_t *raidPtr)
   3272 {
   3273 	RF_ComponentLabel_t *clabel;
   3274 	int i;
   3275 
   3276 	clabel = ac->clabel;
   3277 
   3278 	/* 1. Fill in the common stuff */
   3279 	config->numRow = clabel->num_rows = 1;
   3280 	config->numCol = clabel->num_columns;
   3281 	config->numSpare = 0; /* XXX should this be set here? */
   3282 	config->sectPerSU = clabel->sectPerSU;
   3283 	config->SUsPerPU = clabel->SUsPerPU;
   3284 	config->SUsPerRU = clabel->SUsPerRU;
   3285 	config->parityConfig = clabel->parityConfig;
   3286 	/* XXX... */
   3287 	strcpy(config->diskQueueType,"fifo");
   3288 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3289 	config->layoutSpecificSize = 0; /* XXX ?? */
   3290 
   3291 	while(ac!=NULL) {
   3292 		/* row/col values will be in range due to the checks
   3293 		   in reasonable_label() */
   3294 		strcpy(config->devnames[0][ac->clabel->column],
   3295 		       ac->devname);
   3296 		ac = ac->next;
   3297 	}
   3298 
   3299 	for(i=0;i<RF_MAXDBGV;i++) {
   3300 		config->debugVars[i][0] = 0;
   3301 	}
   3302 }
   3303 
   3304 int
   3305 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
   3306 {
   3307 	RF_ComponentLabel_t *clabel;
   3308 	int column;
   3309 	int sparecol;
   3310 
   3311 	raidPtr->autoconfigure = new_value;
   3312 
   3313 	for(column=0; column<raidPtr->numCol; column++) {
   3314 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3315 			clabel = raidget_component_label(raidPtr, column);
   3316 			clabel->autoconfigure = new_value;
   3317 			raidflush_component_label(raidPtr, column);
   3318 		}
   3319 	}
   3320 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3321 		sparecol = raidPtr->numCol + column;
   3322 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3323 			clabel = raidget_component_label(raidPtr, sparecol);
   3324 			clabel->autoconfigure = new_value;
   3325 			raidflush_component_label(raidPtr, sparecol);
   3326 		}
   3327 	}
   3328 	return(new_value);
   3329 }
   3330 
   3331 int
   3332 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
   3333 {
   3334 	RF_ComponentLabel_t *clabel;
   3335 	int column;
   3336 	int sparecol;
   3337 
   3338 	raidPtr->root_partition = new_value;
   3339 	for(column=0; column<raidPtr->numCol; column++) {
   3340 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3341 			clabel = raidget_component_label(raidPtr, column);
   3342 			clabel->root_partition = new_value;
   3343 			raidflush_component_label(raidPtr, column);
   3344 		}
   3345 	}
   3346 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3347 		sparecol = raidPtr->numCol + column;
   3348 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3349 			clabel = raidget_component_label(raidPtr, sparecol);
   3350 			clabel->root_partition = new_value;
   3351 			raidflush_component_label(raidPtr, sparecol);
   3352 		}
   3353 	}
   3354 	return(new_value);
   3355 }
   3356 
   3357 void
   3358 rf_release_all_vps(RF_ConfigSet_t *cset)
   3359 {
   3360 	RF_AutoConfig_t *ac;
   3361 
   3362 	ac = cset->ac;
   3363 	while(ac!=NULL) {
   3364 		/* Close the vp, and give it back */
   3365 		if (ac->vp) {
   3366 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3367 			VOP_CLOSE(ac->vp, FREAD | FWRITE, NOCRED);
   3368 			vput(ac->vp);
   3369 			ac->vp = NULL;
   3370 		}
   3371 		ac = ac->next;
   3372 	}
   3373 }
   3374 
   3375 
   3376 void
   3377 rf_cleanup_config_set(RF_ConfigSet_t *cset)
   3378 {
   3379 	RF_AutoConfig_t *ac;
   3380 	RF_AutoConfig_t *next_ac;
   3381 
   3382 	ac = cset->ac;
   3383 	while(ac!=NULL) {
   3384 		next_ac = ac->next;
   3385 		/* nuke the label */
   3386 		free(ac->clabel, M_RAIDFRAME);
   3387 		/* cleanup the config structure */
   3388 		free(ac, M_RAIDFRAME);
   3389 		/* "next.." */
   3390 		ac = next_ac;
   3391 	}
   3392 	/* and, finally, nuke the config set */
   3393 	free(cset, M_RAIDFRAME);
   3394 }
   3395 
   3396 
/*
 * raid_init_component_label: initialize the set-wide fields of a
 * component label from the current state of the RAID set.  Per-component
 * fields (e.g. row/column) are presumably filled in by the caller --
 * not visible here.
 */
void
raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
{
	/* current version number */
	clabel->version = RF_COMPONENT_LABEL_VERSION;
	clabel->serial_number = raidPtr->serial_number;
	clabel->mod_counter = raidPtr->mod_counter;

	clabel->num_rows = 1;
	clabel->num_columns = raidPtr->numCol;
	clabel->clean = RF_RAID_DIRTY; /* not clean */
	clabel->status = rf_ds_optimal; /* "It's good!" */

	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;

	clabel->blockSize = raidPtr->bytesPerSector;
	/* stores the 64-bit count into the lo/hi field pair */
	rf_component_label_set_numblocks(clabel, raidPtr->sectorsPerDisk);

	/* XXX not portable */
	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
	clabel->maxOutstanding = raidPtr->maxOutstanding;
	clabel->autoconfigure = raidPtr->autoconfigure;
	clabel->root_partition = raidPtr->root_partition;
	clabel->last_unit = raidPtr->raidid;
	clabel->config_order = raidPtr->config_order;

#ifndef RF_NO_PARITY_MAP
	rf_paritymap_init_label(raidPtr->parity_map, clabel);
#endif
}
   3429 
/*
 * rf_auto_config_set: configure one auto-detected RAID set.  Picks a
 * unit number (preferring the unit the set was last configured as,
 * otherwise the next free one), builds an RF_Config_t from the component
 * labels, and runs the normal configuration path.  Returns the softc on
 * success, NULL on failure.
 */
struct raid_softc *
rf_auto_config_set(RF_ConfigSet_t *cset)
{
	RF_Raid_t *raidPtr;
	RF_Config_t *config;
	int raidID;
	struct raid_softc *sc;

#ifdef DEBUG
	printf("RAID autoconfigure\n");
#endif

	/* 1. Create a config structure */
	config = malloc(sizeof(*config), M_RAIDFRAME, M_NOWAIT|M_ZERO);
	if (config == NULL) {
		printf("%s: Out of mem - config!?!?\n", __func__);
				/* XXX do something more intelligent here. */
		return NULL;
	}

	/*
	   2. Figure out what RAID ID this one is supposed to live at
	   See if we can get the same RAID dev that it was configured
	   on last time..
	*/

	/* walk forward from last_unit until an unconfigured unit is found */
	raidID = cset->ac->clabel->last_unit;
	for (sc = raidget(raidID, false); sc && sc->sc_r.valid != 0;
	     sc = raidget(++raidID, false))
		continue;
#ifdef DEBUG
	printf("Configuring raid%d:\n",raidID);
#endif

	/* no existing softc at that unit: allocate one */
	if (sc == NULL)
		sc = raidget(raidID, true);
	if (sc == NULL) {
		printf("%s: Out of mem - softc!?!?\n", __func__);
				/* XXX do something more intelligent here. */
		free(config, M_RAIDFRAME);
		return NULL;
	}

	raidPtr = &sc->sc_r;

	/* XXX all this stuff should be done SOMEWHERE ELSE! */
	raidPtr->softc = sc;
	raidPtr->raidid = raidID;
	raidPtr->openings = RAIDOUTSTANDING;

	/* 3. Build the configuration structure */
	rf_create_configuration(cset->ac, config, raidPtr);

	/* 4. Do the configuration */
	if (rf_Configure(raidPtr, config, cset->ac) == 0) {
		raidinit(sc);

		rf_markalldirty(raidPtr);
		raidPtr->autoconfigure = 1; /* XXX do this here? */
		switch (cset->ac->clabel->root_partition) {
		case 1:	/* Force Root */
		case 2:	/* Soft Root: root when boot partition part of raid */
			/*
			 * everything configured just fine.  Make a note
			 * that this set is eligible to be root,
			 * or forced to be root
			 */
			cset->rootable = cset->ac->clabel->root_partition;
			/* XXX do this here? */
			raidPtr->root_partition = cset->rootable;
			break;
		default:
			break;
		}
	} else {
		/* configuration failed: release the softc we claimed */
		raidput(sc);
		sc = NULL;
	}

	/* 5. Cleanup */
	free(config, M_RAIDFRAME);
	return sc;
}
   3513 
/*
 * rf_pool_init: initialize a memory pool for RAIDframe objects of the
 * given size, primed with xmin items and capped at xmax.
 */
void
rf_pool_init(struct pool *p, size_t size, const char *w_chan,
	     size_t xmin, size_t xmax)
{
	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
	pool_sethiwat(p, xmax);		/* cap cached items at xmax */
	pool_prime(p, xmin);		/* preallocate xmin items */
	pool_setlowat(p, xmin);		/* keep at least xmin on hand */
}
   3523 
   3524 /*
   3525  * rf_buf_queue_check(RF_Raid_t raidPtr) -- looks into the buffer queue
   3526  * to see if there is IO pending and if that IO could possibly be done
   3527  * for a given RAID set.  Returns 0 if IO is waiting and can be done, 1
   3528  * otherwise.
   3529  *
   3530  */
   3531 int
   3532 rf_buf_queue_check(RF_Raid_t *raidPtr)
   3533 {
   3534 	struct raid_softc *rs;
   3535 	struct dk_softc *dksc;
   3536 
   3537 	rs = raidPtr->softc;
   3538 	dksc = &rs->sc_dksc;
   3539 
   3540 	if ((rs->sc_flags & RAIDF_INITED) == 0)
   3541 		return 1;
   3542 
   3543 	if (dk_strategy_pending(dksc) && raidPtr->openings > 0) {
   3544 		/* there is work to do */
   3545 		return 0;
   3546 	}
   3547 	/* default is nothing to do */
   3548 	return 1;
   3549 }
   3550 
   3551 int
   3552 rf_getdisksize(struct vnode *vp, RF_RaidDisk_t *diskPtr)
   3553 {
   3554 	uint64_t numsecs;
   3555 	unsigned secsize;
   3556 	int error;
   3557 
   3558 	error = getdisksize(vp, &numsecs, &secsize);
   3559 	if (error == 0) {
   3560 		diskPtr->blockSize = secsize;
   3561 		diskPtr->numBlocks = numsecs - rf_protectedSectors;
   3562 		diskPtr->partitionSize = numsecs;
   3563 		return 0;
   3564 	}
   3565 	return error;
   3566 }
   3567 
/* Autoconf match: raid pseudo-devices always match. */
static int
raid_match(device_t self, cfdata_t cfdata, void *aux)
{
	return 1;
}
   3573 
/*
 * Autoconf attach: deliberately empty.  Instance state lives in the
 * separately-managed raid_softc (see raidget()/raidput()).
 */
static void
raid_attach(device_t parent, device_t self, void *aux)
{
}
   3578 
   3579 
   3580 static int
   3581 raid_detach(device_t self, int flags)
   3582 {
   3583 	int error;
   3584 	struct raid_softc *rs = raidsoftc(self);
   3585 
   3586 	if (rs == NULL)
   3587 		return ENXIO;
   3588 
   3589 	if ((error = raidlock(rs)) != 0)
   3590 		return (error);
   3591 
   3592 	error = raid_detach_unlocked(rs);
   3593 
   3594 	raidunlock(rs);
   3595 
   3596 	/* XXX raid can be referenced here */
   3597 
   3598 	if (error)
   3599 		return error;
   3600 
   3601 	/* Free the softc */
   3602 	raidput(rs);
   3603 
   3604 	return 0;
   3605 }
   3606 
/*
 * rf_set_geometry: publish a synthetic disk geometry for the RAID
 * device and hand it to the disk subsystem.
 */
static void
rf_set_geometry(struct raid_softc *rs, RF_Raid_t *raidPtr)
{
	struct dk_softc *dksc = &rs->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	memset(dg, 0, sizeof(*dg));

	dg->dg_secperunit = raidPtr->totalSectors;
	dg->dg_secsize = raidPtr->bytesPerSector;
	dg->dg_nsectors = raidPtr->Layout.dataSectorsPerStripe;
	/* fabricated track count; the geometry is synthetic anyway */
	dg->dg_ntracks = 4 * raidPtr->numCol;

	/* remaining fields are derived by the disk subsystem */
	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);
}
   3622 
   3623 /*
   3624  * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
   3625  * We end up returning whatever error was returned by the first cache flush
   3626  * that fails.
   3627  */
   3628 
   3629 int
   3630 rf_sync_component_caches(RF_Raid_t *raidPtr)
   3631 {
   3632 	int c, sparecol;
   3633 	int e,error;
   3634 	int force = 1;
   3635 
   3636 	error = 0;
   3637 	for (c = 0; c < raidPtr->numCol; c++) {
   3638 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   3639 			e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
   3640 					  &force, FWRITE, NOCRED);
   3641 			if (e) {
   3642 				if (e != ENODEV)
   3643 					printf("raid%d: cache flush to component %s failed.\n",
   3644 					       raidPtr->raidid, raidPtr->Disks[c].devname);
   3645 				if (error == 0) {
   3646 					error = e;
   3647 				}
   3648 			}
   3649 		}
   3650 	}
   3651 
   3652 	for( c = 0; c < raidPtr->numSpare ; c++) {
   3653 		sparecol = raidPtr->numCol + c;
   3654 		/* Need to ensure that the reconstruct actually completed! */
   3655 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3656 			e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
   3657 					  DIOCCACHESYNC, &force, FWRITE, NOCRED);
   3658 			if (e) {
   3659 				if (e != ENODEV)
   3660 					printf("raid%d: cache flush to component %s failed.\n",
   3661 					       raidPtr->raidid, raidPtr->Disks[sparecol].devname);
   3662 				if (error == 0) {
   3663 					error = e;
   3664 				}
   3665 			}
   3666 		}
   3667 	}
   3668 	return error;
   3669 }
   3670 
   3671 /*
   3672  * Module interface
   3673  */
   3674 
/* Driver module; depends on the dk(4) subroutine module. */
MODULE(MODULE_CLASS_DRIVER, raid, "dk_subr");

#ifdef _MODULE
/* cfdriver instance (raid_cd) for modular builds. */
CFDRIVER_DECL(raid, DV_DISK, NULL);
#endif

/* module(9) command dispatcher and its init/fini workers (below). */
static int raid_modcmd(modcmd_t, void *);
static int raid_modcmd_init(void);
static int raid_modcmd_fini(void);
   3684 
   3685 static int
   3686 raid_modcmd(modcmd_t cmd, void *data)
   3687 {
   3688 	int error;
   3689 
   3690 	error = 0;
   3691 	switch (cmd) {
   3692 	case MODULE_CMD_INIT:
   3693 		error = raid_modcmd_init();
   3694 		break;
   3695 	case MODULE_CMD_FINI:
   3696 		error = raid_modcmd_fini();
   3697 		break;
   3698 	default:
   3699 		error = ENOTTY;
   3700 		break;
   3701 	}
   3702 	return error;
   3703 }
   3704 
/*
 * raid_modcmd_init: one-time initialization.  Sets up the global
 * raid_lock, the spare-table queues (parity declustering builds only),
 * the block/character devsw entries and the autoconf glue, then boots
 * the RAIDframe core and registers a finalizer that auto-configures
 * RAID sets after all real hardware has attached.  Partially completed
 * setup is rolled back before an error return.
 */
static int
raid_modcmd_init(void)
{
	int error;
	int bmajor, cmajor;

	mutex_init(&raid_lock, MUTEX_DEFAULT, IPL_NONE);
	/* hold raid_lock across the whole attach sequence */
	mutex_enter(&raid_lock);
#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	rf_init_mutex2(rf_sparet_wait_mutex, IPL_VM);
	rf_init_cond2(rf_sparet_wait_cv, "sparetw");
	rf_init_cond2(rf_sparet_resp_cv, "rfgst");

	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
#endif

	/* -1 lets devsw_attach pick the major numbers */
	bmajor = cmajor = -1;
	error = devsw_attach("raid", &raid_bdevsw, &bmajor,
	    &raid_cdevsw, &cmajor);
	/* EEXIST just means the devsw is already present -- not fatal */
	if (error != 0 && error != EEXIST) {
		aprint_error("%s: devsw_attach failed %d\n", __func__, error);
		mutex_exit(&raid_lock);
		return error;
	}
#ifdef _MODULE
	error = config_cfdriver_attach(&raid_cd);
	if (error != 0) {
		aprint_error("%s: config_cfdriver_attach failed %d\n",
		    __func__, error);
		/* roll back the devsw attach */
		devsw_detach(&raid_bdevsw, &raid_cdevsw);
		mutex_exit(&raid_lock);
		return error;
	}
#endif
	error = config_cfattach_attach(raid_cd.cd_name, &raid_ca);
	if (error != 0) {
		aprint_error("%s: config_cfattach_attach failed %d\n",
		    __func__, error);
		/* roll back in reverse order of setup */
#ifdef _MODULE
		config_cfdriver_detach(&raid_cd);
#endif
		devsw_detach(&raid_bdevsw, &raid_cdevsw);
		mutex_exit(&raid_lock);
		return error;
	}

	raidautoconfigdone = false;

	mutex_exit(&raid_lock);

	/* error is necessarily 0 here -- every failure path returned above */
	if (error == 0) {
		if (rf_BootRaidframe(true) == 0)
			aprint_verbose("Kernelized RAIDframe activated\n");
		else
			panic("Serious error activating RAID!!");
	}

	/*
	 * Register a finalizer which will be used to auto-config RAID
	 * sets once all real hardware devices have been found.
	 */
	error = config_finalize_register(NULL, rf_autoconfig);
	if (error != 0) {
		/* non-fatal: autoconfig simply won't run automatically */
		aprint_error("WARNING: unable to register RAIDframe "
		    "finalizer\n");
		error = 0;
	}

	return error;
}
   3775 
/*
 * raid_modcmd_fini: module unload.  Refuses while any raid instance
 * exists, then tears down the autoconf glue, devsw entries, RAIDframe
 * core and locks in reverse order of raid_modcmd_init(), re-attaching
 * earlier stages if a later detach step fails.
 */
static int
raid_modcmd_fini(void)
{
	int error;

	mutex_enter(&raid_lock);

	/* Don't allow unload if raid device(s) exist.  */
	if (!LIST_EMPTY(&raids)) {
		mutex_exit(&raid_lock);
		return EBUSY;
	}

	error = config_cfattach_detach(raid_cd.cd_name, &raid_ca);
	if (error != 0) {
		aprint_error("%s: cannot detach cfattach\n",__func__);
		mutex_exit(&raid_lock);
		return error;
	}
#ifdef _MODULE
	error = config_cfdriver_detach(&raid_cd);
	if (error != 0) {
		aprint_error("%s: cannot detach cfdriver\n",__func__);
		/* undo the cfattach detach so the module stays usable */
		config_cfattach_attach(raid_cd.cd_name, &raid_ca);
		mutex_exit(&raid_lock);
		return error;
	}
#endif
	error = devsw_detach(&raid_bdevsw, &raid_cdevsw);
	if (error != 0) {
		aprint_error("%s: cannot detach devsw\n",__func__);
		/* re-attach everything torn down so far */
#ifdef _MODULE
		config_cfdriver_attach(&raid_cd);
#endif
		config_cfattach_attach(raid_cd.cd_name, &raid_ca);
		mutex_exit(&raid_lock);
		return error;
	}
	/* shut down the RAIDframe core */
	rf_BootRaidframe(false);
#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
	rf_destroy_mutex2(rf_sparet_wait_mutex);
	rf_destroy_cond2(rf_sparet_wait_cv);
	rf_destroy_cond2(rf_sparet_resp_cv);
#endif
	mutex_exit(&raid_lock);
	mutex_destroy(&raid_lock);

	return error;
}
   3825