      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.250.4.3 2009/03/02 20:58:27 snj Exp $	*/
      2 /*-
      3  * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster; Jason R. Thorpe.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     28  * POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 /*
     32  * Copyright (c) 1990, 1993
     33  *      The Regents of the University of California.  All rights reserved.
     34  *
     35  * This code is derived from software contributed to Berkeley by
     36  * the Systems Programming Group of the University of Utah Computer
     37  * Science Department.
     38  *
     39  * Redistribution and use in source and binary forms, with or without
     40  * modification, are permitted provided that the following conditions
     41  * are met:
     42  * 1. Redistributions of source code must retain the above copyright
     43  *    notice, this list of conditions and the following disclaimer.
     44  * 2. Redistributions in binary form must reproduce the above copyright
     45  *    notice, this list of conditions and the following disclaimer in the
     46  *    documentation and/or other materials provided with the distribution.
     47  * 3. Neither the name of the University nor the names of its contributors
     48  *    may be used to endorse or promote products derived from this software
     49  *    without specific prior written permission.
     50  *
     51  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     52  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     53  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     54  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     55  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     56  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     57  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     59  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     60  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     61  * SUCH DAMAGE.
     62  *
     63  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     64  *
     65  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     66  */
     67 
     68 /*
     69  * Copyright (c) 1988 University of Utah.
     70  *
     71  * This code is derived from software contributed to Berkeley by
     72  * the Systems Programming Group of the University of Utah Computer
     73  * Science Department.
     74  *
     75  * Redistribution and use in source and binary forms, with or without
     76  * modification, are permitted provided that the following conditions
     77  * are met:
     78  * 1. Redistributions of source code must retain the above copyright
     79  *    notice, this list of conditions and the following disclaimer.
     80  * 2. Redistributions in binary form must reproduce the above copyright
     81  *    notice, this list of conditions and the following disclaimer in the
     82  *    documentation and/or other materials provided with the distribution.
     83  * 3. All advertising materials mentioning features or use of this software
     84  *    must display the following acknowledgement:
     85  *      This product includes software developed by the University of
     86  *      California, Berkeley and its contributors.
     87  * 4. Neither the name of the University nor the names of its contributors
     88  *    may be used to endorse or promote products derived from this software
     89  *    without specific prior written permission.
     90  *
     91  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     92  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     93  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     94  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     95  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     96  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     97  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     98  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     99  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    100  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    101  * SUCH DAMAGE.
    102  *
    103  * from: Utah $Hdr: cd.c 1.6 90/11/28$
    104  *
    105  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
    106  */
    107 
    108 /*
    109  * Copyright (c) 1995 Carnegie-Mellon University.
    110  * All rights reserved.
    111  *
    112  * Authors: Mark Holland, Jim Zelenka
    113  *
    114  * Permission to use, copy, modify and distribute this software and
    115  * its documentation is hereby granted, provided that both the copyright
    116  * notice and this permission notice appear in all copies of the
    117  * software, derivative works or modified versions, and any portions
    118  * thereof, and that both notices appear in supporting documentation.
    119  *
    120  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
    121  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
    122  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
    123  *
    124  * Carnegie Mellon requests users of this software to return to
    125  *
    126  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
    127  *  School of Computer Science
    128  *  Carnegie Mellon University
    129  *  Pittsburgh PA 15213-3890
    130  *
    131  * any improvements or extensions that they make and grant Carnegie the
    132  * rights to redistribute these changes.
    133  */
    134 
    135 /***********************************************************
    136  *
    137  * rf_kintf.c -- the kernel interface routines for RAIDframe
    138  *
    139  ***********************************************************/
    140 
    141 #include <sys/cdefs.h>
    142 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.250.4.3 2009/03/02 20:58:27 snj Exp $");
    143 
    144 #include <sys/param.h>
    145 #include <sys/errno.h>
    146 #include <sys/pool.h>
    147 #include <sys/proc.h>
    148 #include <sys/queue.h>
    149 #include <sys/disk.h>
    150 #include <sys/device.h>
    151 #include <sys/stat.h>
    152 #include <sys/ioctl.h>
    153 #include <sys/fcntl.h>
    154 #include <sys/systm.h>
    155 #include <sys/vnode.h>
    156 #include <sys/disklabel.h>
    157 #include <sys/conf.h>
    158 #include <sys/buf.h>
    159 #include <sys/bufq.h>
    160 #include <sys/user.h>
    161 #include <sys/reboot.h>
    162 #include <sys/kauth.h>
    163 
    164 #include <prop/proplib.h>
    165 
    166 #include <dev/raidframe/raidframevar.h>
    167 #include <dev/raidframe/raidframeio.h>
    168 #include "raid.h"
    169 #include "opt_raid_autoconfig.h"
    170 #include "rf_raid.h"
    171 #include "rf_copyback.h"
    172 #include "rf_dag.h"
    173 #include "rf_dagflags.h"
    174 #include "rf_desc.h"
    175 #include "rf_diskqueue.h"
    176 #include "rf_etimer.h"
    177 #include "rf_general.h"
    178 #include "rf_kintf.h"
    179 #include "rf_options.h"
    180 #include "rf_driver.h"
    181 #include "rf_parityscan.h"
    182 #include "rf_threadstuff.h"
    183 
    184 #ifdef DEBUG
    185 int     rf_kdebug_level = 0;
    186 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    187 #else				/* DEBUG */
    188 #define db1_printf(a) { }
    189 #endif				/* DEBUG */
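        /*
         * Usage note (added comment, not from the original sources):
         * db1_printf() takes a doubly-parenthesized argument list, e.g.
         * db1_printf(("b_bcount is zero..\n")), so the inner list can be
         * pasted directly after "printf" in the DEBUG case above.
         */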
    190 
    191 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
    192 
    193 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    194 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
    195 
    196 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    197 						 * spare table */
    198 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    199 						 * installation process */
    200 #endif
    201 
    202 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    203 
    204 /* prototypes */
    205 static void KernelWakeupFunc(struct buf *);
    206 static void InitBP(struct buf *, struct vnode *, unsigned,
    207     dev_t, RF_SectorNum_t, RF_SectorCount_t, void *, void (*) (struct buf *),
    208     void *, int, struct proc *);
    209 static void raidinit(RF_Raid_t *);
    210 
    211 void raidattach(int);
    212 static int raid_match(struct device *, struct cfdata *, void *);
    213 static void raid_attach(struct device *, struct device *, void *);
    214 static int raid_detach(struct device *, int);
    215 
    216 dev_type_open(raidopen);
    217 dev_type_close(raidclose);
    218 dev_type_read(raidread);
    219 dev_type_write(raidwrite);
    220 dev_type_ioctl(raidioctl);
    221 dev_type_strategy(raidstrategy);
    222 dev_type_dump(raiddump);
    223 dev_type_size(raidsize);
    224 
    225 const struct bdevsw raid_bdevsw = {
    226 	raidopen, raidclose, raidstrategy, raidioctl,
    227 	raiddump, raidsize, D_DISK
    228 };
    229 
    230 const struct cdevsw raid_cdevsw = {
    231 	raidopen, raidclose, raidread, raidwrite, raidioctl,
    232 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    233 };
    234 
    235 static struct dkdriver rf_dkdriver = { raidstrategy, minphys };
    236 
    237 /* XXX Not sure if the following should be replacing the raidPtrs above,
    238    or if it should be used in conjunction with that...
    239 */
    240 
    241 struct raid_softc {
    242 	struct device *sc_dev;
    243 	int     sc_flags;	/* flags */
    244 	int     sc_cflags;	/* configuration flags */
    245 	uint64_t sc_size;	/* size of the raid device */
    246 	char    sc_xname[20];	/* XXX external name */
    247 	struct disk sc_dkdev;	/* generic disk device info */
    248 	struct bufq_state *buf_queue;	/* used for the device queue */
    249 };
    250 /* sc_flags */
    251 #define RAIDF_INITED	0x01	/* unit has been initialized */
    252 #define RAIDF_WLABEL	0x02	/* label area is writable */
    253 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    254 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    255 #define RAIDF_LOCKED	0x80	/* unit is locked */
    256 
    257 #define	raidunit(x)	DISKUNIT(x)
    258 int numraid = 0;
    259 
    260 extern struct cfdriver raid_cd;
    261 CFATTACH_DECL_NEW(raid, sizeof(struct raid_softc),
    262     raid_match, raid_attach, raid_detach, NULL);
    263 
    264 /*
    265  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    266  * Be aware that large numbers can allow the driver to consume a lot of
    267  * kernel memory, especially on writes, and in degraded mode reads.
    268  *
    269  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    270  * a single 64K write will typically require 64K for the old data,
    271  * 64K for the old parity, and 64K for the new parity, for a total
    272  * of 192K (if the parity buffer is not re-used immediately).
    273  * Even if it is used immediately, that's still 128K, which when multiplied
    274  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    275  *
    276  * Now in degraded mode, for example, a 64K read on the above setup may
    277  * require data reconstruction, which will require *all* of the 4 remaining
    278  * disks to participate -- 4 * 32K/disk == 128K again.
    279  */
    280 
    281 #ifndef RAIDOUTSTANDING
    282 #define RAIDOUTSTANDING   6
    283 #endif
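        /*
         * Note (added comment; an assumption, not taken from the original
         * sources): because of the #ifndef guard above, the default of 6
         * can be overridden at kernel build time by defining
         * RAIDOUTSTANDING, e.g. via an "options RAIDOUTSTANDING=10" line
         * in the kernel configuration, if that option is passed through
         * as a preprocessor define.
         */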
    284 
    285 #define RAIDLABELDEV(dev)	\
    286 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
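        /*
         * Descriptive note (added comment): RAIDLABELDEV() rebuilds the
         * dev_t of the raw partition for the same raid unit; the disklabel
         * code in this driver uses it when reading and writing the label.
         */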
    287 
    288 /* declared here, and made public, for the benefit of KVM stuff.. */
    289 struct raid_softc *raid_softc;
    290 
    291 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
    292 				     struct disklabel *);
    293 static void raidgetdisklabel(dev_t);
    294 static void raidmakedisklabel(struct raid_softc *);
    295 
    296 static int raidlock(struct raid_softc *);
    297 static void raidunlock(struct raid_softc *);
    298 
    299 static void rf_markalldirty(RF_Raid_t *);
    300 static void rf_set_properties(struct raid_softc *, RF_Raid_t *);
    301 
    302 void rf_ReconThread(struct rf_recon_req *);
    303 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    304 void rf_CopybackThread(RF_Raid_t *raidPtr);
    305 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    306 int rf_autoconfig(struct device *self);
    307 void rf_buildroothack(RF_ConfigSet_t *);
    308 
    309 RF_AutoConfig_t *rf_find_raid_components(void);
    310 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    311 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    312 static int rf_reasonable_label(RF_ComponentLabel_t *);
    313 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    314 int rf_set_autoconfig(RF_Raid_t *, int);
    315 int rf_set_rootpartition(RF_Raid_t *, int);
    316 void rf_release_all_vps(RF_ConfigSet_t *);
    317 void rf_cleanup_config_set(RF_ConfigSet_t *);
    318 int rf_have_enough_components(RF_ConfigSet_t *);
    319 int rf_auto_config_set(RF_ConfigSet_t *, int *);
    320 static int rf_sync_component_caches(RF_Raid_t *raidPtr);
    321 
    322 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
    323 				  allow autoconfig to take place.
    324 				  Note that this is overridden by having
    325 				  RAID_AUTOCONFIG as an option in the
    326 				  kernel config file.  */
    327 
    328 struct RF_Pools_s rf_pools;
    329 
    330 void
    331 raidattach(int num)
    332 {
    333 	int raidID;
    334 	int i, rc;
    335 
    336 #ifdef DEBUG
    337 	printf("raidattach: Asked for %d units\n", num);
    338 #endif
    339 
    340 	if (num <= 0) {
    341 #ifdef DIAGNOSTIC
    342 		panic("raidattach: count <= 0");
    343 #endif
    344 		return;
    345 	}
    346 	/* This is where all the initialization stuff gets done. */
    347 
    348 	numraid = num;
    349 
    350 	/* Make some space for requested number of units... */
    351 
    352 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
    353 	if (raidPtrs == NULL) {
    354 		panic("raidPtrs is NULL!!");
    355 	}
    356 
    357 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
    358 	rf_mutex_init(&rf_sparet_wait_mutex);
    359 
    360 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
    361 #endif
    362 
    363 	for (i = 0; i < num; i++)
    364 		raidPtrs[i] = NULL;
    365 	rc = rf_BootRaidframe();
    366 	if (rc == 0)
    367 		aprint_normal("Kernelized RAIDframe activated\n");
    368 	else
    369 		panic("Serious error booting RAID!!");
    370 
    371 	/* put together some data structures like the CCD device does.. This
    372 	 * lets us lock the device and what-not when it gets opened. */
    373 
    374 	raid_softc = (struct raid_softc *)
    375 		malloc(num * sizeof(struct raid_softc),
    376 		       M_RAIDFRAME, M_NOWAIT);
    377 	if (raid_softc == NULL) {
    378 		aprint_error("WARNING: no memory for RAIDframe driver\n");
    379 		return;
    380 	}
    381 
    382 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
    383 
    384 	for (raidID = 0; raidID < num; raidID++) {
    385 		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);
    386 
    387 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
    388 			  (RF_Raid_t *));
    389 		if (raidPtrs[raidID] == NULL) {
    390 			aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID);
    391 			numraid = raidID;
    392 			return;
    393 		}
    394 	}
    395 
    396 	if (config_cfattach_attach(raid_cd.cd_name, &raid_ca)) {
    397 		aprint_error("raidattach: config_cfattach_attach failed?\n");
    398 	}
    399 
    400 #ifdef RAID_AUTOCONFIG
    401 	raidautoconfig = 1;
    402 #endif
    403 
    404 	/*
    405 	 * Register a finalizer which will be used to auto-config RAID
    406 	 * sets once all real hardware devices have been found.
    407 	 */
    408 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
    409 		aprint_error("WARNING: unable to register RAIDframe finalizer\n");
    410 }
    411 
    412 int
    413 rf_autoconfig(struct device *self)
    414 {
    415 	RF_AutoConfig_t *ac_list;
    416 	RF_ConfigSet_t *config_sets;
    417 
    418 	if (raidautoconfig == 0)
    419 		return (0);
    420 
    421 	/* XXX This code can only be run once. */
    422 	raidautoconfig = 0;
    423 
    424 	/* 1. locate all RAID components on the system */
    425 #ifdef DEBUG
    426 	printf("Searching for RAID components...\n");
    427 #endif
    428 	ac_list = rf_find_raid_components();
    429 
    430 	/* 2. Sort them into their respective sets. */
    431 	config_sets = rf_create_auto_sets(ac_list);
    432 
    433 	/*
    434 	 * 3. Evaluate each set and configure the valid ones.
    435 	 * This gets done in rf_buildroothack().
    436 	 */
    437 	rf_buildroothack(config_sets);
    438 
    439 	return 1;
    440 }
    441 
    442 void
    443 rf_buildroothack(RF_ConfigSet_t *config_sets)
    444 {
    445 	RF_ConfigSet_t *cset;
    446 	RF_ConfigSet_t *next_cset;
    447 	int retcode;
    448 	int raidID;
    449 	int rootID;
    450 	int col;
    451 	int num_root;
    452 	char *devname;
    453 
    454 	rootID = 0;
    455 	num_root = 0;
    456 	cset = config_sets;
    457 	while(cset != NULL ) {
    458 		next_cset = cset->next;
    459 		if (rf_have_enough_components(cset) &&
    460 		    cset->ac->clabel->autoconfigure==1) {
    461 			retcode = rf_auto_config_set(cset,&raidID);
    462 			if (!retcode) {
    463 #ifdef DEBUG
    464 				printf("raid%d: configured ok\n", raidID);
    465 #endif
    466 				if (cset->rootable) {
    467 					rootID = raidID;
    468 					num_root++;
    469 				}
    470 			} else {
    471 				/* The autoconfig didn't work :( */
    472 #ifdef DEBUG
    473 				printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
    474 #endif
    475 				rf_release_all_vps(cset);
    476 			}
    477 		} else {
    478 			/* we're not autoconfiguring this set...
    479 			   release the associated resources */
    480 			rf_release_all_vps(cset);
    481 		}
    482 		/* cleanup */
    483 		rf_cleanup_config_set(cset);
    484 		cset = next_cset;
    485 	}
    486 
    487 	/* if the user has specified what the root device should be
    488 	   then we don't touch booted_device or boothowto... */
    489 
    490 	if (rootspec != NULL)
    491 		return;
    492 
    493 	/* we found something bootable... */
    494 
    495 	if (num_root == 1) {
    496 		booted_device = raid_softc[rootID].sc_dev;
    497 	} else if (num_root > 1) {
    498 
    499 		/*
    500 		 * Maybe the MD code can help. If it cannot, then
    501 		 * setroot() will discover that we have no
    502 		 * booted_device and will ask the user if nothing was
    503 		 * hardwired in the kernel config file
    504 		 */
    505 
    506 		if (booted_device == NULL)
    507 			cpu_rootconf();
    508 		if (booted_device == NULL)
    509 			return;
    510 
    511 		num_root = 0;
    512 		for (raidID = 0; raidID < numraid; raidID++) {
    513 			if (raidPtrs[raidID]->valid == 0)
    514 				continue;
    515 
    516 			if (raidPtrs[raidID]->root_partition == 0)
    517 				continue;
    518 
    519 			for (col = 0; col < raidPtrs[raidID]->numCol; col++) {
    520 				devname = raidPtrs[raidID]->Disks[col].devname;
    521 				devname += sizeof("/dev/") - 1;
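        				/*
        				 * Added comment: a component name such
        				 * as "/dev/wd0a" becomes "wd0a" here; the
        				 * strncmp() below then checks whether it
        				 * starts with the booted device's name
        				 * (e.g. "wd0").
        				 */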
    522 				if (strncmp(devname, device_xname(booted_device),
    523 					    strlen(device_xname(booted_device))) != 0)
    524 					continue;
    525 #ifdef DEBUG
    526 				printf("raid%d includes boot device %s\n",
    527 				       raidID, devname);
    528 #endif
    529 				num_root++;
    530 				rootID = raidID;
    531 			}
    532 		}
    533 
    534 		if (num_root == 1) {
    535 			booted_device = raid_softc[rootID].sc_dev;
    536 		} else {
    537 			/* we can't guess.. require the user to answer... */
    538 			boothowto |= RB_ASKNAME;
    539 		}
    540 	}
    541 }
    542 
    543 
    544 int
    545 raidsize(dev_t dev)
    546 {
    547 	struct raid_softc *rs;
    548 	struct disklabel *lp;
    549 	int     part, unit, omask, size;
    550 
    551 	unit = raidunit(dev);
    552 	if (unit >= numraid)
    553 		return (-1);
    554 	rs = &raid_softc[unit];
    555 
    556 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    557 		return (-1);
    558 
    559 	part = DISKPART(dev);
    560 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
    561 	lp = rs->sc_dkdev.dk_label;
    562 
    563 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
    564 		return (-1);
    565 
    566 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    567 		size = -1;
    568 	else
    569 		size = lp->d_partitions[part].p_size *
    570 		    (lp->d_secsize / DEV_BSIZE);
    571 
    572 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
    573 		return (-1);
    574 
    575 	return (size);
    576 
    577 }
    578 
    579 int
    580 raiddump(dev_t dev, daddr_t blkno, void *va, size_t size)
    581 {
    582 	int     unit = raidunit(dev);
    583 	struct raid_softc *rs;
    584 	const struct bdevsw *bdev;
    585 	struct disklabel *lp;
    586 	RF_Raid_t *raidPtr;
    587 	daddr_t offset;
    588 	int     part, c, sparecol, j, scol, dumpto;
    589 	int     error = 0;
    590 
    591 	if (unit >= numraid)
    592 		return (ENXIO);
    593 
    594 	rs = &raid_softc[unit];
    595 	raidPtr = raidPtrs[unit];
    596 
    597 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    598 		return ENXIO;
    599 
    600 	/* we only support dumping to RAID 1 sets */
    601 	if (raidPtr->Layout.numDataCol != 1 ||
    602 	    raidPtr->Layout.numParityCol != 1)
    603 		return EINVAL;
    604 
    605 
    606 	if ((error = raidlock(rs)) != 0)
    607 		return error;
    608 
    609 	if (size % DEV_BSIZE != 0) {
    610 		error = EINVAL;
    611 		goto out;
    612 	}
    613 
    614 	if (blkno + size / DEV_BSIZE > rs->sc_size) {
    615 		printf("%s: blkno (%" PRIu64 ") + size / DEV_BSIZE (%zu) > "
    616 		    "sc->sc_size (%" PRIu64 ")\n", __func__, blkno,
    617 		    size / DEV_BSIZE, rs->sc_size);
    618 		error = EINVAL;
    619 		goto out;
    620 	}
    621 
    622 	part = DISKPART(dev);
    623 	lp = rs->sc_dkdev.dk_label;
    624 	offset = lp->d_partitions[part].p_offset + RF_PROTECTED_SECTORS;
    625 
    626 	/* figure out what device is alive.. */
    627 
    628 	/*
    629 	   Look for a component to dump to.  The preference for the
    630 	   component to dump to is as follows:
    631 	   1) the master
    632 	   2) a used_spare of the master
    633 	   3) the slave
    634 	   4) a used_spare of the slave
    635 	*/
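        	/*
        	 * Added note: in this two-column RAID 1 layout, column 0 is
        	 * the master and column 1 the slave, so the scan below picks
        	 * the master first simply by walking the columns in order.
        	 */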
    636 
    637 	dumpto = -1;
    638 	for (c = 0; c < raidPtr->numCol; c++) {
    639 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
    640 			/* this might be the one */
    641 			dumpto = c;
    642 			break;
    643 		}
    644 	}
    645 
    646 	/*
    647 	   At this point we have possibly selected a live master or a
    648 	   live slave.  We now check whether there is a spared master
    649 	   (or a spared slave), in case we did not find a live master
    650 	   or a live slave above.
    651 	*/
    652 
    653 	for (c = 0; c < raidPtr->numSpare; c++) {
    654 		sparecol = raidPtr->numCol + c;
    655 		if (raidPtr->Disks[sparecol].status ==  rf_ds_used_spare) {
    656 			/* How about this one? */
    657 			scol = -1;
    658 			for(j=0;j<raidPtr->numCol;j++) {
    659 				if (raidPtr->Disks[j].spareCol == sparecol) {
    660 					scol = j;
    661 					break;
    662 				}
    663 			}
    664 			if (scol == 0) {
    665 				/*
    666 				   We must have found a spared master!
    667 				   We'll take that over anything else
    668 				   found so far.  (We couldn't have
    669 				   found a real master before, since
    670 				   this is a used spare, and it's
    671 				   saying that it's replacing the
    672 				   master.)  On reboot (with
    673 				   autoconfiguration turned on)
    674 				   sparecol will become the 1st
    675 				   component (component0) of this set.
    676 				*/
    677 				dumpto = sparecol;
    678 				break;
    679 			} else if (scol != -1) {
    680 				/*
    681 				   Must be a spared slave.  We'll dump
    682 				   to that if we haven't found anything
    683 				   else so far.
    684 				*/
    685 				if (dumpto == -1)
    686 					dumpto = sparecol;
    687 			}
    688 		}
    689 	}
    690 
    691 	if (dumpto == -1) {
    692 		/* we couldn't find any live components to dump to!?!?
    693 		 */
    694 		error = EINVAL;
    695 		goto out;
    696 	}
    697 
    698 	bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
    699 
    700 	/*
    701 	   Note that blkno is relative to this particular partition.
    702 	   By adding the offset of this partition in the RAID
    703 	   set, and also adding RF_PROTECTED_SECTORS, we get a
    704 	   value that is relative to the partition used for the
    705 	   underlying component.
    706 	*/
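        	/*
        	 * Worked example (added; the numbers are illustrative only):
        	 * if the partition being dumped to starts at sector 1000 of
        	 * the RAID set and RF_PROTECTED_SECTORS is 64, then blkno 10
        	 * of that partition ends up at sector 10 + 1000 + 64 of the
        	 * underlying component's partition.
        	 */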
    707 
    708 	error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
    709 				blkno + offset, va, size);
    710 
    711 out:
    712 	raidunlock(rs);
    713 
    714 	return error;
    715 }
    716 /* ARGSUSED */
    717 int
    718 raidopen(dev_t dev, int flags, int fmt,
    719     struct lwp *l)
    720 {
    721 	int     unit = raidunit(dev);
    722 	struct raid_softc *rs;
    723 	struct disklabel *lp;
    724 	int     part, pmask;
    725 	int     error = 0;
    726 
    727 	if (unit >= numraid)
    728 		return (ENXIO);
    729 	rs = &raid_softc[unit];
    730 
    731 	if ((error = raidlock(rs)) != 0)
    732 		return (error);
    733 	lp = rs->sc_dkdev.dk_label;
    734 
    735 	part = DISKPART(dev);
    736 
    737 	/*
    738 	 * If there are wedges, and this is not RAW_PART, then we
    739 	 * need to fail.
    740 	 */
    741 	if (rs->sc_dkdev.dk_nwedges != 0 && part != RAW_PART) {
    742 		error = EBUSY;
    743 		goto bad;
    744 	}
    745 	pmask = (1 << part);
    746 
    747 	if ((rs->sc_flags & RAIDF_INITED) &&
    748 	    (rs->sc_dkdev.dk_openmask == 0))
    749 		raidgetdisklabel(dev);
    750 
    751 	/* make sure that this partition exists */
    752 
    753 	if (part != RAW_PART) {
    754 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
    755 		    ((part >= lp->d_npartitions) ||
    756 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    757 			error = ENXIO;
    758 			goto bad;
    759 		}
    760 	}
    761 	/* Prevent this unit from being unconfigured while open. */
    762 	switch (fmt) {
    763 	case S_IFCHR:
    764 		rs->sc_dkdev.dk_copenmask |= pmask;
    765 		break;
    766 
    767 	case S_IFBLK:
    768 		rs->sc_dkdev.dk_bopenmask |= pmask;
    769 		break;
    770 	}
    771 
    772 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    773 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    774 		/* First one... mark things as dirty... Note that we *MUST*
    775 		 have done a configure before this.  I DO NOT WANT TO BE
    776 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    777 		 THAT THEY BELONG TOGETHER!!!!! */
    778 		/* XXX should check to see if we're only open for reading
    779 		   here... If so, we needn't do this, but then need some
    780 		   other way of keeping track of what's happened.. */
    781 
    782 		rf_markalldirty( raidPtrs[unit] );
    783 	}
    784 
    785 
    786 	rs->sc_dkdev.dk_openmask =
    787 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    788 
    789 bad:
    790 	raidunlock(rs);
    791 
    792 	return (error);
    793 
    794 
    795 }
    796 /* ARGSUSED */
    797 int
    798 raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
    799 {
    800 	int     unit = raidunit(dev);
    801 	struct cfdata *cf;
    802 	struct raid_softc *rs;
    803 	int     error = 0;
    804 	int     part;
    805 
    806 	if (unit >= numraid)
    807 		return (ENXIO);
    808 	rs = &raid_softc[unit];
    809 
    810 	if ((error = raidlock(rs)) != 0)
    811 		return (error);
    812 
    813 	part = DISKPART(dev);
    814 
    815 	/* ...that much closer to allowing unconfiguration... */
    816 	switch (fmt) {
    817 	case S_IFCHR:
    818 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
    819 		break;
    820 
    821 	case S_IFBLK:
    822 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
    823 		break;
    824 	}
    825 	rs->sc_dkdev.dk_openmask =
    826 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    827 
    828 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    829 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    830 		/* Last one... device is not unconfigured yet.
    831 		   If RAIDF_INITED is not set, device shutdown has
    832 		   already taken care of setting the clean bits;
    833 		   otherwise, mark things as clean here... */
    834 
    835 		rf_update_component_labels(raidPtrs[unit],
    836 						 RF_FINAL_COMPONENT_UPDATE);
    837 		if (doing_shutdown) {
    838 			/* last one, and we're going down, so
    839 			   lights out for this RAID set too. */
    840 			error = rf_Shutdown(raidPtrs[unit]);
    841 
    842 			/* It's no longer initialized... */
    843 			rs->sc_flags &= ~RAIDF_INITED;
    844 
    845 			/* detach the device */
    846 
    847 			cf = device_cfdata(rs->sc_dev);
    848 			error = config_detach(rs->sc_dev, DETACH_QUIET);
    849 			free(cf, M_RAIDFRAME);
    850 
    851 			/* Detach the disk. */
    852 			disk_detach(&rs->sc_dkdev);
    853 			disk_destroy(&rs->sc_dkdev);
    854 		}
    855 	}
    856 
    857 	raidunlock(rs);
    858 	return (0);
    859 
    860 }
    861 
    862 void
    863 raidstrategy(struct buf *bp)
    864 {
    865 	int s;
    866 
    867 	unsigned int raidID = raidunit(bp->b_dev);
    868 	RF_Raid_t *raidPtr;
    869 	struct raid_softc *rs = &raid_softc[raidID];
    870 	int     wlabel;
    871 
    872 	if (raidID >= numraid || !raidPtrs[raidID]) {
    873 		bp->b_error = ENODEV;
    874 		goto done;
    875 	}
    876 	if ((rs->sc_flags & RAIDF_INITED) == 0) {
    877 		bp->b_error = ENXIO;
    878 		goto done;
    879 	}
    880 	raidPtr = raidPtrs[raidID];
    881 	if (!raidPtr->valid) {
    882 		bp->b_error = ENODEV;
    883 		goto done;
    884 	}
    885 	if (bp->b_bcount == 0) {
    886 		db1_printf(("b_bcount is zero..\n"));
    887 		goto done;
    888 	}
    889 
    890 	/*
    891 	 * Do bounds checking and adjust transfer.  If there's an
    892 	 * error, the bounds check will flag that for us.
    893 	 */
    894 
    895 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
    896 	if (DISKPART(bp->b_dev) == RAW_PART) {
    897 		uint64_t size; /* device size in DEV_BSIZE unit */
    898 
    899 		if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
    900 			size = raidPtr->totalSectors <<
    901 			    (raidPtr->logBytesPerSector - DEV_BSHIFT);
    902 		} else {
    903 			size = raidPtr->totalSectors >>
    904 			    (DEV_BSHIFT - raidPtr->logBytesPerSector);
    905 		}
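        		/*
        		 * Added example: with 512-byte sectors,
        		 * logBytesPerSector == DEV_BSHIFT == 9 and size equals
        		 * totalSectors; with 2048-byte sectors each RAID sector
        		 * counts as four DEV_BSIZE blocks, so size is
        		 * totalSectors << 2.
        		 */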
    906 		if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
    907 			goto done;
    908 		}
    909 	} else {
    910 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
    911 			db1_printf(("Bounds check failed!!:%d %d\n",
    912 				(int) bp->b_blkno, (int) wlabel));
    913 			goto done;
    914 		}
    915 	}
    916 	s = splbio();
    917 
    918 	bp->b_resid = 0;
    919 
    920 	/* stuff it onto our queue */
    921 	BUFQ_PUT(rs->buf_queue, bp);
    922 
    923 	/* schedule the I/O to happen at the next convenient time */
    924 	wakeup(&(raidPtrs[raidID]->iodone));
    925 
    926 	splx(s);
    927 	return;
    928 
    929 done:
    930 	bp->b_resid = bp->b_bcount;
    931 	biodone(bp);
    932 }
    933 /* ARGSUSED */
    934 int
    935 raidread(dev_t dev, struct uio *uio, int flags)
    936 {
    937 	int     unit = raidunit(dev);
    938 	struct raid_softc *rs;
    939 
    940 	if (unit >= numraid)
    941 		return (ENXIO);
    942 	rs = &raid_softc[unit];
    943 
    944 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    945 		return (ENXIO);
    946 
    947 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    948 
    949 }
    950 /* ARGSUSED */
    951 int
    952 raidwrite(dev_t dev, struct uio *uio, int flags)
    953 {
    954 	int     unit = raidunit(dev);
    955 	struct raid_softc *rs;
    956 
    957 	if (unit >= numraid)
    958 		return (ENXIO);
    959 	rs = &raid_softc[unit];
    960 
    961 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    962 		return (ENXIO);
    963 
    964 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    965 
    966 }
    967 
    968 int
    969 raidioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
    970 {
    971 	int     unit = raidunit(dev);
    972 	int     error = 0;
    973 	int     part, pmask;
    974 	struct cfdata *cf;
    975 	struct raid_softc *rs;
    976 	RF_Config_t *k_cfg, *u_cfg;
    977 	RF_Raid_t *raidPtr;
    978 	RF_RaidDisk_t *diskPtr;
    979 	RF_AccTotals_t *totals;
    980 	RF_DeviceConfig_t *d_cfg, **ucfgp;
    981 	u_char *specific_buf;
    982 	int retcode = 0;
    983 	int column;
    984 	int raidid;
    985 	struct rf_recon_req *rrcopy, *rr;
    986 	RF_ComponentLabel_t *clabel;
    987 	RF_ComponentLabel_t *ci_label;
    988 	RF_ComponentLabel_t **clabel_ptr;
    989 	RF_SingleComponent_t *sparePtr,*componentPtr;
    990 	RF_SingleComponent_t component;
    991 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
    992 	int i, j, d;
    993 #ifdef __HAVE_OLD_DISKLABEL
    994 	struct disklabel newlabel;
    995 #endif
    996 	struct dkwedge_info *dkw;
    997 
    998 	if (unit >= numraid)
    999 		return (ENXIO);
   1000 	rs = &raid_softc[unit];
   1001 	raidPtr = raidPtrs[unit];
   1002 
   1003 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
   1004 		(int) DISKPART(dev), (int) unit, (int) cmd));
   1005 
   1006 	/* Must be open for writes for these commands... */
   1007 	switch (cmd) {
   1008 #ifdef DIOCGSECTORSIZE
   1009 	case DIOCGSECTORSIZE:
   1010 		*(u_int *)data = raidPtr->bytesPerSector;
   1011 		return 0;
   1012 	case DIOCGMEDIASIZE:
   1013 		*(off_t *)data =
   1014 		    (off_t)raidPtr->totalSectors * raidPtr->bytesPerSector;
   1015 		return 0;
   1016 #endif
   1017 	case DIOCSDINFO:
   1018 	case DIOCWDINFO:
   1019 #ifdef __HAVE_OLD_DISKLABEL
   1020 	case ODIOCWDINFO:
   1021 	case ODIOCSDINFO:
   1022 #endif
   1023 	case DIOCWLABEL:
   1024 	case DIOCAWEDGE:
   1025 	case DIOCDWEDGE:
   1026 		if ((flag & FWRITE) == 0)
   1027 			return (EBADF);
   1028 	}
   1029 
   1030 	/* Must be initialized for these... */
   1031 	switch (cmd) {
   1032 	case DIOCGDINFO:
   1033 	case DIOCSDINFO:
   1034 	case DIOCWDINFO:
   1035 #ifdef __HAVE_OLD_DISKLABEL
   1036 	case ODIOCGDINFO:
   1037 	case ODIOCWDINFO:
   1038 	case ODIOCSDINFO:
   1039 	case ODIOCGDEFLABEL:
   1040 #endif
   1041 	case DIOCGPART:
   1042 	case DIOCWLABEL:
   1043 	case DIOCGDEFLABEL:
   1044 	case DIOCAWEDGE:
   1045 	case DIOCDWEDGE:
   1046 	case DIOCLWEDGES:
   1047 	case DIOCCACHESYNC:
   1048 	case RAIDFRAME_SHUTDOWN:
   1049 	case RAIDFRAME_REWRITEPARITY:
   1050 	case RAIDFRAME_GET_INFO:
   1051 	case RAIDFRAME_RESET_ACCTOTALS:
   1052 	case RAIDFRAME_GET_ACCTOTALS:
   1053 	case RAIDFRAME_KEEP_ACCTOTALS:
   1054 	case RAIDFRAME_GET_SIZE:
   1055 	case RAIDFRAME_FAIL_DISK:
   1056 	case RAIDFRAME_COPYBACK:
   1057 	case RAIDFRAME_CHECK_RECON_STATUS:
   1058 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1059 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1060 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1061 	case RAIDFRAME_ADD_HOT_SPARE:
   1062 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1063 	case RAIDFRAME_INIT_LABELS:
   1064 	case RAIDFRAME_REBUILD_IN_PLACE:
   1065 	case RAIDFRAME_CHECK_PARITY:
   1066 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1067 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1068 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1069 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1070 	case RAIDFRAME_SET_AUTOCONFIG:
   1071 	case RAIDFRAME_SET_ROOT:
   1072 	case RAIDFRAME_DELETE_COMPONENT:
   1073 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1074 		if ((rs->sc_flags & RAIDF_INITED) == 0)
   1075 			return (ENXIO);
   1076 	}
   1077 
   1078 	switch (cmd) {
   1079 
   1080 		/* configure the system */
   1081 	case RAIDFRAME_CONFIGURE:
   1082 
   1083 		if (raidPtr->valid) {
   1084 			/* There is a valid RAID set running on this unit! */
   1085 			printf("raid%d: Device already configured!\n",unit);
   1086 			return(EINVAL);
   1087 		}
   1088 
   1089 		/* copy-in the configuration information */
   1090 		/* data points to a pointer to the configuration structure */
   1091 
   1092 		u_cfg = *((RF_Config_t **) data);
   1093 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
   1094 		if (k_cfg == NULL) {
   1095 			return (ENOMEM);
   1096 		}
   1097 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
   1098 		if (retcode) {
   1099 			RF_Free(k_cfg, sizeof(RF_Config_t));
   1100 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
   1101 				retcode));
   1102 			return (retcode);
   1103 		}
   1104 		/* allocate a buffer for the layout-specific data, and copy it
   1105 		 * in */
   1106 		if (k_cfg->layoutSpecificSize) {
   1107 			if (k_cfg->layoutSpecificSize > 10000) {
   1108 				/* sanity check */
   1109 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1110 				return (EINVAL);
   1111 			}
   1112 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
   1113 			    (u_char *));
   1114 			if (specific_buf == NULL) {
   1115 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1116 				return (ENOMEM);
   1117 			}
   1118 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
   1119 			    k_cfg->layoutSpecificSize);
   1120 			if (retcode) {
   1121 				RF_Free(k_cfg, sizeof(RF_Config_t));
   1122 				RF_Free(specific_buf,
   1123 					k_cfg->layoutSpecificSize);
   1124 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
   1125 					retcode));
   1126 				return (retcode);
   1127 			}
   1128 		} else
   1129 			specific_buf = NULL;
   1130 		k_cfg->layoutSpecific = specific_buf;
   1131 
   1132 		/* should do some kind of sanity check on the configuration.
   1133 		 * Store the sum of all the bytes in the last byte? */
   1134 
   1135 		/* configure the system */
   1136 
   1137 		/*
   1138 		 * Clear the entire RAID descriptor, just to make sure
   1139 		 *  there is no stale data left in the case of a
   1140 		 *  reconfiguration
   1141 		 */
   1142 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
   1143 		raidPtr->raidid = unit;
   1144 
   1145 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
   1146 
   1147 		if (retcode == 0) {
   1148 
   1149 			/* allow this many simultaneous IO's to
   1150 			   this RAID device */
   1151 			raidPtr->openings = RAIDOUTSTANDING;
   1152 
   1153 			raidinit(raidPtr);
   1154 			rf_markalldirty(raidPtr);
   1155 		}
   1156 		/* free the buffers.  No return code here. */
   1157 		if (k_cfg->layoutSpecificSize) {
   1158 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
   1159 		}
   1160 		RF_Free(k_cfg, sizeof(RF_Config_t));
   1161 
   1162 		return (retcode);
   1163 
   1164 		/* shutdown the system */
   1165 	case RAIDFRAME_SHUTDOWN:
   1166 
   1167 		if ((error = raidlock(rs)) != 0)
   1168 			return (error);
   1169 
   1170 		/*
   1171 		 * If somebody has a partition mounted, we shouldn't
   1172 		 * shutdown.
   1173 		 */
   1174 
   1175 		part = DISKPART(dev);
   1176 		pmask = (1 << part);
   1177 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
   1178 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
   1179 			(rs->sc_dkdev.dk_copenmask & pmask))) {
   1180 			raidunlock(rs);
   1181 			return (EBUSY);
   1182 		}
   1183 
   1184 		retcode = rf_Shutdown(raidPtr);
   1185 
   1186 		/* It's no longer initialized... */
   1187 		rs->sc_flags &= ~RAIDF_INITED;
   1188 
   1189 		/* free the pseudo device attach bits */
   1190 
   1191 		cf = device_cfdata(rs->sc_dev);
   1192 		/* XXX this causes us to not return any errors
   1193 		   from the above call to rf_Shutdown() */
   1194 		retcode = config_detach(rs->sc_dev, DETACH_QUIET);
   1195 		free(cf, M_RAIDFRAME);
   1196 
   1197 		/* Detach the disk. */
   1198 		disk_detach(&rs->sc_dkdev);
   1199 		disk_destroy(&rs->sc_dkdev);
   1200 
   1201 		raidunlock(rs);
   1202 
   1203 		return (retcode);
   1204 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1205 		clabel_ptr = (RF_ComponentLabel_t **) data;
   1206 		/* need to read the component label for the disk indicated
   1207 		   by row,column in clabel */
   1208 
   1209 		/* For practice, let's get it directly from disk, rather
   1210 		   than from the in-core copy */
   1211 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
   1212 			   (RF_ComponentLabel_t *));
   1213 		if (clabel == NULL)
   1214 			return (ENOMEM);
   1215 
   1216 		retcode = copyin( *clabel_ptr, clabel,
   1217 				  sizeof(RF_ComponentLabel_t));
   1218 
   1219 		if (retcode) {
   1220 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1221 			return(retcode);
   1222 		}
   1223 
   1224 		clabel->row = 0; /* Don't allow looking at anything else.*/
   1225 
   1226 		column = clabel->column;
   1227 
   1228 		if ((column < 0) || (column >= raidPtr->numCol +
   1229 				     raidPtr->numSpare)) {
   1230 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1231 			return(EINVAL);
   1232 		}
   1233 
   1234 		retcode = raidread_component_label(raidPtr->Disks[column].dev,
   1235 				raidPtr->raid_cinfo[column].ci_vp,
   1236 				clabel );
   1237 
   1238 		if (retcode == 0) {
   1239 			retcode = copyout(clabel, *clabel_ptr,
   1240 					  sizeof(RF_ComponentLabel_t));
   1241 		}
   1242 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
   1243 		return (retcode);
   1244 
   1245 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1246 		clabel = (RF_ComponentLabel_t *) data;
   1247 
   1248 		/* XXX check the label for valid stuff... */
   1249 		/* Note that some things *should not* get modified --
   1250 		   the user should be re-initing the labels instead of
   1251 		   trying to patch things.
   1252 		   */
   1253 
   1254 		raidid = raidPtr->raidid;
   1255 #ifdef DEBUG
   1256 		printf("raid%d: Got component label:\n", raidid);
   1257 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1258 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1259 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1260 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1261 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1262 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1263 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1264 #endif
   1265 		clabel->row = 0;
   1266 		column = clabel->column;
   1267 
   1268 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1269 			return(EINVAL);
   1270 		}
   1271 
   1272 		/* XXX this isn't allowed to do anything for now :-) */
   1273 
   1274 		/* XXX and before it is, we need to fill in the rest
   1275 		   of the fields!?!?!?! */
   1276 #if 0
   1277 		raidwrite_component_label(
   1278 		     raidPtr->Disks[column].dev,
   1279 			    raidPtr->raid_cinfo[column].ci_vp,
   1280 			    clabel );
   1281 #endif
   1282 		return (0);
   1283 
   1284 	case RAIDFRAME_INIT_LABELS:
   1285 		clabel = (RF_ComponentLabel_t *) data;
   1286 		/*
   1287 		   we only want the serial number from
   1288 		   the above.  We get all the rest of the information
   1289 		   from the config that was used to create this RAID
   1290 		   set.
   1291 		   */
   1292 
   1293 		raidPtr->serial_number = clabel->serial_number;
   1294 
   1295 		RF_Malloc(ci_label, sizeof(RF_ComponentLabel_t),
   1296 			  (RF_ComponentLabel_t *));
   1297 		if (ci_label == NULL)
   1298 			return (ENOMEM);
   1299 
   1300 		raid_init_component_label(raidPtr, ci_label);
   1301 		ci_label->serial_number = clabel->serial_number;
   1302 		ci_label->row = 0; /* we don't pretend to support more */
   1303 
   1304 		for(column=0;column<raidPtr->numCol;column++) {
   1305 			diskPtr = &raidPtr->Disks[column];
   1306 			if (!RF_DEAD_DISK(diskPtr->status)) {
   1307 				ci_label->partitionSize = diskPtr->partitionSize;
   1308 				ci_label->column = column;
   1309 				raidwrite_component_label(
   1310 							  raidPtr->Disks[column].dev,
   1311 							  raidPtr->raid_cinfo[column].ci_vp,
   1312 							  ci_label );
   1313 			}
   1314 		}
   1315 		RF_Free(ci_label, sizeof(RF_ComponentLabel_t));
   1316 
   1317 		return (retcode);
   1318 	case RAIDFRAME_SET_AUTOCONFIG:
   1319 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1320 		printf("raid%d: New autoconfig value is: %d\n",
   1321 		       raidPtr->raidid, d);
   1322 		*(int *) data = d;
   1323 		return (retcode);
   1324 
   1325 	case RAIDFRAME_SET_ROOT:
   1326 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1327 		printf("raid%d: New rootpartition value is: %d\n",
   1328 		       raidPtr->raidid, d);
   1329 		*(int *) data = d;
   1330 		return (retcode);
   1331 
   1332 		/* initialize all parity */
   1333 	case RAIDFRAME_REWRITEPARITY:
   1334 
   1335 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1336 			/* Parity for RAID 0 is trivially correct */
   1337 			raidPtr->parity_good = RF_RAID_CLEAN;
   1338 			return(0);
   1339 		}
   1340 
   1341 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1342 			/* Re-write is already in progress! */
   1343 			return(EINVAL);
   1344 		}
   1345 
   1346 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1347 					   rf_RewriteParityThread,
   1348 					   raidPtr,"raid_parity");
   1349 		return (retcode);
   1350 
   1351 
   1352 	case RAIDFRAME_ADD_HOT_SPARE:
   1353 		sparePtr = (RF_SingleComponent_t *) data;
   1354 		memcpy( &component, sparePtr, sizeof(RF_SingleComponent_t));
   1355 		retcode = rf_add_hot_spare(raidPtr, &component);
   1356 		return(retcode);
   1357 
   1358 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1359 		return(retcode);
   1360 
   1361 	case RAIDFRAME_DELETE_COMPONENT:
   1362 		componentPtr = (RF_SingleComponent_t *)data;
   1363 		memcpy( &component, componentPtr,
   1364 			sizeof(RF_SingleComponent_t));
   1365 		retcode = rf_delete_component(raidPtr, &component);
   1366 		return(retcode);
   1367 
   1368 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1369 		componentPtr = (RF_SingleComponent_t *)data;
   1370 		memcpy( &component, componentPtr,
   1371 			sizeof(RF_SingleComponent_t));
   1372 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1373 		return(retcode);
   1374 
   1375 	case RAIDFRAME_REBUILD_IN_PLACE:
   1376 
   1377 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1378 			/* Can't do this on a RAID 0!! */
   1379 			return(EINVAL);
   1380 		}
   1381 
   1382 		if (raidPtr->recon_in_progress == 1) {
   1383 			/* a reconstruct is already in progress! */
   1384 			return(EINVAL);
   1385 		}
   1386 
   1387 		componentPtr = (RF_SingleComponent_t *) data;
   1388 		memcpy( &component, componentPtr,
   1389 			sizeof(RF_SingleComponent_t));
   1390 		component.row = 0; /* we don't support any more */
   1391 		column = component.column;
   1392 
   1393 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1394 			return(EINVAL);
   1395 		}
   1396 
   1397 		RF_LOCK_MUTEX(raidPtr->mutex);
   1398 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
   1399 		    (raidPtr->numFailures > 0)) {
   1400 			/* XXX 0 above shouldn't be constant!!! */
   1401 			/* some component other than this has failed.
   1402 			   Let's not make things worse than they already
   1403 			   are... */
   1404 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1405 			       raidPtr->raidid);
   1406 			printf("raid%d:     Col: %d   Too many failures.\n",
   1407 			       raidPtr->raidid, column);
   1408 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1409 			return (EINVAL);
   1410 		}
   1411 		if (raidPtr->Disks[column].status ==
   1412 		    rf_ds_reconstructing) {
   1413 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1414 			       raidPtr->raidid);
   1415 			printf("raid%d:    Col: %d   Reconstruction already occuring!\n", raidPtr->raidid, column);
   1416 
   1417 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1418 			return (EINVAL);
   1419 		}
   1420 		if (raidPtr->Disks[column].status == rf_ds_spared) {
   1421 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1422 			return (EINVAL);
   1423 		}
   1424 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1425 
   1426 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1427 		if (rrcopy == NULL)
   1428 			return(ENOMEM);
   1429 
   1430 		rrcopy->raidPtr = (void *) raidPtr;
   1431 		rrcopy->col = column;
   1432 
   1433 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1434 					   rf_ReconstructInPlaceThread,
   1435 					   rrcopy,"raid_reconip");
   1436 		return(retcode);
   1437 
   1438 	case RAIDFRAME_GET_INFO:
   1439 		if (!raidPtr->valid)
   1440 			return (ENODEV);
   1441 		ucfgp = (RF_DeviceConfig_t **) data;
   1442 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1443 			  (RF_DeviceConfig_t *));
   1444 		if (d_cfg == NULL)
   1445 			return (ENOMEM);
   1446 		d_cfg->rows = 1; /* there is only 1 row now */
   1447 		d_cfg->cols = raidPtr->numCol;
   1448 		d_cfg->ndevs = raidPtr->numCol;
   1449 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1450 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1451 			return (ENOMEM);
   1452 		}
   1453 		d_cfg->nspares = raidPtr->numSpare;
   1454 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1455 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1456 			return (ENOMEM);
   1457 		}
   1458 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1459 		d = 0;
   1460 		for (j = 0; j < d_cfg->cols; j++) {
   1461 			d_cfg->devs[d] = raidPtr->Disks[j];
   1462 			d++;
   1463 		}
   1464 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1465 			d_cfg->spares[i] = raidPtr->Disks[j];
   1466 		}
   1467 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1468 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1469 
   1470 		return (retcode);
   1471 
   1472 	case RAIDFRAME_CHECK_PARITY:
   1473 		*(int *) data = raidPtr->parity_good;
   1474 		return (0);
   1475 
   1476 	case RAIDFRAME_RESET_ACCTOTALS:
   1477 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1478 		return (0);
   1479 
   1480 	case RAIDFRAME_GET_ACCTOTALS:
   1481 		totals = (RF_AccTotals_t *) data;
   1482 		*totals = raidPtr->acc_totals;
   1483 		return (0);
   1484 
   1485 	case RAIDFRAME_KEEP_ACCTOTALS:
   1486 		raidPtr->keep_acc_totals = *(int *)data;
   1487 		return (0);
   1488 
   1489 	case RAIDFRAME_GET_SIZE:
   1490 		*(int *) data = raidPtr->totalSectors;
   1491 		return (0);
   1492 
   1493 		/* fail a disk & optionally start reconstruction */
   1494 	case RAIDFRAME_FAIL_DISK:
   1495 
   1496 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1497 			/* Can't do this on a RAID 0!! */
   1498 			return(EINVAL);
   1499 		}
   1500 
   1501 		rr = (struct rf_recon_req *) data;
   1502 		rr->row = 0;
   1503 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
   1504 			return (EINVAL);
   1505 
   1506 
   1507 		RF_LOCK_MUTEX(raidPtr->mutex);
   1508 		if (raidPtr->status == rf_rs_reconstructing) {
   1509 			/* you can't fail a disk while we're reconstructing! */
   1510 			/* XXX wrong for RAID6 */
   1511 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1512 			return (EINVAL);
   1513 		}
   1514 		if ((raidPtr->Disks[rr->col].status ==
   1515 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1516 			/* some other component has failed.  Let's not make
   1517 			   things worse. XXX wrong for RAID6 */
   1518 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1519 			return (EINVAL);
   1520 		}
   1521 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
   1522 			/* Can't fail a spared disk! */
   1523 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1524 			return (EINVAL);
   1525 		}
   1526 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1527 
   1528 		/* make a copy of the recon request so that we don't rely on
   1529 		 * the user's buffer */
   1530 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1531 		if (rrcopy == NULL)
   1532 			return(ENOMEM);
   1533 		memcpy(rrcopy, rr, sizeof(*rr));
   1534 		rrcopy->raidPtr = (void *) raidPtr;
   1535 
   1536 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1537 					   rf_ReconThread,
   1538 					   rrcopy,"raid_recon");
   1539 		return (0);
   1540 
   1541 		/* invoke a copyback operation after recon on whatever disk
   1542 		 * needs it, if any */
   1543 	case RAIDFRAME_COPYBACK:
   1544 
   1545 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1546 			/* This makes no sense on a RAID 0!! */
   1547 			return(EINVAL);
   1548 		}
   1549 
   1550 		if (raidPtr->copyback_in_progress == 1) {
   1551 			/* Copyback is already in progress! */
   1552 			return(EINVAL);
   1553 		}
   1554 
   1555 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1556 					   rf_CopybackThread,
   1557 					   raidPtr,"raid_copyback");
   1558 		return (retcode);
   1559 
   1560 		/* return the percentage completion of reconstruction */
   1561 	case RAIDFRAME_CHECK_RECON_STATUS:
   1562 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1563 			/* This makes no sense on a RAID 0, so tell the
   1564 			   user it's done. */
   1565 			*(int *) data = 100;
   1566 			return(0);
   1567 		}
   1568 		if (raidPtr->status != rf_rs_reconstructing)
   1569 			*(int *) data = 100;
   1570 		else {
   1571 			if (raidPtr->reconControl->numRUsTotal > 0) {
   1572 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
   1573 			} else {
   1574 				*(int *) data = 0;
   1575 			}
   1576 		}
   1577 		return (0);
   1578 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1579 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1580 		if (raidPtr->status != rf_rs_reconstructing) {
   1581 			progressInfo.remaining = 0;
   1582 			progressInfo.completed = 100;
   1583 			progressInfo.total = 100;
   1584 		} else {
   1585 			progressInfo.total =
   1586 				raidPtr->reconControl->numRUsTotal;
   1587 			progressInfo.completed =
   1588 				raidPtr->reconControl->numRUsComplete;
   1589 			progressInfo.remaining = progressInfo.total -
   1590 				progressInfo.completed;
   1591 		}
   1592 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1593 				  sizeof(RF_ProgressInfo_t));
   1594 		return (retcode);
   1595 
   1596 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1597 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1598 			/* This makes no sense on a RAID 0, so tell the
   1599 			   user it's done. */
   1600 			*(int *) data = 100;
   1601 			return(0);
   1602 		}
   1603 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1604 			*(int *) data = 100 *
   1605 				raidPtr->parity_rewrite_stripes_done /
   1606 				raidPtr->Layout.numStripe;
   1607 		} else {
   1608 			*(int *) data = 100;
   1609 		}
   1610 		return (0);
   1611 
   1612 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1613 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1614 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1615 			progressInfo.total = raidPtr->Layout.numStripe;
   1616 			progressInfo.completed =
   1617 				raidPtr->parity_rewrite_stripes_done;
   1618 			progressInfo.remaining = progressInfo.total -
   1619 				progressInfo.completed;
   1620 		} else {
   1621 			progressInfo.remaining = 0;
   1622 			progressInfo.completed = 100;
   1623 			progressInfo.total = 100;
   1624 		}
   1625 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1626 				  sizeof(RF_ProgressInfo_t));
   1627 		return (retcode);
   1628 
   1629 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1630 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1631 			/* This makes no sense on a RAID 0 */
   1632 			*(int *) data = 100;
   1633 			return(0);
   1634 		}
   1635 		if (raidPtr->copyback_in_progress == 1) {
   1636 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1637 				raidPtr->Layout.numStripe;
   1638 		} else {
   1639 			*(int *) data = 100;
   1640 		}
   1641 		return (0);
   1642 
   1643 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1644 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1645 		if (raidPtr->copyback_in_progress == 1) {
   1646 			progressInfo.total = raidPtr->Layout.numStripe;
   1647 			progressInfo.completed =
   1648 				raidPtr->copyback_stripes_done;
   1649 			progressInfo.remaining = progressInfo.total -
   1650 				progressInfo.completed;
   1651 		} else {
   1652 			progressInfo.remaining = 0;
   1653 			progressInfo.completed = 100;
   1654 			progressInfo.total = 100;
   1655 		}
   1656 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1657 				  sizeof(RF_ProgressInfo_t));
   1658 		return (retcode);
   1659 
   1660 		/* the sparetable daemon calls this to wait for the kernel to
   1661 		 * need a spare table. this ioctl does not return until a
   1662 		 * spare table is needed. XXX -- calling mpsleep here in the
   1663 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1664 		 * -- I should either compute the spare table in the kernel,
   1665 		 * or have a different -- XXX XXX -- interface (a different
   1666 		 * character device) for delivering the table     -- XXX */
   1667 #if 0
   1668 	case RAIDFRAME_SPARET_WAIT:
   1669 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1670 		while (!rf_sparet_wait_queue)
   1671 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
   1672 		waitreq = rf_sparet_wait_queue;
   1673 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1674 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1675 
   1676 		/* structure assignment */
   1677 		*((RF_SparetWait_t *) data) = *waitreq;
   1678 
   1679 		RF_Free(waitreq, sizeof(*waitreq));
   1680 		return (0);
   1681 
   1682 		/* wakes up a process waiting on SPARET_WAIT and puts an error
    1683 		 * code in it that will cause the daemon to exit */
   1684 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1685 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1686 		waitreq->fcol = -1;
   1687 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1688 		waitreq->next = rf_sparet_wait_queue;
   1689 		rf_sparet_wait_queue = waitreq;
   1690 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1691 		wakeup(&rf_sparet_wait_queue);
   1692 		return (0);
   1693 
   1694 		/* used by the spare table daemon to deliver a spare table
   1695 		 * into the kernel */
   1696 	case RAIDFRAME_SEND_SPARET:
   1697 
   1698 		/* install the spare table */
   1699 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1700 
   1701 		/* respond to the requestor.  the return status of the spare
   1702 		 * table installation is passed in the "fcol" field */
   1703 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1704 		waitreq->fcol = retcode;
   1705 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1706 		waitreq->next = rf_sparet_resp_queue;
   1707 		rf_sparet_resp_queue = waitreq;
   1708 		wakeup(&rf_sparet_resp_queue);
   1709 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1710 
   1711 		return (retcode);
   1712 #endif
   1713 
   1714 	default:
   1715 		break; /* fall through to the os-specific code below */
   1716 
   1717 	}
   1718 
   1719 	if (!raidPtr->valid)
   1720 		return (EINVAL);
   1721 
   1722 	/*
   1723 	 * Add support for "regular" device ioctls here.
   1724 	 */
   1725 
   1726 	switch (cmd) {
   1727 	case DIOCGDINFO:
   1728 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
   1729 		break;
   1730 #ifdef __HAVE_OLD_DISKLABEL
   1731 	case ODIOCGDINFO:
   1732 		newlabel = *(rs->sc_dkdev.dk_label);
   1733 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1734 			return ENOTTY;
   1735 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1736 		break;
   1737 #endif
   1738 
   1739 	case DIOCGPART:
   1740 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
   1741 		((struct partinfo *) data)->part =
   1742 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1743 		break;
   1744 
   1745 	case DIOCWDINFO:
   1746 	case DIOCSDINFO:
   1747 #ifdef __HAVE_OLD_DISKLABEL
   1748 	case ODIOCWDINFO:
   1749 	case ODIOCSDINFO:
   1750 #endif
   1751 	{
   1752 		struct disklabel *lp;
   1753 #ifdef __HAVE_OLD_DISKLABEL
   1754 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1755 			memset(&newlabel, 0, sizeof newlabel);
   1756 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1757 			lp = &newlabel;
   1758 		} else
   1759 #endif
   1760 		lp = (struct disklabel *)data;
   1761 
   1762 		if ((error = raidlock(rs)) != 0)
   1763 			return (error);
   1764 
   1765 		rs->sc_flags |= RAIDF_LABELLING;
   1766 
   1767 		error = setdisklabel(rs->sc_dkdev.dk_label,
   1768 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
   1769 		if (error == 0) {
   1770 			if (cmd == DIOCWDINFO
   1771 #ifdef __HAVE_OLD_DISKLABEL
   1772 			    || cmd == ODIOCWDINFO
   1773 #endif
   1774 			   )
   1775 				error = writedisklabel(RAIDLABELDEV(dev),
   1776 				    raidstrategy, rs->sc_dkdev.dk_label,
   1777 				    rs->sc_dkdev.dk_cpulabel);
   1778 		}
   1779 		rs->sc_flags &= ~RAIDF_LABELLING;
   1780 
   1781 		raidunlock(rs);
   1782 
   1783 		if (error)
   1784 			return (error);
   1785 		break;
   1786 	}
   1787 
   1788 	case DIOCWLABEL:
   1789 		if (*(int *) data != 0)
   1790 			rs->sc_flags |= RAIDF_WLABEL;
   1791 		else
   1792 			rs->sc_flags &= ~RAIDF_WLABEL;
   1793 		break;
   1794 
   1795 	case DIOCGDEFLABEL:
   1796 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
   1797 		break;
   1798 
   1799 #ifdef __HAVE_OLD_DISKLABEL
   1800 	case ODIOCGDEFLABEL:
   1801 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
   1802 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1803 			return ENOTTY;
   1804 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1805 		break;
   1806 #endif
   1807 
   1808 	case DIOCAWEDGE:
   1809 	case DIOCDWEDGE:
   1810 	    	dkw = (void *)data;
   1811 
   1812 		/* If the ioctl happens here, the parent is us. */
   1813 		(void)strcpy(dkw->dkw_parent, rs->sc_xname);
   1814 		return cmd == DIOCAWEDGE ? dkwedge_add(dkw) : dkwedge_del(dkw);
   1815 
   1816 	case DIOCLWEDGES:
   1817 		return dkwedge_list(&rs->sc_dkdev,
   1818 		    (struct dkwedge_list *)data, l);
   1819 	case DIOCCACHESYNC:
   1820 		return rf_sync_component_caches(raidPtr);
   1821 	default:
   1822 		retcode = ENOTTY;
   1823 	}
   1824 	return (retcode);
   1825 
   1826 }
   1827 
   1828 
   1829 /* raidinit -- complete the rest of the initialization for the
   1830    RAIDframe device.  */
   1831 
   1832 
   1833 static void
   1834 raidinit(RF_Raid_t *raidPtr)
   1835 {
   1836 	struct cfdata *cf;
   1837 	struct raid_softc *rs;
   1838 	int     unit;
   1839 
   1840 	unit = raidPtr->raidid;
   1841 
   1842 	rs = &raid_softc[unit];
   1843 
   1844 	/* XXX should check return code first... */
   1845 	rs->sc_flags |= RAIDF_INITED;
   1846 
   1847 	/* XXX doesn't check bounds. */
   1848 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
   1849 
   1850 	/* attach the pseudo device */
   1851 	cf = malloc(sizeof(*cf), M_RAIDFRAME, M_WAITOK);
   1852 	cf->cf_name = raid_cd.cd_name;
   1853 	cf->cf_atname = raid_cd.cd_name;
   1854 	cf->cf_unit = unit;
   1855 	cf->cf_fstate = FSTATE_STAR;
   1856 
   1857 	rs->sc_dev = config_attach_pseudo(cf);
   1858 
   1859 	if (rs->sc_dev==NULL) {
   1860 		printf("raid%d: config_attach_pseudo failed\n",
   1861 		       raidPtr->raidid);
   1862 	}
   1863 
   1864 	/* disk_attach actually creates space for the CPU disklabel, among
   1865 	 * other things, so it's critical to call this *BEFORE* we try putzing
   1866 	 * with disklabels. */
   1867 
   1868 	disk_init(&rs->sc_dkdev, rs->sc_xname, &rf_dkdriver);
   1869 	disk_attach(&rs->sc_dkdev);
   1870 
   1871 	/* XXX There may be a weird interaction here between this, and
   1872 	 * protectedSectors, as used in RAIDframe.  */
   1873 
   1874 	rs->sc_size = raidPtr->totalSectors;
   1875 
   1876 	dkwedge_discover(&rs->sc_dkdev);
   1877 
   1878 	rf_set_properties(rs, raidPtr);
   1879 
   1880 }
   1881 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1882 /* wake up the daemon & tell it to get us a spare table
   1883  * XXX
   1884  * the entries in the queues should be tagged with the raidPtr
   1885  * so that in the extremely rare case that two recons happen at once,
    1886  * we know for which device we're requesting a spare table
   1887  * XXX
   1888  *
   1889  * XXX This code is not currently used. GO
   1890  */
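         /*
          * Protocol sketch, pieced together from this function and the
          * (currently #if 0'd) ioctl cases above: the kernel queues an
          * RF_SparetWait_t on rf_sparet_wait_queue and wakes the daemon,
          * which is blocked in RAIDFRAME_SPARET_WAIT.  The daemon computes
          * the table and delivers it with RAIDFRAME_SEND_SPARET; that
          * handler queues a response on rf_sparet_resp_queue with the
          * installation status in "fcol", which is what this function
          * ultimately returns.
          */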
   1891 int
   1892 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
   1893 {
   1894 	int     retcode;
   1895 
   1896 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1897 	req->next = rf_sparet_wait_queue;
   1898 	rf_sparet_wait_queue = req;
   1899 	wakeup(&rf_sparet_wait_queue);
   1900 
   1901 	/* mpsleep unlocks the mutex */
   1902 	while (!rf_sparet_resp_queue) {
   1903 		tsleep(&rf_sparet_resp_queue, PRIBIO,
   1904 		    "raidframe getsparetable", 0);
   1905 	}
   1906 	req = rf_sparet_resp_queue;
   1907 	rf_sparet_resp_queue = req->next;
   1908 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1909 
   1910 	retcode = req->fcol;
   1911 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
   1912 					 * alloc'd */
   1913 	return (retcode);
   1914 }
   1915 #endif
   1916 
    1917 /* A wrapper around rf_DoAccess that extracts the appropriate info from the
    1918  * bp & passes it down.
    1919  * Any calls originating in the kernel must use non-blocking I/O.  Do some
    1920  * extra sanity checking to return "appropriate" error values for
    1921  * certain conditions (to make some standard utilities work).
    1922  *
    1923  * Formerly known as: rf_DoAccessKernel
    1924  */
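         /*
          * Note on the bounds check in the loop below: the sum
          * raid_addr + num_blocks + pb is compared both against
          * raidPtr->totalSectors and against its own addends, so a request
          * that runs past the end of the unit *or* overflows the sector
          * arithmetic is rejected with ENOSPC before reaching rf_DoAccess().
          */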
   1925 void
   1926 raidstart(RF_Raid_t *raidPtr)
   1927 {
   1928 	RF_SectorCount_t num_blocks, pb, sum;
   1929 	RF_RaidAddr_t raid_addr;
   1930 	struct partition *pp;
   1931 	daddr_t blocknum;
   1932 	int     unit;
   1933 	struct raid_softc *rs;
   1934 	int     do_async;
   1935 	struct buf *bp;
   1936 	int rc;
   1937 
   1938 	unit = raidPtr->raidid;
   1939 	rs = &raid_softc[unit];
   1940 
   1941 	/* quick check to see if anything has died recently */
   1942 	RF_LOCK_MUTEX(raidPtr->mutex);
   1943 	if (raidPtr->numNewFailures > 0) {
   1944 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1945 		rf_update_component_labels(raidPtr,
   1946 					   RF_NORMAL_COMPONENT_UPDATE);
   1947 		RF_LOCK_MUTEX(raidPtr->mutex);
   1948 		raidPtr->numNewFailures--;
   1949 	}
   1950 
   1951 	/* Check to see if we're at the limit... */
   1952 	while (raidPtr->openings > 0) {
   1953 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1954 
   1955 		/* get the next item, if any, from the queue */
   1956 		if ((bp = BUFQ_GET(rs->buf_queue)) == NULL) {
   1957 			/* nothing more to do */
   1958 			return;
   1959 		}
   1960 
   1961 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
   1962 		 * partition.. Need to make it absolute to the underlying
   1963 		 * device.. */
   1964 
   1965 		blocknum = bp->b_blkno;
   1966 		if (DISKPART(bp->b_dev) != RAW_PART) {
   1967 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
   1968 			blocknum += pp->p_offset;
   1969 		}
   1970 
   1971 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
   1972 			    (int) blocknum));
   1973 
   1974 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
   1975 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
   1976 
   1977 		/* *THIS* is where we adjust what block we're going to...
   1978 		 * but DO NOT TOUCH bp->b_blkno!!! */
   1979 		raid_addr = blocknum;
   1980 
   1981 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
   1982 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
   1983 		sum = raid_addr + num_blocks + pb;
   1984 		if (1 || rf_debugKernelAccess) {
   1985 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
   1986 				    (int) raid_addr, (int) sum, (int) num_blocks,
   1987 				    (int) pb, (int) bp->b_resid));
   1988 		}
   1989 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
   1990 		    || (sum < num_blocks) || (sum < pb)) {
   1991 			bp->b_error = ENOSPC;
   1992 			bp->b_resid = bp->b_bcount;
   1993 			biodone(bp);
   1994 			RF_LOCK_MUTEX(raidPtr->mutex);
   1995 			continue;
   1996 		}
   1997 		/*
   1998 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
   1999 		 */
   2000 
   2001 		if (bp->b_bcount & raidPtr->sectorMask) {
   2002 			bp->b_error = EINVAL;
   2003 			bp->b_resid = bp->b_bcount;
   2004 			biodone(bp);
   2005 			RF_LOCK_MUTEX(raidPtr->mutex);
   2006 			continue;
   2007 
   2008 		}
   2009 		db1_printf(("Calling DoAccess..\n"));
   2010 
   2011 
   2012 		RF_LOCK_MUTEX(raidPtr->mutex);
   2013 		raidPtr->openings--;
   2014 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   2015 
   2016 		/*
   2017 		 * Everything is async.
   2018 		 */
   2019 		do_async = 1;
   2020 
   2021 		disk_busy(&rs->sc_dkdev);
   2022 
   2023 		/* XXX we're still at splbio() here... do we *really*
   2024 		   need to be? */
   2025 
   2026 		/* don't ever condition on bp->b_flags & B_WRITE.
   2027 		 * always condition on B_READ instead */
   2028 
   2029 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
   2030 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
   2031 				 do_async, raid_addr, num_blocks,
   2032 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
   2033 
   2034 		if (rc) {
   2035 			bp->b_error = rc;
   2036 			bp->b_resid = bp->b_bcount;
   2037 			biodone(bp);
   2038 			/* continue loop */
   2039 		}
   2040 
   2041 		RF_LOCK_MUTEX(raidPtr->mutex);
   2042 	}
   2043 	RF_UNLOCK_MUTEX(raidPtr->mutex);
   2044 }
   2045 
   2046 
   2047 
   2048 
   2049 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   2050 
   2051 int
   2052 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
   2053 {
   2054 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   2055 	struct buf *bp;
   2056 
   2057 	req->queue = queue;
   2058 
   2059 #if DIAGNOSTIC
   2060 	if (queue->raidPtr->raidid >= numraid) {
   2061 		printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
   2062 		    numraid);
   2063 		panic("Invalid Unit number in rf_DispatchKernelIO");
   2064 	}
   2065 #endif
   2066 
   2067 	bp = req->bp;
   2068 
   2069 	switch (req->type) {
   2070 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   2071 		/* XXX need to do something extra here.. */
   2072 		/* I'm leaving this in, as I've never actually seen it used,
   2073 		 * and I'd like folks to report it... GO */
    2074 		printf("WAKEUP CALLED\n");
   2075 		queue->numOutstanding++;
   2076 
   2077 		bp->b_flags = 0;
   2078 		bp->b_private = req;
   2079 
   2080 		KernelWakeupFunc(bp);
   2081 		break;
   2082 
   2083 	case RF_IO_TYPE_READ:
   2084 	case RF_IO_TYPE_WRITE:
   2085 #if RF_ACC_TRACE > 0
   2086 		if (req->tracerec) {
   2087 			RF_ETIMER_START(req->tracerec->timer);
   2088 		}
   2089 #endif
   2090 		InitBP(bp, queue->rf_cinfo->ci_vp,
   2091 		    op, queue->rf_cinfo->ci_dev,
   2092 		    req->sectorOffset, req->numSector,
   2093 		    req->buf, KernelWakeupFunc, (void *) req,
   2094 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   2095 
   2096 		if (rf_debugKernelAccess) {
   2097 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   2098 				(long) bp->b_blkno));
   2099 		}
   2100 		queue->numOutstanding++;
   2101 		queue->last_deq_sector = req->sectorOffset;
   2102 		/* acc wouldn't have been let in if there were any pending
   2103 		 * reqs at any other priority */
   2104 		queue->curPriority = req->priority;
   2105 
   2106 		db1_printf(("Going for %c to unit %d col %d\n",
   2107 			    req->type, queue->raidPtr->raidid,
   2108 			    queue->col));
   2109 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   2110 			(int) req->sectorOffset, (int) req->numSector,
   2111 			(int) (req->numSector <<
   2112 			    queue->raidPtr->logBytesPerSector),
   2113 			(int) queue->raidPtr->logBytesPerSector));
   2114 
   2115 		/*
   2116 		 * XXX: drop lock here since this can block at
   2117 		 * least with backing SCSI devices.  Retake it
   2118 		 * to minimize fuss with calling interfaces.
   2119 		 */
   2120 
   2121 		RF_UNLOCK_QUEUE_MUTEX(queue, "unusedparam");
   2122 		bdev_strategy(bp);
   2123 		RF_LOCK_QUEUE_MUTEX(queue, "unusedparam");
   2124 		break;
   2125 
   2126 	default:
   2127 		panic("bad req->type in rf_DispatchKernelIO");
   2128 	}
   2129 	db1_printf(("Exiting from DispatchKernelIO\n"));
   2130 
   2131 	return (0);
   2132 }
    2133 /* This is the callback function associated with an I/O invoked from
   2134    kernel code.
   2135  */
   2136 static void
   2137 KernelWakeupFunc(struct buf *bp)
   2138 {
   2139 	RF_DiskQueueData_t *req = NULL;
   2140 	RF_DiskQueue_t *queue;
   2141 	int s;
   2142 
   2143 	s = splbio();
   2144 	db1_printf(("recovering the request queue:\n"));
   2145 	req = bp->b_private;
   2146 
   2147 	queue = (RF_DiskQueue_t *) req->queue;
   2148 
   2149 #if RF_ACC_TRACE > 0
   2150 	if (req->tracerec) {
   2151 		RF_ETIMER_STOP(req->tracerec->timer);
   2152 		RF_ETIMER_EVAL(req->tracerec->timer);
   2153 		RF_LOCK_MUTEX(rf_tracing_mutex);
   2154 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   2155 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   2156 		req->tracerec->num_phys_ios++;
   2157 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
   2158 	}
   2159 #endif
   2160 
   2161 	/* XXX Ok, let's get aggressive... If b_error is set, let's go
   2162 	 * ballistic, and mark the component as hosed... */
   2163 
   2164 	if (bp->b_error != 0) {
   2165 		/* Mark the disk as dead */
   2166 		/* but only mark it once... */
   2167 		/* and only if it wouldn't leave this RAID set
   2168 		   completely broken */
   2169 		if (((queue->raidPtr->Disks[queue->col].status ==
   2170 		      rf_ds_optimal) ||
   2171 		     (queue->raidPtr->Disks[queue->col].status ==
   2172 		      rf_ds_used_spare)) &&
   2173 		     (queue->raidPtr->numFailures <
   2174 		      queue->raidPtr->Layout.map->faultsTolerated)) {
   2175 			printf("raid%d: IO Error.  Marking %s as failed.\n",
   2176 			       queue->raidPtr->raidid,
   2177 			       queue->raidPtr->Disks[queue->col].devname);
   2178 			queue->raidPtr->Disks[queue->col].status =
   2179 			    rf_ds_failed;
   2180 			queue->raidPtr->status = rf_rs_degraded;
   2181 			queue->raidPtr->numFailures++;
   2182 			queue->raidPtr->numNewFailures++;
   2183 		} else {	/* Disk is already dead... */
   2184 			/* printf("Disk already marked as dead!\n"); */
   2185 		}
   2186 
   2187 	}
   2188 
   2189 	/* Fill in the error value */
   2190 
   2191 	req->error = bp->b_error;
   2192 
   2193 	simple_lock(&queue->raidPtr->iodone_lock);
   2194 
   2195 	/* Drop this one on the "finished" queue... */
   2196 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
   2197 
   2198 	/* Let the raidio thread know there is work to be done. */
   2199 	wakeup(&(queue->raidPtr->iodone));
   2200 
   2201 	simple_unlock(&queue->raidPtr->iodone_lock);
   2202 
   2203 	splx(s);
   2204 }
   2205 
   2206 
   2207 
   2208 /*
   2209  * initialize a buf structure for doing an I/O in the kernel.
   2210  */
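         /*
          * InitBP() is invoked from rf_DispatchKernelIO() above, which
          * passes KernelWakeupFunc() as cbFunc and the RF_DiskQueueData_t
          * as cbArg, so that biodone() on the component I/O funnels the
          * completed request onto raidPtr->iodone for the raidio thread.
          */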
   2211 static void
   2212 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
   2213        RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
   2214        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
   2215        struct proc *b_proc)
   2216 {
   2217 	/* bp->b_flags       = B_PHYS | rw_flag; */
   2218 	bp->b_flags = rw_flag;	/* XXX need B_PHYS here too??? */
   2219 	bp->b_oflags = 0;
   2220 	bp->b_cflags = 0;
   2221 	bp->b_bcount = numSect << logBytesPerSector;
   2222 	bp->b_bufsize = bp->b_bcount;
   2223 	bp->b_error = 0;
   2224 	bp->b_dev = dev;
   2225 	bp->b_data = bf;
   2226 	bp->b_blkno = startSect;
   2227 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
   2228 	if (bp->b_bcount == 0) {
   2229 		panic("bp->b_bcount is zero in InitBP!!");
   2230 	}
   2231 	bp->b_proc = b_proc;
   2232 	bp->b_iodone = cbFunc;
   2233 	bp->b_private = cbArg;
   2234 }
   2235 
   2236 static void
   2237 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
   2238 		    struct disklabel *lp)
   2239 {
   2240 	memset(lp, 0, sizeof(*lp));
   2241 
   2242 	/* fabricate a label... */
   2243 	lp->d_secperunit = raidPtr->totalSectors;
   2244 	lp->d_secsize = raidPtr->bytesPerSector;
   2245 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
   2246 	lp->d_ntracks = 4 * raidPtr->numCol;
   2247 	lp->d_ncylinders = raidPtr->totalSectors /
   2248 		(lp->d_nsectors * lp->d_ntracks);
   2249 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
   2250 
   2251 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
   2252 	lp->d_type = DTYPE_RAID;
   2253 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   2254 	lp->d_rpm = 3600;
   2255 	lp->d_interleave = 1;
   2256 	lp->d_flags = 0;
   2257 
   2258 	lp->d_partitions[RAW_PART].p_offset = 0;
   2259 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
   2260 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   2261 	lp->d_npartitions = RAW_PART + 1;
   2262 
   2263 	lp->d_magic = DISKMAGIC;
   2264 	lp->d_magic2 = DISKMAGIC;
   2265 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
   2266 
   2267 }
   2268 /*
   2269  * Read the disklabel from the raid device.  If one is not present, fake one
   2270  * up.
   2271  */
   2272 static void
   2273 raidgetdisklabel(dev_t dev)
   2274 {
   2275 	int     unit = raidunit(dev);
   2276 	struct raid_softc *rs = &raid_softc[unit];
   2277 	const char   *errstring;
   2278 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2279 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
   2280 	RF_Raid_t *raidPtr;
   2281 
   2282 	db1_printf(("Getting the disklabel...\n"));
   2283 
   2284 	memset(clp, 0, sizeof(*clp));
   2285 
   2286 	raidPtr = raidPtrs[unit];
   2287 
   2288 	raidgetdefaultlabel(raidPtr, rs, lp);
   2289 
   2290 	/*
   2291 	 * Call the generic disklabel extraction routine.
   2292 	 */
   2293 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
   2294 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
   2295 	if (errstring)
   2296 		raidmakedisklabel(rs);
   2297 	else {
   2298 		int     i;
   2299 		struct partition *pp;
   2300 
   2301 		/*
   2302 		 * Sanity check whether the found disklabel is valid.
   2303 		 *
    2304 		 * This is necessary since the total size of the raid device
    2305 		 * may vary when the interleave is changed even though exactly
    2306 		 * the same components are used, and an old disklabel may be
    2307 		 * used if one is found.
   2308 		 */
   2309 		if (lp->d_secperunit != rs->sc_size)
   2310 			printf("raid%d: WARNING: %s: "
   2311 			    "total sector size in disklabel (%d) != "
   2312 			    "the size of raid (%ld)\n", unit, rs->sc_xname,
   2313 			    lp->d_secperunit, (long) rs->sc_size);
   2314 		for (i = 0; i < lp->d_npartitions; i++) {
   2315 			pp = &lp->d_partitions[i];
   2316 			if (pp->p_offset + pp->p_size > rs->sc_size)
   2317 				printf("raid%d: WARNING: %s: end of partition `%c' "
   2318 				       "exceeds the size of raid (%ld)\n",
   2319 				       unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
   2320 		}
   2321 	}
   2322 
   2323 }
   2324 /*
   2325  * Take care of things one might want to take care of in the event
   2326  * that a disklabel isn't present.
   2327  */
   2328 static void
   2329 raidmakedisklabel(struct raid_softc *rs)
   2330 {
   2331 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2332 	db1_printf(("Making a label..\n"));
   2333 
   2334 	/*
   2335 	 * For historical reasons, if there's no disklabel present
   2336 	 * the raw partition must be marked FS_BSDFFS.
   2337 	 */
   2338 
   2339 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
   2340 
   2341 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
   2342 
   2343 	lp->d_checksum = dkcksum(lp);
   2344 }
   2345 /*
   2346  * Wait interruptibly for an exclusive lock.
   2347  *
   2348  * XXX
   2349  * Several drivers do this; it should be abstracted and made MP-safe.
   2350  * (Hmm... where have we seen this warning before :->  GO )
   2351  */
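         /*
          * Typical usage in this file is a simple bracket around label
          * updates (see the DIOCSDINFO/DIOCWDINFO handler above), roughly:
          *
          *	if ((error = raidlock(rs)) != 0)
          *		return (error);
          *	... modify rs->sc_dkdev.dk_label ...
          *	raidunlock(rs);
          */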
   2352 static int
   2353 raidlock(struct raid_softc *rs)
   2354 {
   2355 	int     error;
   2356 
   2357 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2358 		rs->sc_flags |= RAIDF_WANTED;
   2359 		if ((error =
   2360 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
   2361 			return (error);
   2362 	}
   2363 	rs->sc_flags |= RAIDF_LOCKED;
   2364 	return (0);
   2365 }
   2366 /*
   2367  * Unlock and wake up any waiters.
   2368  */
   2369 static void
   2370 raidunlock(struct raid_softc *rs)
   2371 {
   2372 
   2373 	rs->sc_flags &= ~RAIDF_LOCKED;
   2374 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2375 		rs->sc_flags &= ~RAIDF_WANTED;
   2376 		wakeup(rs);
   2377 	}
   2378 }
   2379 
   2380 
   2381 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2382 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
   2383 
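         /*
          * raidmarkclean() and raidmarkdirty() are thin read-modify-write
          * wrappers: they pull the component label in from the fixed
          * on-disk location defined above, update mod_counter and the
          * clean flag, and write the label straight back out.
          */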
   2384 int
   2385 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
   2386 {
   2387 	RF_ComponentLabel_t clabel;
   2388 	raidread_component_label(dev, b_vp, &clabel);
   2389 	clabel.mod_counter = mod_counter;
   2390 	clabel.clean = RF_RAID_CLEAN;
   2391 	raidwrite_component_label(dev, b_vp, &clabel);
   2392 	return(0);
   2393 }
   2394 
   2395 
   2396 int
   2397 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
   2398 {
   2399 	RF_ComponentLabel_t clabel;
   2400 	raidread_component_label(dev, b_vp, &clabel);
   2401 	clabel.mod_counter = mod_counter;
   2402 	clabel.clean = RF_RAID_DIRTY;
   2403 	raidwrite_component_label(dev, b_vp, &clabel);
   2404 	return(0);
   2405 }
   2406 
   2407 /* ARGSUSED */
   2408 int
   2409 raidread_component_label(dev_t dev, struct vnode *b_vp,
   2410 			 RF_ComponentLabel_t *clabel)
   2411 {
   2412 	struct buf *bp;
   2413 	const struct bdevsw *bdev;
   2414 	int error;
   2415 
   2416 	/* XXX should probably ensure that we don't try to do this if
   2417 	   someone has changed rf_protected_sectors. */
   2418 
   2419 	if (b_vp == NULL) {
   2420 		/* For whatever reason, this component is not valid.
   2421 		   Don't try to read a component label from it. */
   2422 		return(EINVAL);
   2423 	}
   2424 
   2425 	/* get a block of the appropriate size... */
   2426 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2427 	bp->b_dev = dev;
   2428 
   2429 	/* get our ducks in a row for the read */
   2430 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2431 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2432 	bp->b_flags |= B_READ;
   2433  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2434 
   2435 	bdev = bdevsw_lookup(bp->b_dev);
   2436 	if (bdev == NULL)
   2437 		return (ENXIO);
   2438 	(*bdev->d_strategy)(bp);
   2439 
   2440 	error = biowait(bp);
   2441 
   2442 	if (!error) {
   2443 		memcpy(clabel, bp->b_data,
   2444 		       sizeof(RF_ComponentLabel_t));
   2445 	}
   2446 
   2447 	brelse(bp, 0);
   2448 	return(error);
   2449 }
   2450 /* ARGSUSED */
   2451 int
   2452 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
   2453 			  RF_ComponentLabel_t *clabel)
   2454 {
   2455 	struct buf *bp;
   2456 	const struct bdevsw *bdev;
   2457 	int error;
   2458 
   2459 	/* get a block of the appropriate size... */
   2460 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2461 	bp->b_dev = dev;
   2462 
   2463 	/* get our ducks in a row for the write */
   2464 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2465 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2466 	bp->b_flags |= B_WRITE;
   2467  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2468 
   2469 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
   2470 
   2471 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
   2472 
   2473 	bdev = bdevsw_lookup(bp->b_dev);
   2474 	if (bdev == NULL)
   2475 		return (ENXIO);
   2476 	(*bdev->d_strategy)(bp);
   2477 	error = biowait(bp);
   2478 	brelse(bp, 0);
   2479 	if (error) {
   2480 #if 1
   2481 		printf("Failed to write RAID component info!\n");
   2482 #endif
   2483 	}
   2484 
   2485 	return(error);
   2486 }
   2487 
   2488 void
   2489 rf_markalldirty(RF_Raid_t *raidPtr)
   2490 {
   2491 	RF_ComponentLabel_t clabel;
   2492 	int sparecol;
   2493 	int c;
   2494 	int j;
   2495 	int scol = -1;
   2496 
   2497 	raidPtr->mod_counter++;
   2498 	for (c = 0; c < raidPtr->numCol; c++) {
   2499 		/* we don't want to touch (at all) a disk that has
   2500 		   failed */
   2501 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
   2502 			raidread_component_label(
   2503 						 raidPtr->Disks[c].dev,
   2504 						 raidPtr->raid_cinfo[c].ci_vp,
   2505 						 &clabel);
   2506 			if (clabel.status == rf_ds_spared) {
   2507 				/* XXX do something special...
   2508 				   but whatever you do, don't
   2509 				   try to access it!! */
   2510 			} else {
   2511 				raidmarkdirty(
   2512 					      raidPtr->Disks[c].dev,
   2513 					      raidPtr->raid_cinfo[c].ci_vp,
   2514 					      raidPtr->mod_counter);
   2515 			}
   2516 		}
   2517 	}
   2518 
   2519 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2520 		sparecol = raidPtr->numCol + c;
   2521 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2522 			/*
   2523 
   2524 			   we claim this disk is "optimal" if it's
   2525 			   rf_ds_used_spare, as that means it should be
   2526 			   directly substitutable for the disk it replaced.
   2527 			   We note that too...
   2528 
   2529 			 */
   2530 
   2531 			for(j=0;j<raidPtr->numCol;j++) {
   2532 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2533 					scol = j;
   2534 					break;
   2535 				}
   2536 			}
   2537 
   2538 			raidread_component_label(
   2539 				 raidPtr->Disks[sparecol].dev,
   2540 				 raidPtr->raid_cinfo[sparecol].ci_vp,
   2541 				 &clabel);
   2542 			/* make sure status is noted */
   2543 
   2544 			raid_init_component_label(raidPtr, &clabel);
   2545 
   2546 			clabel.row = 0;
   2547 			clabel.column = scol;
   2548 			/* Note: we *don't* change status from rf_ds_used_spare
   2549 			   to rf_ds_optimal */
   2550 			/* clabel.status = rf_ds_optimal; */
   2551 
   2552 			raidmarkdirty(raidPtr->Disks[sparecol].dev,
   2553 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2554 				      raidPtr->mod_counter);
   2555 		}
   2556 	}
   2557 }
   2558 
   2559 
   2560 void
   2561 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
   2562 {
   2563 	RF_ComponentLabel_t clabel;
   2564 	int sparecol;
   2565 	int c;
   2566 	int j;
   2567 	int scol;
   2568 
   2569 	scol = -1;
   2570 
   2571 	/* XXX should do extra checks to make sure things really are clean,
   2572 	   rather than blindly setting the clean bit... */
   2573 
   2574 	raidPtr->mod_counter++;
   2575 
   2576 	for (c = 0; c < raidPtr->numCol; c++) {
   2577 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   2578 			raidread_component_label(
   2579 						 raidPtr->Disks[c].dev,
   2580 						 raidPtr->raid_cinfo[c].ci_vp,
   2581 						 &clabel);
   2582 			/* make sure status is noted */
   2583 			clabel.status = rf_ds_optimal;
   2584 
   2585 			/* bump the counter */
   2586 			clabel.mod_counter = raidPtr->mod_counter;
   2587 
   2588 			/* note what unit we are configured as */
   2589 			clabel.last_unit = raidPtr->raidid;
   2590 
   2591 			raidwrite_component_label(
   2592 						  raidPtr->Disks[c].dev,
   2593 						  raidPtr->raid_cinfo[c].ci_vp,
   2594 						  &clabel);
   2595 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2596 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2597 					raidmarkclean(
   2598 						      raidPtr->Disks[c].dev,
   2599 						      raidPtr->raid_cinfo[c].ci_vp,
   2600 						      raidPtr->mod_counter);
   2601 				}
   2602 			}
   2603 		}
   2604 		/* else we don't touch it.. */
   2605 	}
   2606 
   2607 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2608 		sparecol = raidPtr->numCol + c;
   2609 		/* Need to ensure that the reconstruct actually completed! */
   2610 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2611 			/*
   2612 
   2613 			   we claim this disk is "optimal" if it's
   2614 			   rf_ds_used_spare, as that means it should be
   2615 			   directly substitutable for the disk it replaced.
   2616 			   We note that too...
   2617 
   2618 			 */
   2619 
   2620 			for(j=0;j<raidPtr->numCol;j++) {
   2621 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2622 					scol = j;
   2623 					break;
   2624 				}
   2625 			}
   2626 
   2627 			/* XXX shouldn't *really* need this... */
   2628 			raidread_component_label(
   2629 				      raidPtr->Disks[sparecol].dev,
   2630 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2631 				      &clabel);
   2632 			/* make sure status is noted */
   2633 
   2634 			raid_init_component_label(raidPtr, &clabel);
   2635 
   2636 			clabel.mod_counter = raidPtr->mod_counter;
   2637 			clabel.column = scol;
   2638 			clabel.status = rf_ds_optimal;
   2639 			clabel.last_unit = raidPtr->raidid;
   2640 
   2641 			raidwrite_component_label(
   2642 				      raidPtr->Disks[sparecol].dev,
   2643 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2644 				      &clabel);
   2645 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2646 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2647 					raidmarkclean( raidPtr->Disks[sparecol].dev,
   2648 						       raidPtr->raid_cinfo[sparecol].ci_vp,
   2649 						       raidPtr->mod_counter);
   2650 				}
   2651 			}
   2652 		}
   2653 	}
   2654 }
   2655 
   2656 void
   2657 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
   2658 {
   2659 
   2660 	if (vp != NULL) {
   2661 		if (auto_configured == 1) {
   2662 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2663 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2664 			vput(vp);
   2665 
   2666 		} else {
   2667 			(void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
   2668 		}
   2669 	}
   2670 }
   2671 
   2672 
   2673 void
   2674 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
   2675 {
   2676 	int r,c;
   2677 	struct vnode *vp;
   2678 	int acd;
   2679 
   2680 
   2681 	/* We take this opportunity to close the vnodes like we should.. */
   2682 
   2683 	for (c = 0; c < raidPtr->numCol; c++) {
   2684 		vp = raidPtr->raid_cinfo[c].ci_vp;
   2685 		acd = raidPtr->Disks[c].auto_configured;
   2686 		rf_close_component(raidPtr, vp, acd);
   2687 		raidPtr->raid_cinfo[c].ci_vp = NULL;
   2688 		raidPtr->Disks[c].auto_configured = 0;
   2689 	}
   2690 
   2691 	for (r = 0; r < raidPtr->numSpare; r++) {
   2692 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
   2693 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
   2694 		rf_close_component(raidPtr, vp, acd);
   2695 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
   2696 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
   2697 	}
   2698 }
   2699 
   2700 
   2701 void
   2702 rf_ReconThread(struct rf_recon_req *req)
   2703 {
   2704 	int     s;
   2705 	RF_Raid_t *raidPtr;
   2706 
   2707 	s = splbio();
   2708 	raidPtr = (RF_Raid_t *) req->raidPtr;
   2709 	raidPtr->recon_in_progress = 1;
   2710 
   2711 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
   2712 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
   2713 
   2714 	RF_Free(req, sizeof(*req));
   2715 
   2716 	raidPtr->recon_in_progress = 0;
   2717 	splx(s);
   2718 
   2719 	/* That's all... */
   2720 	kthread_exit(0);	/* does not return */
   2721 }
   2722 
   2723 void
   2724 rf_RewriteParityThread(RF_Raid_t *raidPtr)
   2725 {
   2726 	int retcode;
   2727 	int s;
   2728 
   2729 	raidPtr->parity_rewrite_stripes_done = 0;
   2730 	raidPtr->parity_rewrite_in_progress = 1;
   2731 	s = splbio();
   2732 	retcode = rf_RewriteParity(raidPtr);
   2733 	splx(s);
   2734 	if (retcode) {
   2735 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
   2736 	} else {
   2737 		/* set the clean bit!  If we shutdown correctly,
   2738 		   the clean bit on each component label will get
   2739 		   set */
   2740 		raidPtr->parity_good = RF_RAID_CLEAN;
   2741 	}
   2742 	raidPtr->parity_rewrite_in_progress = 0;
   2743 
   2744 	/* Anyone waiting for us to stop?  If so, inform them... */
   2745 	if (raidPtr->waitShutdown) {
   2746 		wakeup(&raidPtr->parity_rewrite_in_progress);
   2747 	}
   2748 
   2749 	/* That's all... */
   2750 	kthread_exit(0);	/* does not return */
   2751 }
   2752 
   2753 
   2754 void
   2755 rf_CopybackThread(RF_Raid_t *raidPtr)
   2756 {
   2757 	int s;
   2758 
   2759 	raidPtr->copyback_in_progress = 1;
   2760 	s = splbio();
   2761 	rf_CopybackReconstructedData(raidPtr);
   2762 	splx(s);
   2763 	raidPtr->copyback_in_progress = 0;
   2764 
   2765 	/* That's all... */
   2766 	kthread_exit(0);	/* does not return */
   2767 }
   2768 
   2769 
   2770 void
   2771 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
   2772 {
   2773 	int s;
   2774 	RF_Raid_t *raidPtr;
   2775 
   2776 	s = splbio();
   2777 	raidPtr = req->raidPtr;
   2778 	raidPtr->recon_in_progress = 1;
   2779 	rf_ReconstructInPlace(raidPtr, req->col);
   2780 	RF_Free(req, sizeof(*req));
   2781 	raidPtr->recon_in_progress = 0;
   2782 	splx(s);
   2783 
   2784 	/* That's all... */
   2785 	kthread_exit(0);	/* does not return */
   2786 }
   2787 
   2788 static RF_AutoConfig_t *
   2789 rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
   2790     const char *cname, RF_SectorCount_t size)
   2791 {
   2792 	int good_one = 0;
   2793 	RF_ComponentLabel_t *clabel;
   2794 	RF_AutoConfig_t *ac;
   2795 
   2796 	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
   2797 	if (clabel == NULL) {
   2798 oomem:
   2799 		    while(ac_list) {
   2800 			    ac = ac_list;
   2801 			    if (ac->clabel)
   2802 				    free(ac->clabel, M_RAIDFRAME);
   2803 			    ac_list = ac_list->next;
   2804 			    free(ac, M_RAIDFRAME);
   2805 		    }
   2806 		    printf("RAID auto config: out of memory!\n");
   2807 		    return NULL; /* XXX probably should panic? */
   2808 	}
   2809 
   2810 	if (!raidread_component_label(dev, vp, clabel)) {
   2811 		    /* Got the label.  Does it look reasonable? */
   2812 		    if (rf_reasonable_label(clabel) &&
   2813 			(clabel->partitionSize <= size)) {
   2814 #ifdef DEBUG
   2815 			    printf("Component on: %s: %llu\n",
   2816 				cname, (unsigned long long)size);
   2817 			    rf_print_component_label(clabel);
   2818 #endif
   2819 			    /* if it's reasonable, add it, else ignore it. */
   2820 			    ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
   2821 				M_NOWAIT);
   2822 			    if (ac == NULL) {
   2823 				    free(clabel, M_RAIDFRAME);
   2824 				    goto oomem;
   2825 			    }
   2826 			    strlcpy(ac->devname, cname, sizeof(ac->devname));
   2827 			    ac->dev = dev;
   2828 			    ac->vp = vp;
   2829 			    ac->clabel = clabel;
   2830 			    ac->next = ac_list;
   2831 			    ac_list = ac;
   2832 			    good_one = 1;
   2833 		    }
   2834 	}
   2835 	if (!good_one) {
   2836 		/* cleanup */
   2837 		free(clabel, M_RAIDFRAME);
   2838 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2839 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2840 		vput(vp);
   2841 	}
   2842 	return ac_list;
   2843 }
   2844 
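         /*
          * Walk every disk-class device in the system looking for RAIDframe
          * components: floppies, CDs and md(4) devices are skipped, wedges
          * are accepted when their partition type is DKW_PTYPE_RAIDFRAME,
          * and ordinary disks are scanned for partitions marked FS_RAID.
          * Each candidate is handed to rf_get_component(), which vets its
          * component label before adding it to the returned list.
          */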
   2845 RF_AutoConfig_t *
   2846 rf_find_raid_components()
   2847 {
   2848 	struct vnode *vp;
   2849 	struct disklabel label;
   2850 	struct device *dv;
   2851 	dev_t dev;
   2852 	int bmajor, bminor, wedge;
   2853 	int error;
   2854 	int i;
   2855 	RF_AutoConfig_t *ac_list;
   2856 
   2857 
   2858 	/* initialize the AutoConfig list */
   2859 	ac_list = NULL;
   2860 
   2861 	/* we begin by trolling through *all* the devices on the system */
   2862 
   2863 	for (dv = alldevs.tqh_first; dv != NULL;
   2864 	     dv = dv->dv_list.tqe_next) {
   2865 
   2866 		/* we are only interested in disks... */
   2867 		if (device_class(dv) != DV_DISK)
   2868 			continue;
   2869 
   2870 		/* we don't care about floppies... */
   2871 		if (device_is_a(dv, "fd")) {
   2872 			continue;
   2873 		}
   2874 
   2875 		/* we don't care about CD's... */
   2876 		if (device_is_a(dv, "cd")) {
   2877 			continue;
   2878 		}
   2879 
   2880 		/* we don't care about md's... */
   2881 		if (device_is_a(dv, "md")) {
   2882 			continue;
   2883 		}
   2884 
   2885 		/* hdfd is the Atari/Hades floppy driver */
   2886 		if (device_is_a(dv, "hdfd")) {
   2887 			continue;
   2888 		}
   2889 
   2890 		/* fdisa is the Atari/Milan floppy driver */
   2891 		if (device_is_a(dv, "fdisa")) {
   2892 			continue;
   2893 		}
   2894 
   2895 		/* need to find the device_name_to_block_device_major stuff */
   2896 		bmajor = devsw_name2blk(device_xname(dv), NULL, 0);
   2897 
   2898 		/* get a vnode for the raw partition of this disk */
   2899 
   2900 		wedge = device_is_a(dv, "dk");
   2901 		bminor = minor(device_unit(dv));
   2902 		dev = wedge ? makedev(bmajor, bminor) :
   2903 		    MAKEDISKDEV(bmajor, bminor, RAW_PART);
   2904 		if (bdevvp(dev, &vp))
   2905 			panic("RAID can't alloc vnode");
   2906 
   2907 		error = VOP_OPEN(vp, FREAD, NOCRED);
   2908 
   2909 		if (error) {
   2910 			/* "Who cares."  Continue looking
    2911 			   for something that exists */
   2912 			vput(vp);
   2913 			continue;
   2914 		}
   2915 
   2916 		if (wedge) {
   2917 			struct dkwedge_info dkw;
   2918 			error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
   2919 			    NOCRED);
   2920 			if (error) {
   2921 				printf("RAIDframe: can't get wedge info for "
   2922 				    "dev %s (%d)\n", device_xname(dv), error);
   2923 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2924 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2925 				vput(vp);
   2926 				continue;
   2927 			}
   2928 
   2929 			if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
   2930 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2931 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2932 				vput(vp);
   2933 				continue;
   2934 			}
   2935 
   2936 			ac_list = rf_get_component(ac_list, dev, vp,
   2937 			    device_xname(dv), dkw.dkw_size);
   2938 			continue;
   2939 		}
   2940 
   2941 		/* Ok, the disk exists.  Go get the disklabel. */
   2942 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
   2943 		if (error) {
   2944 			/*
   2945 			 * XXX can't happen - open() would
   2946 			 * have errored out (or faked up one)
   2947 			 */
   2948 			if (error != ENOTTY)
   2949 				printf("RAIDframe: can't get label for dev "
   2950 				    "%s (%d)\n", device_xname(dv), error);
   2951 		}
   2952 
   2953 		/* don't need this any more.  We'll allocate it again
   2954 		   a little later if we really do... */
   2955 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2956 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
   2957 		vput(vp);
   2958 
   2959 		if (error)
   2960 			continue;
   2961 
   2962 		for (i = 0; i < label.d_npartitions; i++) {
   2963 			char cname[sizeof(ac_list->devname)];
   2964 
   2965 			/* We only support partitions marked as RAID */
   2966 			if (label.d_partitions[i].p_fstype != FS_RAID)
   2967 				continue;
   2968 
   2969 			dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
   2970 			if (bdevvp(dev, &vp))
   2971 				panic("RAID can't alloc vnode");
   2972 
   2973 			error = VOP_OPEN(vp, FREAD, NOCRED);
   2974 			if (error) {
   2975 				/* Whatever... */
   2976 				vput(vp);
   2977 				continue;
   2978 			}
   2979 			snprintf(cname, sizeof(cname), "%s%c",
   2980 			    device_xname(dv), 'a' + i);
   2981 			ac_list = rf_get_component(ac_list, dev, vp, cname,
   2982 				label.d_partitions[i].p_size);
   2983 		}
   2984 	}
   2985 	return ac_list;
   2986 }
   2987 
   2988 
   2989 static int
   2990 rf_reasonable_label(RF_ComponentLabel_t *clabel)
   2991 {
   2992 
   2993 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   2994 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   2995 	    ((clabel->clean == RF_RAID_CLEAN) ||
   2996 	     (clabel->clean == RF_RAID_DIRTY)) &&
   2997 	    clabel->row >=0 &&
   2998 	    clabel->column >= 0 &&
   2999 	    clabel->num_rows > 0 &&
   3000 	    clabel->num_columns > 0 &&
   3001 	    clabel->row < clabel->num_rows &&
   3002 	    clabel->column < clabel->num_columns &&
   3003 	    clabel->blockSize > 0 &&
   3004 	    clabel->numBlocks > 0) {
   3005 		/* label looks reasonable enough... */
   3006 		return(1);
   3007 	}
   3008 	return(0);
   3009 }
   3010 
   3011 
   3012 #ifdef DEBUG
   3013 void
   3014 rf_print_component_label(RF_ComponentLabel_t *clabel)
   3015 {
   3016 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
   3017 	       clabel->row, clabel->column,
   3018 	       clabel->num_rows, clabel->num_columns);
   3019 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
   3020 	       clabel->version, clabel->serial_number,
   3021 	       clabel->mod_counter);
   3022 	printf("   Clean: %s Status: %d\n",
   3023 	       clabel->clean ? "Yes" : "No", clabel->status );
   3024 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
   3025 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
   3026 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
   3027 	       (char) clabel->parityConfig, clabel->blockSize,
   3028 	       clabel->numBlocks);
   3029 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
   3030 	printf("   Contains root partition: %s\n",
   3031 	       clabel->root_partition ? "Yes" : "No" );
   3032 	printf("   Last configured as: raid%d\n", clabel->last_unit );
   3033 #if 0
   3034 	   printf("   Config order: %d\n", clabel->config_order);
   3035 #endif
   3036 
   3037 }
   3038 #endif
   3039 
   3040 RF_ConfigSet_t *
   3041 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
   3042 {
   3043 	RF_AutoConfig_t *ac;
   3044 	RF_ConfigSet_t *config_sets;
   3045 	RF_ConfigSet_t *cset;
   3046 	RF_AutoConfig_t *ac_next;
   3047 
   3048 
   3049 	config_sets = NULL;
   3050 
   3051 	/* Go through the AutoConfig list, and figure out which components
   3052 	   belong to what sets.  */
   3053 	ac = ac_list;
   3054 	while(ac!=NULL) {
   3055 		/* we're going to putz with ac->next, so save it here
   3056 		   for use at the end of the loop */
   3057 		ac_next = ac->next;
   3058 
   3059 		if (config_sets == NULL) {
   3060 			/* will need at least this one... */
   3061 			config_sets = (RF_ConfigSet_t *)
   3062 				malloc(sizeof(RF_ConfigSet_t),
   3063 				       M_RAIDFRAME, M_NOWAIT);
   3064 			if (config_sets == NULL) {
   3065 				panic("rf_create_auto_sets: No memory!");
   3066 			}
   3067 			/* this one is easy :) */
   3068 			config_sets->ac = ac;
   3069 			config_sets->next = NULL;
   3070 			config_sets->rootable = 0;
   3071 			ac->next = NULL;
   3072 		} else {
   3073 			/* which set does this component fit into? */
   3074 			cset = config_sets;
   3075 			while(cset!=NULL) {
   3076 				if (rf_does_it_fit(cset, ac)) {
   3077 					/* looks like it matches... */
   3078 					ac->next = cset->ac;
   3079 					cset->ac = ac;
   3080 					break;
   3081 				}
   3082 				cset = cset->next;
   3083 			}
   3084 			if (cset==NULL) {
    3085 				/* didn't find a match above... new set.. */
   3086 				cset = (RF_ConfigSet_t *)
   3087 					malloc(sizeof(RF_ConfigSet_t),
   3088 					       M_RAIDFRAME, M_NOWAIT);
   3089 				if (cset == NULL) {
   3090 					panic("rf_create_auto_sets: No memory!");
   3091 				}
   3092 				cset->ac = ac;
   3093 				ac->next = NULL;
   3094 				cset->next = config_sets;
   3095 				cset->rootable = 0;
   3096 				config_sets = cset;
   3097 			}
   3098 		}
   3099 		ac = ac_next;
   3100 	}
   3101 
   3102 
   3103 	return(config_sets);
   3104 }
   3105 
   3106 static int
   3107 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
   3108 {
   3109 	RF_ComponentLabel_t *clabel1, *clabel2;
   3110 
   3111 	/* If this one matches the *first* one in the set, that's good
   3112 	   enough, since the other members of the set would have been
   3113 	   through here too... */
   3114 	/* note that we are not checking partitionSize here..
   3115 
   3116 	   Note that we are also not checking the mod_counters here.
    3117 	   If everything else matches except the mod_counter, that's
   3118 	   good enough for this test.  We will deal with the mod_counters
   3119 	   a little later in the autoconfiguration process.
   3120 
   3121 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   3122 
   3123 	   The reason we don't check for this is that failed disks
   3124 	   will have lower modification counts.  If those disks are
   3125 	   not added to the set they used to belong to, then they will
   3126 	   form their own set, which may result in 2 different sets,
   3127 	   for example, competing to be configured at raid0, and
   3128 	   perhaps competing to be the root filesystem set.  If the
   3129 	   wrong ones get configured, or both attempt to become /,
    3130 	   weird behaviour and/or serious lossage will occur.  Thus we
   3131 	   need to bring them into the fold here, and kick them out at
   3132 	   a later point.
   3133 
   3134 	*/
   3135 
   3136 	clabel1 = cset->ac->clabel;
   3137 	clabel2 = ac->clabel;
   3138 	if ((clabel1->version == clabel2->version) &&
   3139 	    (clabel1->serial_number == clabel2->serial_number) &&
   3140 	    (clabel1->num_rows == clabel2->num_rows) &&
   3141 	    (clabel1->num_columns == clabel2->num_columns) &&
   3142 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   3143 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   3144 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   3145 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   3146 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   3147 	    (clabel1->blockSize == clabel2->blockSize) &&
   3148 	    (clabel1->numBlocks == clabel2->numBlocks) &&
   3149 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   3150 	    (clabel1->root_partition == clabel2->root_partition) &&
   3151 	    (clabel1->last_unit == clabel2->last_unit) &&
   3152 	    (clabel1->config_order == clabel2->config_order)) {
    3153 		/* if it gets here, it almost *has* to be a match */
   3154 	} else {
   3155 		/* it's not consistent with somebody in the set..
   3156 		   punt */
   3157 		return(0);
   3158 	}
   3159 	/* all was fine.. it must fit... */
   3160 	return(1);
   3161 }
   3162 
   3163 int
   3164 rf_have_enough_components(RF_ConfigSet_t *cset)
   3165 {
   3166 	RF_AutoConfig_t *ac;
   3167 	RF_AutoConfig_t *auto_config;
   3168 	RF_ComponentLabel_t *clabel;
   3169 	int c;
   3170 	int num_cols;
   3171 	int num_missing;
   3172 	int mod_counter;
   3173 	int mod_counter_found;
   3174 	int even_pair_failed;
   3175 	char parity_type;
   3176 
   3177 
   3178 	/* check to see that we have enough 'live' components
   3179 	   of this set.  If so, we can configure it if necessary */
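         	/* Concretely (see the checks at the end of this function):
         	   RAID 0 needs every column, RAID 4 and RAID 5 tolerate at
         	   most one missing column, and a RAID 1 set is only rejected
         	   when both members of an even/odd column pair are missing. */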
   3180 
   3181 	num_cols = cset->ac->clabel->num_columns;
   3182 	parity_type = cset->ac->clabel->parityConfig;
   3183 
   3184 	/* XXX Check for duplicate components!?!?!? */
   3185 
   3186 	/* Determine what the mod_counter is supposed to be for this set. */
   3187 
   3188 	mod_counter_found = 0;
   3189 	mod_counter = 0;
   3190 	ac = cset->ac;
   3191 	while(ac!=NULL) {
   3192 		if (mod_counter_found==0) {
   3193 			mod_counter = ac->clabel->mod_counter;
   3194 			mod_counter_found = 1;
   3195 		} else {
   3196 			if (ac->clabel->mod_counter > mod_counter) {
   3197 				mod_counter = ac->clabel->mod_counter;
   3198 			}
   3199 		}
   3200 		ac = ac->next;
   3201 	}
   3202 
   3203 	num_missing = 0;
   3204 	auto_config = cset->ac;
   3205 
   3206 	even_pair_failed = 0;
   3207 	for(c=0; c<num_cols; c++) {
   3208 		ac = auto_config;
   3209 		while(ac!=NULL) {
   3210 			if ((ac->clabel->column == c) &&
   3211 			    (ac->clabel->mod_counter == mod_counter)) {
   3212 				/* it's this one... */
   3213 #ifdef DEBUG
   3214 				printf("Found: %s at %d\n",
   3215 				       ac->devname,c);
   3216 #endif
   3217 				break;
   3218 			}
   3219 			ac=ac->next;
   3220 		}
   3221 		if (ac==NULL) {
   3222 				/* Didn't find one here! */
   3223 				/* special case for RAID 1, especially
   3224 				   where there are more than 2
   3225 				   components (where RAIDframe treats
   3226 				   things a little differently :( ) */
   3227 			if (parity_type == '1') {
   3228 				if (c%2 == 0) { /* even component */
   3229 					even_pair_failed = 1;
   3230 				} else { /* odd component.  If
   3231 					    we're failed, and
   3232 					    so is the even
   3233 					    component, it's
   3234 					    "Good Night, Charlie" */
   3235 					if (even_pair_failed == 1) {
   3236 						return(0);
   3237 					}
   3238 				}
   3239 			} else {
   3240 				/* normal accounting */
   3241 				num_missing++;
   3242 			}
   3243 		}
   3244 		if ((parity_type == '1') && (c%2 == 1)) {
    3245 				/* Just finished an even/odd pair, and we didn't
    3246 				   bail.. reset the even_pair_failed flag,
    3247 				   and go on to the next pair.... */
   3248 			even_pair_failed = 0;
   3249 		}
   3250 	}
   3251 
   3252 	clabel = cset->ac->clabel;
   3253 
   3254 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
   3255 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
   3256 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
   3257 		/* XXX this needs to be made *much* more general */
   3258 		/* Too many failures */
   3259 		return(0);
   3260 	}
   3261 	/* otherwise, all is well, and we've got enough to take a kick
   3262 	   at autoconfiguring this set */
   3263 	return(1);
   3264 }
   3265 
   3266 void
   3267 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
   3268 			RF_Raid_t *raidPtr)
   3269 {
   3270 	RF_ComponentLabel_t *clabel;
   3271 	int i;
   3272 
   3273 	clabel = ac->clabel;
   3274 
   3275 	/* 1. Fill in the common stuff */
   3276 	config->numRow = clabel->num_rows = 1;
   3277 	config->numCol = clabel->num_columns;
   3278 	config->numSpare = 0; /* XXX should this be set here? */
   3279 	config->sectPerSU = clabel->sectPerSU;
   3280 	config->SUsPerPU = clabel->SUsPerPU;
   3281 	config->SUsPerRU = clabel->SUsPerRU;
   3282 	config->parityConfig = clabel->parityConfig;
   3283 	/* XXX... */
   3284 	strcpy(config->diskQueueType,"fifo");
   3285 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3286 	config->layoutSpecificSize = 0; /* XXX ?? */
   3287 
   3288 	while(ac!=NULL) {
   3289 		/* row/col values will be in range due to the checks
   3290 		   in reasonable_label() */
   3291 		strcpy(config->devnames[0][ac->clabel->column],
   3292 		       ac->devname);
   3293 		ac = ac->next;
   3294 	}
   3295 
   3296 	for(i=0;i<RF_MAXDBGV;i++) {
   3297 		config->debugVars[i][0] = 0;
   3298 	}
   3299 }
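/*
 * Illustration only (values are hypothetical): for a three-component
 * RAID 5 set the RF_Config_t built above ends up with roughly
 *
 *	numRow = 1;
 *	numCol = 3;
 *	parityConfig = '5';
 *	diskQueueType = "fifo";
 *	devnames[0][0..2] = the component names recorded in the labels;
 *
 * with numSpare left at 0, which is everything rf_Configure() below
 * needs to bring the set up.
 */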
   3300 
   3301 int
   3302 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
   3303 {
   3304 	RF_ComponentLabel_t clabel;
   3305 	struct vnode *vp;
   3306 	dev_t dev;
   3307 	int column;
   3308 	int sparecol;
   3309 
   3310 	raidPtr->autoconfigure = new_value;
   3311 
   3312 	for(column=0; column<raidPtr->numCol; column++) {
   3313 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3314 			dev = raidPtr->Disks[column].dev;
   3315 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3316 			raidread_component_label(dev, vp, &clabel);
   3317 			clabel.autoconfigure = new_value;
   3318 			raidwrite_component_label(dev, vp, &clabel);
   3319 		}
   3320 	}
    3321 	for(column = 0; column < raidPtr->numSpare; column++) {
   3322 		sparecol = raidPtr->numCol + column;
   3323 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3324 			dev = raidPtr->Disks[sparecol].dev;
   3325 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3326 			raidread_component_label(dev, vp, &clabel);
   3327 			clabel.autoconfigure = new_value;
   3328 			raidwrite_component_label(dev, vp, &clabel);
   3329 		}
   3330 	}
   3331 	return(new_value);
   3332 }
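/*
 * Usage sketch (assumed caller, not shown in this excerpt): the
 * RAIDFRAME_SET_AUTOCONFIG ioctl handler is expected to pass the
 * user-supplied value straight through, e.g.
 *
 *	d = rf_set_autoconfig(raidPtr, *(int *) data);
 *	*(int *) data = d;
 *
 * so the in-core flag and every component label stay in sync.
 * rf_set_rootpartition() below follows the same pattern for the
 * root_partition flag.
 */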
   3333 
   3334 int
   3335 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
   3336 {
   3337 	RF_ComponentLabel_t clabel;
   3338 	struct vnode *vp;
   3339 	dev_t dev;
   3340 	int column;
   3341 	int sparecol;
   3342 
   3343 	raidPtr->root_partition = new_value;
   3344 	for(column=0; column<raidPtr->numCol; column++) {
   3345 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3346 			dev = raidPtr->Disks[column].dev;
   3347 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3348 			raidread_component_label(dev, vp, &clabel);
   3349 			clabel.root_partition = new_value;
   3350 			raidwrite_component_label(dev, vp, &clabel);
   3351 		}
   3352 	}
    3353 	for(column = 0; column < raidPtr->numSpare; column++) {
   3354 		sparecol = raidPtr->numCol + column;
   3355 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3356 			dev = raidPtr->Disks[sparecol].dev;
   3357 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3358 			raidread_component_label(dev, vp, &clabel);
   3359 			clabel.root_partition = new_value;
   3360 			raidwrite_component_label(dev, vp, &clabel);
   3361 		}
   3362 	}
   3363 	return(new_value);
   3364 }
   3365 
   3366 void
   3367 rf_release_all_vps(RF_ConfigSet_t *cset)
   3368 {
   3369 	RF_AutoConfig_t *ac;
   3370 
   3371 	ac = cset->ac;
   3372 	while(ac!=NULL) {
   3373 		/* Close the vp, and give it back */
   3374 		if (ac->vp) {
   3375 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3376 			VOP_CLOSE(ac->vp, FREAD, NOCRED);
   3377 			vput(ac->vp);
   3378 			ac->vp = NULL;
   3379 		}
   3380 		ac = ac->next;
   3381 	}
   3382 }
   3383 
   3384 
   3385 void
   3386 rf_cleanup_config_set(RF_ConfigSet_t *cset)
   3387 {
   3388 	RF_AutoConfig_t *ac;
   3389 	RF_AutoConfig_t *next_ac;
   3390 
   3391 	ac = cset->ac;
   3392 	while(ac!=NULL) {
   3393 		next_ac = ac->next;
   3394 		/* nuke the label */
   3395 		free(ac->clabel, M_RAIDFRAME);
   3396 		/* cleanup the config structure */
   3397 		free(ac, M_RAIDFRAME);
   3398 		/* "next.." */
   3399 		ac = next_ac;
   3400 	}
   3401 	/* and, finally, nuke the config set */
   3402 	free(cset, M_RAIDFRAME);
   3403 }
   3404 
   3405 
   3406 void
   3407 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
   3408 {
   3409 	/* current version number */
   3410 	clabel->version = RF_COMPONENT_LABEL_VERSION;
   3411 	clabel->serial_number = raidPtr->serial_number;
   3412 	clabel->mod_counter = raidPtr->mod_counter;
   3413 	clabel->num_rows = 1;
   3414 	clabel->num_columns = raidPtr->numCol;
   3415 	clabel->clean = RF_RAID_DIRTY; /* not clean */
   3416 	clabel->status = rf_ds_optimal; /* "It's good!" */
   3417 
   3418 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
   3419 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
   3420 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
   3421 
   3422 	clabel->blockSize = raidPtr->bytesPerSector;
   3423 	clabel->numBlocks = raidPtr->sectorsPerDisk;
   3424 
   3425 	/* XXX not portable */
   3426 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
   3427 	clabel->maxOutstanding = raidPtr->maxOutstanding;
   3428 	clabel->autoconfigure = raidPtr->autoconfigure;
   3429 	clabel->root_partition = raidPtr->root_partition;
   3430 	clabel->last_unit = raidPtr->raidid;
   3431 	clabel->config_order = raidPtr->config_order;
   3432 }
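/*
 * Only the set-wide fields are initialised here.  A sketch of how a
 * caller is assumed to finish the job for one component (column `col';
 * field names as used elsewhere in the label structure):
 *
 *	raid_init_component_label(raidPtr, &clabel);
 *	clabel.row = 0;
 *	clabel.column = col;
 *	clabel.partitionSize = raidPtr->Disks[col].partitionSize;
 *	raidwrite_component_label(raidPtr->Disks[col].dev,
 *	    raidPtr->raid_cinfo[col].ci_vp, &clabel);
 */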
   3433 
   3434 int
   3435 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
   3436 {
   3437 	RF_Raid_t *raidPtr;
   3438 	RF_Config_t *config;
   3439 	int raidID;
   3440 	int retcode;
   3441 
   3442 #ifdef DEBUG
   3443 	printf("RAID autoconfigure\n");
   3444 #endif
   3445 
   3446 	retcode = 0;
   3447 	*unit = -1;
   3448 
   3449 	/* 1. Create a config structure */
   3450 
   3451 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
   3452 				       M_RAIDFRAME,
   3453 				       M_NOWAIT);
   3454 	if (config==NULL) {
   3455 		printf("Out of mem!?!?\n");
   3456 				/* XXX do something more intelligent here. */
   3457 		return(1);
   3458 	}
   3459 
   3460 	memset(config, 0, sizeof(RF_Config_t));
   3461 
    3462 	/*
    3463 	   2. Figure out what RAID ID this one is supposed to live at.
    3464 	   See if we can get the same RAID dev that it was configured
    3465 	   on last time.
    3466 	*/
   3467 
   3468 	raidID = cset->ac->clabel->last_unit;
   3469 	if ((raidID < 0) || (raidID >= numraid)) {
    3470 		/* out-of-range last_unit; don't wander off into lala land. */
   3471 		raidID = numraid - 1;
   3472 	}
   3473 	if (raidPtrs[raidID]->valid != 0) {
   3474 
    3475 		/*
    3476 		   Nope... go looking for an alternative.
    3477 		   Start high so we don't immediately grab raid0 even
    3478 		   if it happens to be free.
    3479 		*/
   3480 
   3481 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
   3482 			if (raidPtrs[raidID]->valid == 0) {
   3483 				/* can use this one! */
   3484 				break;
   3485 			}
   3486 		}
   3487 	}
   3488 
   3489 	if (raidID < 0) {
   3490 		/* punt... */
   3491 		printf("Unable to auto configure this set!\n");
   3492 		printf("(Out of RAID devs!)\n");
   3493 		free(config, M_RAIDFRAME);
   3494 		return(1);
   3495 	}
   3496 
   3497 #ifdef DEBUG
   3498 	printf("Configuring raid%d:\n",raidID);
   3499 #endif
   3500 
   3501 	raidPtr = raidPtrs[raidID];
   3502 
   3503 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
   3504 	raidPtr->raidid = raidID;
   3505 	raidPtr->openings = RAIDOUTSTANDING;
   3506 
   3507 	/* 3. Build the configuration structure */
   3508 	rf_create_configuration(cset->ac, config, raidPtr);
   3509 
   3510 	/* 4. Do the configuration */
   3511 	retcode = rf_Configure(raidPtr, config, cset->ac);
   3512 
   3513 	if (retcode == 0) {
   3514 
   3515 		raidinit(raidPtrs[raidID]);
   3516 
   3517 		rf_markalldirty(raidPtrs[raidID]);
   3518 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
   3519 		if (cset->ac->clabel->root_partition==1) {
   3520 			/* everything configured just fine.  Make a note
   3521 			   that this set is eligible to be root. */
   3522 			cset->rootable = 1;
   3523 			/* XXX do this here? */
   3524 			raidPtrs[raidID]->root_partition = 1;
   3525 		}
   3526 	}
   3527 
   3528 	/* 5. Cleanup */
   3529 	free(config, M_RAIDFRAME);
   3530 
   3531 	*unit = raidID;
   3532 	return(retcode);
   3533 }
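/*
 * A minimal sketch of how the pieces above fit together for one
 * RF_ConfigSet_t, assuming a caller that walks the sets found by the
 * component probe (the probe itself lives earlier in this file):
 *
 *	if (have_enough && cset->ac->clabel->autoconfigure == 1) {
 *		if (rf_auto_config_set(cset, &unit) != 0)
 *			rf_release_all_vps(cset);
 *	} else {
 *		rf_release_all_vps(cset);
 *	}
 *	rf_cleanup_config_set(cset);
 *
 * where "have_enough" stands for the component-count check at the top
 * of this section.  A successfully configured set keeps its component
 * vnodes, so they are only released when configuration is skipped or
 * fails.
 */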
   3534 
   3535 void
   3536 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
   3537 {
   3538 	struct buf *bp;
   3539 
   3540 	bp = (struct buf *)desc->bp;
   3541 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
   3542 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
   3543 }
   3544 
   3545 void
   3546 rf_pool_init(struct pool *p, size_t size, const char *w_chan,
   3547 	     size_t xmin, size_t xmax)
   3548 {
   3549 	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
   3550 	pool_sethiwat(p, xmax);
   3551 	pool_prime(p, xmin);
   3552 	pool_setlowat(p, xmin);
   3553 }
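/*
 * Usage sketch with a hypothetical pool and tuning values (the real
 * callers and their sizes live elsewhere in the driver):
 *
 *	static struct pool rf_example_pool;
 *
 *	rf_pool_init(&rf_example_pool, sizeof(struct buf),
 *	    "rf_examplepl", 16, 32);
 *
 * i.e. prime the pool with 16 preallocated items, bound the idle
 * memory it caches via the high water mark, and use "rf_examplepl" as
 * the pool's name and wait channel.
 */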
   3554 
   3555 /*
   3556  * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
   3557  * if there is IO pending and if that IO could possibly be done for a
   3558  * given RAID set.  Returns 0 if IO is waiting and can be done, 1
   3559  * otherwise.
   3560  *
   3561  */
   3562 
   3563 int
   3564 rf_buf_queue_check(int raidid)
   3565 {
   3566 	if ((BUFQ_PEEK(raid_softc[raidid].buf_queue) != NULL) &&
   3567 	    raidPtrs[raidid]->openings > 0) {
   3568 		/* there is work to do */
   3569 		return 0;
   3570 	}
   3571 	/* default is nothing to do */
   3572 	return 1;
   3573 }
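/*
 * Usage sketch (hypothetical caller; the real consumers are outside
 * this excerpt).  raidstart() is assumed to be the routine that pulls
 * buffers off buf_queue and turns them into RAIDframe accesses:
 *
 *	while (rf_buf_queue_check(raidid) == 0)
 *		raidstart(raidPtrs[raidid]);
 *
 * i.e. keep issuing queued I/O while work is pending and the set still
 * has openings available.
 */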
   3574 
   3575 int
   3576 rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
   3577 {
   3578 	struct partinfo dpart;
   3579 	struct dkwedge_info dkw;
   3580 	int error;
   3581 
   3582 	error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred);
   3583 	if (error == 0) {
   3584 		diskPtr->blockSize = dpart.disklab->d_secsize;
   3585 		diskPtr->numBlocks = dpart.part->p_size - rf_protectedSectors;
   3586 		diskPtr->partitionSize = dpart.part->p_size;
   3587 		return 0;
   3588 	}
   3589 
   3590 	error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD, l->l_cred);
   3591 	if (error == 0) {
   3592 		diskPtr->blockSize = 512;	/* XXX */
   3593 		diskPtr->numBlocks = dkw.dkw_size - rf_protectedSectors;
   3594 		diskPtr->partitionSize = dkw.dkw_size;
   3595 		return 0;
   3596 	}
   3597 	return error;
   3598 }
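/*
 * The two branches above cover the two ways a component can be named:
 * DIOCGPART for a traditional disklabel partition and DIOCGWEDGEINFO
 * for a dk(4) wedge.  In the wedge case the sector size is assumed to
 * be 512 bytes (hence the XXX), since the wedge info only supplies a
 * size in blocks.
 */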
   3599 
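/*
 * Autoconfiguration glue.  raid(4) is a pseudo-device brought up via
 * ioctl (or RAIDframe's own autoconfiguration) rather than by bus
 * probing, so matching always succeeds and attach has nothing to do;
 * detach only refuses while a set is still initialised.
 */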
   3600 static int
   3601 raid_match(struct device *self, struct cfdata *cfdata,
   3602     void *aux)
   3603 {
   3604 	return 1;
   3605 }
   3606 
   3607 static void
   3608 raid_attach(struct device *parent, struct device *self,
   3609     void *aux)
   3610 {
   3611 
   3612 }
   3613 
   3614 
   3615 static int
   3616 raid_detach(struct device *self, int flags)
   3617 {
   3618 	struct raid_softc *rs = (struct raid_softc *)self;
   3619 
   3620 	if (rs->sc_flags & RAIDF_INITED)
   3621 		return EBUSY;
   3622 
   3623 	return 0;
   3624 }
   3625 
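/*
 * Publish a synthetic geometry for the RAID set through the device
 * properties dictionary.  A RAID set has no real tracks or cylinders,
 * so "sectors-per-track" is taken from the data sectors per stripe and
 * "tracks-per-cylinder" from four times the number of columns,
 * apparently just so the derived cylinder count comes out plausible.
 */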
   3626 static void
   3627 rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
   3628 {
   3629 	prop_dictionary_t disk_info, odisk_info, geom;
   3630 	disk_info = prop_dictionary_create();
   3631 	geom = prop_dictionary_create();
   3632 	prop_dictionary_set_uint64(geom, "sectors-per-unit",
   3633 				   raidPtr->totalSectors);
   3634 	prop_dictionary_set_uint32(geom, "sector-size",
   3635 				   raidPtr->bytesPerSector);
   3636 
   3637 	prop_dictionary_set_uint16(geom, "sectors-per-track",
   3638 				   raidPtr->Layout.dataSectorsPerStripe);
   3639 	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
   3640 				   4 * raidPtr->numCol);
   3641 
   3642 	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
   3643 	   raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
   3644 	   (4 * raidPtr->numCol)));
   3645 
   3646 	prop_dictionary_set(disk_info, "geometry", geom);
   3647 	prop_object_release(geom);
   3648 	prop_dictionary_set(device_properties(rs->sc_dev),
   3649 			    "disk-info", disk_info);
   3650 	odisk_info = rs->sc_dkdev.dk_info;
   3651 	rs->sc_dkdev.dk_info = disk_info;
   3652 	if (odisk_info)
   3653 		prop_object_release(odisk_info);
   3654 }
   3655 
   3656 /*
   3657  * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
   3658  * We end up returning whatever error was returned by the first cache flush
   3659  * that fails.
   3660  */
   3661 
   3662 static int
   3663 rf_sync_component_caches(RF_Raid_t *raidPtr)
   3664 {
   3665 	int c, sparecol;
    3666 	int e, error;
   3667 	int force = 1;
   3668 
   3669 	error = 0;
   3670 	for (c = 0; c < raidPtr->numCol; c++) {
   3671 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   3672 			e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
   3673 					  &force, FWRITE, NOCRED);
   3674 			if (e) {
   3675 				if (e != ENODEV)
   3676 					printf("raid%d: cache flush to component %s failed.\n",
   3677 					       raidPtr->raidid, raidPtr->Disks[c].devname);
   3678 				if (error == 0) {
   3679 					error = e;
   3680 				}
   3681 			}
   3682 		}
   3683 	}
   3684 
    3685 	for(c = 0; c < raidPtr->numSpare; c++) {
   3686 		sparecol = raidPtr->numCol + c;
   3687 		/* Need to ensure that the reconstruct actually completed! */
   3688 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3689 			e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
   3690 					  DIOCCACHESYNC, &force, FWRITE, NOCRED);
   3691 			if (e) {
   3692 				if (e != ENODEV)
   3693 					printf("raid%d: cache flush to component %s failed.\n",
   3694 					       raidPtr->raidid, raidPtr->Disks[sparecol].devname);
   3695 				if (error == 0) {
   3696 					error = e;
   3697 				}
   3698 			}
   3699 		}
   3700 	}
   3701 	return error;
   3702 }
   3703
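/*
 * Usage sketch (assumed caller): the DIOCCACHESYNC case of the raid
 * ioctl handler can simply forward the request, e.g.
 *
 *	case DIOCCACHESYNC:
 *		return rf_sync_component_caches(raidPtr);
 *
 * so that a cache flush on the raid device reaches every optimal
 * component and every spare that has finished reconstruction.
 */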