Home | History | Annotate | Line # | Download | only in raidframe
rf_netbsdkintf.c revision 1.201
      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.201 2006/02/24 03:20:22 oster Exp $	*/
      2 /*-
      3  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster; Jason R. Thorpe.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *        This product includes software developed by the NetBSD
     20  *        Foundation, Inc. and its contributors.
     21  * 4. Neither the name of The NetBSD Foundation nor the names of its
     22  *    contributors may be used to endorse or promote products derived
     23  *    from this software without specific prior written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright (c) 1990, 1993
     40  *      The Regents of the University of California.  All rights reserved.
     41  *
     42  * This code is derived from software contributed to Berkeley by
     43  * the Systems Programming Group of the University of Utah Computer
     44  * Science Department.
     45  *
     46  * Redistribution and use in source and binary forms, with or without
     47  * modification, are permitted provided that the following conditions
     48  * are met:
     49  * 1. Redistributions of source code must retain the above copyright
     50  *    notice, this list of conditions and the following disclaimer.
     51  * 2. Redistributions in binary form must reproduce the above copyright
     52  *    notice, this list of conditions and the following disclaimer in the
     53  *    documentation and/or other materials provided with the distribution.
     54  * 3. Neither the name of the University nor the names of its contributors
     55  *    may be used to endorse or promote products derived from this software
     56  *    without specific prior written permission.
     57  *
     58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     68  * SUCH DAMAGE.
     69  *
     70  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     71  *
     72  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     73  */
     74 
     75 /*
     76  * Copyright (c) 1988 University of Utah.
     77  *
     78  * This code is derived from software contributed to Berkeley by
     79  * the Systems Programming Group of the University of Utah Computer
     80  * Science Department.
     81  *
     82  * Redistribution and use in source and binary forms, with or without
     83  * modification, are permitted provided that the following conditions
     84  * are met:
     85  * 1. Redistributions of source code must retain the above copyright
     86  *    notice, this list of conditions and the following disclaimer.
     87  * 2. Redistributions in binary form must reproduce the above copyright
     88  *    notice, this list of conditions and the following disclaimer in the
     89  *    documentation and/or other materials provided with the distribution.
     90  * 3. All advertising materials mentioning features or use of this software
     91  *    must display the following acknowledgement:
     92  *      This product includes software developed by the University of
     93  *      California, Berkeley and its contributors.
     94  * 4. Neither the name of the University nor the names of its contributors
     95  *    may be used to endorse or promote products derived from this software
     96  *    without specific prior written permission.
     97  *
     98  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     99  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    100  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    101  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
    102  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    103  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
    104  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    105  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    106  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    107  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    108  * SUCH DAMAGE.
    109  *
    110  * from: Utah $Hdr: cd.c 1.6 90/11/28$
    111  *
    112  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
    113  */
    114 
    115 /*
    116  * Copyright (c) 1995 Carnegie-Mellon University.
    117  * All rights reserved.
    118  *
    119  * Authors: Mark Holland, Jim Zelenka
    120  *
    121  * Permission to use, copy, modify and distribute this software and
    122  * its documentation is hereby granted, provided that both the copyright
    123  * notice and this permission notice appear in all copies of the
    124  * software, derivative works or modified versions, and any portions
    125  * thereof, and that both notices appear in supporting documentation.
    126  *
    127  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
    128  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
    129  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
    130  *
    131  * Carnegie Mellon requests users of this software to return to
    132  *
    133  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
    134  *  School of Computer Science
    135  *  Carnegie Mellon University
    136  *  Pittsburgh PA 15213-3890
    137  *
    138  * any improvements or extensions that they make and grant Carnegie the
    139  * rights to redistribute these changes.
    140  */
    141 
    142 /***********************************************************
    143  *
    144  * rf_kintf.c -- the kernel interface routines for RAIDframe
    145  *
    146  ***********************************************************/
    147 
    148 #include <sys/cdefs.h>
    149 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.201 2006/02/24 03:20:22 oster Exp $");
    150 
    151 #include <sys/param.h>
    152 #include <sys/errno.h>
    153 #include <sys/pool.h>
    154 #include <sys/proc.h>
    155 #include <sys/queue.h>
    156 #include <sys/disk.h>
    157 #include <sys/device.h>
    158 #include <sys/stat.h>
    159 #include <sys/ioctl.h>
    160 #include <sys/fcntl.h>
    161 #include <sys/systm.h>
    162 #include <sys/namei.h>
    163 #include <sys/vnode.h>
    164 #include <sys/disklabel.h>
    165 #include <sys/conf.h>
    166 #include <sys/lock.h>
    167 #include <sys/buf.h>
    168 #include <sys/bufq.h>
    169 #include <sys/user.h>
    170 #include <sys/reboot.h>
    171 
    172 #include <dev/raidframe/raidframevar.h>
    173 #include <dev/raidframe/raidframeio.h>
    174 #include "raid.h"
    175 #include "opt_raid_autoconfig.h"
    176 #include "rf_raid.h"
    177 #include "rf_copyback.h"
    178 #include "rf_dag.h"
    179 #include "rf_dagflags.h"
    180 #include "rf_desc.h"
    181 #include "rf_diskqueue.h"
    182 #include "rf_etimer.h"
    183 #include "rf_general.h"
    184 #include "rf_kintf.h"
    185 #include "rf_options.h"
    186 #include "rf_driver.h"
    187 #include "rf_parityscan.h"
    188 #include "rf_threadstuff.h"
    189 
    190 #ifdef DEBUG
    191 int     rf_kdebug_level = 0;
    192 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    193 #else				/* DEBUG */
    194 #define db1_printf(a) { }
    195 #endif				/* DEBUG */
    196 
    197 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
    198 
    199 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
    200 
    201 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    202 						 * spare table */
    203 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    204 						 * installation process */
    205 
    206 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    207 
    208 /* prototypes */
    209 static void KernelWakeupFunc(struct buf *);
    210 static void InitBP(struct buf *, struct vnode *, unsigned,
    211     dev_t, RF_SectorNum_t, RF_SectorCount_t, caddr_t, void (*) (struct buf *),
    212     void *, int, struct proc *);
    213 static void raidinit(RF_Raid_t *);
    214 
    215 void raidattach(int);
    216 
    217 dev_type_open(raidopen);
    218 dev_type_close(raidclose);
    219 dev_type_read(raidread);
    220 dev_type_write(raidwrite);
    221 dev_type_ioctl(raidioctl);
    222 dev_type_strategy(raidstrategy);
    223 dev_type_dump(raiddump);
    224 dev_type_size(raidsize);
    225 
    226 const struct bdevsw raid_bdevsw = {
    227 	raidopen, raidclose, raidstrategy, raidioctl,
    228 	raiddump, raidsize, D_DISK
    229 };
    230 
    231 const struct cdevsw raid_cdevsw = {
    232 	raidopen, raidclose, raidread, raidwrite, raidioctl,
    233 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    234 };
    235 
    236 /* XXX Not sure if the following should be replacing the raidPtrs above,
    237    or if it should be used in conjunction with that...
    238 */
    239 
    240 struct raid_softc {
    241 	int     sc_flags;	/* flags */
    242 	int     sc_cflags;	/* configuration flags */
    243 	size_t  sc_size;        /* size of the raid device */
    244 	char    sc_xname[20];	/* XXX external name */
    245 	struct disk sc_dkdev;	/* generic disk device info */
    246 	struct bufq_state *buf_queue;	/* used for the device queue */
    247 };
    248 /* sc_flags */
    249 #define RAIDF_INITED	0x01	/* unit has been initialized */
    250 #define RAIDF_WLABEL	0x02	/* label area is writable */
    251 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    252 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    253 #define RAIDF_LOCKED	0x80	/* unit is locked */
    254 
    255 #define	raidunit(x)	DISKUNIT(x)
    256 int numraid = 0;
    257 
    258 /*
    259  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    260  * Be aware that large numbers can allow the driver to consume a lot of
    261  * kernel memory, especially on writes, and in degraded mode reads.
    262  *
    263  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    264  * a single 64K write will typically require 64K for the old data,
    265  * 64K for the old parity, and 64K for the new parity, for a total
    266  * of 192K (if the parity buffer is not re-used immediately).
    267  * Even it if is used immediately, that's still 128K, which when multiplied
    268  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    269  *
    270  * Now in degraded mode, for example, a 64K read on the above setup may
    271  * require data reconstruction, which will require *all* of the 4 remaining
    272  * disks to participate -- 4 * 32K/disk == 128K again.
    273  */
    274 
    275 #ifndef RAIDOUTSTANDING
    276 #define RAIDOUTSTANDING   6
    277 #endif
    278 
    279 #define RAIDLABELDEV(dev)	\
    280 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
    281 
    282 /* declared here, and made public, for the benefit of KVM stuff.. */
    283 struct raid_softc *raid_softc;
    284 
    285 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
    286 				     struct disklabel *);
    287 static void raidgetdisklabel(dev_t);
    288 static void raidmakedisklabel(struct raid_softc *);
    289 
    290 static int raidlock(struct raid_softc *);
    291 static void raidunlock(struct raid_softc *);
    292 
    293 static void rf_markalldirty(RF_Raid_t *);
    294 
    295 struct device *raidrootdev;
    296 
    297 void rf_ReconThread(struct rf_recon_req *);
    298 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    299 void rf_CopybackThread(RF_Raid_t *raidPtr);
    300 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    301 int rf_autoconfig(struct device *self);
    302 void rf_buildroothack(RF_ConfigSet_t *);
    303 
    304 RF_AutoConfig_t *rf_find_raid_components(void);
    305 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    306 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    307 static int rf_reasonable_label(RF_ComponentLabel_t *);
    308 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    309 int rf_set_autoconfig(RF_Raid_t *, int);
    310 int rf_set_rootpartition(RF_Raid_t *, int);
    311 void rf_release_all_vps(RF_ConfigSet_t *);
    312 void rf_cleanup_config_set(RF_ConfigSet_t *);
    313 int rf_have_enough_components(RF_ConfigSet_t *);
    314 int rf_auto_config_set(RF_ConfigSet_t *, int *);
    315 
    316 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
    317 				  allow autoconfig to take place.
    318 			          Note that this is overridden by having
    319 			          RAID_AUTOCONFIG as an option in the
    320 			          kernel config file.  */
    321 
    322 struct RF_Pools_s rf_pools;
    323 
    324 void
    325 raidattach(int num)
    326 {
    327 	int raidID;
    328 	int i, rc;
    329 
    330 #ifdef DEBUG
    331 	printf("raidattach: Asked for %d units\n", num);
    332 #endif
    333 
    334 	if (num <= 0) {
    335 #ifdef DIAGNOSTIC
    336 		panic("raidattach: count <= 0");
    337 #endif
    338 		return;
    339 	}
    340 	/* This is where all the initialization stuff gets done. */
    341 
    342 	numraid = num;
    343 
    344 	/* Make some space for requested number of units... */
    345 
    346 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
    347 	if (raidPtrs == NULL) {
    348 		panic("raidPtrs is NULL!!");
    349 	}
    350 
    351 	rf_mutex_init(&rf_sparet_wait_mutex);
    352 
    353 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
    354 
    355 	for (i = 0; i < num; i++)
    356 		raidPtrs[i] = NULL;
    357 	rc = rf_BootRaidframe();
    358 	if (rc == 0)
    359 		printf("Kernelized RAIDframe activated\n");
    360 	else
    361 		panic("Serious error booting RAID!!");
    362 
    363 	/* put together some datastructures like the CCD device does.. This
    364 	 * lets us lock the device and what-not when it gets opened. */
    365 
    366 	raid_softc = (struct raid_softc *)
    367 		malloc(num * sizeof(struct raid_softc),
    368 		       M_RAIDFRAME, M_NOWAIT);
    369 	if (raid_softc == NULL) {
    370 		printf("WARNING: no memory for RAIDframe driver\n");
    371 		return;
    372 	}
    373 
    374 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
    375 
    376 	raidrootdev = (struct device *)malloc(num * sizeof(struct device),
    377 					      M_RAIDFRAME, M_NOWAIT);
    378 	if (raidrootdev == NULL) {
    379 		panic("No memory for RAIDframe driver!!?!?!");
    380 	}
    381 
    382 	for (raidID = 0; raidID < num; raidID++) {
    383 		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);
    384 		pseudo_disk_init(&raid_softc[raidID].sc_dkdev);
    385 
    386 		/* XXXJRT Should use config_attach_pseudo() */
    387 
    388 		raidrootdev[raidID].dv_class  = DV_DISK;
    389 		raidrootdev[raidID].dv_cfdata = NULL;
    390 		raidrootdev[raidID].dv_unit   = raidID;
    391 		raidrootdev[raidID].dv_parent = NULL;
    392 		raidrootdev[raidID].dv_flags  = 0;
    393 		snprintf(raidrootdev[raidID].dv_xname,
    394 		    sizeof(raidrootdev[raidID].dv_xname), "raid%d", raidID);
    395 
    396 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
    397 			  (RF_Raid_t *));
    398 		if (raidPtrs[raidID] == NULL) {
    399 			printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
    400 			numraid = raidID;
    401 			return;
    402 		}
    403 	}
    404 
    405 #ifdef RAID_AUTOCONFIG
    406 	raidautoconfig = 1;
    407 #endif
    408 
    409 	/*
    410 	 * Register a finalizer which will be used to auto-config RAID
    411 	 * sets once all real hardware devices have been found.
    412 	 */
    413 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
    414 		printf("WARNING: unable to register RAIDframe finalizer\n");
    415 }
    416 
    417 int
    418 rf_autoconfig(struct device *self)
    419 {
    420 	RF_AutoConfig_t *ac_list;
    421 	RF_ConfigSet_t *config_sets;
    422 
    423 	if (raidautoconfig == 0)
    424 		return (0);
    425 
    426 	/* XXX This code can only be run once. */
    427 	raidautoconfig = 0;
    428 
    429 	/* 1. locate all RAID components on the system */
    430 #ifdef DEBUG
    431 	printf("Searching for RAID components...\n");
    432 #endif
    433 	ac_list = rf_find_raid_components();
    434 
    435 	/* 2. Sort them into their respective sets. */
    436 	config_sets = rf_create_auto_sets(ac_list);
    437 
    438 	/*
    439 	 * 3. Evaluate each set andconfigure the valid ones.
    440 	 * This gets done in rf_buildroothack().
    441 	 */
    442 	rf_buildroothack(config_sets);
    443 
    444 	return (1);
    445 }
    446 
    447 void
    448 rf_buildroothack(RF_ConfigSet_t *config_sets)
    449 {
    450 	RF_ConfigSet_t *cset;
    451 	RF_ConfigSet_t *next_cset;
    452 	int retcode;
    453 	int raidID;
    454 	int rootID;
    455 	int num_root;
    456 
    457 	rootID = 0;
    458 	num_root = 0;
    459 	cset = config_sets;
    460 	while(cset != NULL ) {
    461 		next_cset = cset->next;
    462 		if (rf_have_enough_components(cset) &&
    463 		    cset->ac->clabel->autoconfigure==1) {
    464 			retcode = rf_auto_config_set(cset,&raidID);
    465 			if (!retcode) {
    466 				if (cset->rootable) {
    467 					rootID = raidID;
    468 					num_root++;
    469 				}
    470 			} else {
    471 				/* The autoconfig didn't work :( */
    472 #if DEBUG
    473 				printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
    474 #endif
    475 				rf_release_all_vps(cset);
    476 			}
    477 		} else {
    478 			/* we're not autoconfiguring this set...
    479 			   release the associated resources */
    480 			rf_release_all_vps(cset);
    481 		}
    482 		/* cleanup */
    483 		rf_cleanup_config_set(cset);
    484 		cset = next_cset;
    485 	}
    486 
    487 	/* we found something bootable... */
    488 
    489 	if (num_root == 1) {
    490 		booted_device = &raidrootdev[rootID];
    491 	} else if (num_root > 1) {
    492 		/* we can't guess.. require the user to answer... */
    493 		boothowto |= RB_ASKNAME;
    494 	}
    495 }
    496 
    497 
    498 int
    499 raidsize(dev_t dev)
    500 {
    501 	struct raid_softc *rs;
    502 	struct disklabel *lp;
    503 	int     part, unit, omask, size;
    504 
    505 	unit = raidunit(dev);
    506 	if (unit >= numraid)
    507 		return (-1);
    508 	rs = &raid_softc[unit];
    509 
    510 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    511 		return (-1);
    512 
    513 	part = DISKPART(dev);
    514 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
    515 	lp = rs->sc_dkdev.dk_label;
    516 
    517 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
    518 		return (-1);
    519 
    520 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    521 		size = -1;
    522 	else
    523 		size = lp->d_partitions[part].p_size *
    524 		    (lp->d_secsize / DEV_BSIZE);
    525 
    526 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
    527 		return (-1);
    528 
    529 	return (size);
    530 
    531 }
    532 
    533 int
    534 raiddump(dev_t dev, daddr_t blkno, caddr_t va, size_t  size)
    535 {
    536 	/* Not implemented. */
    537 	return ENXIO;
    538 }
    539 /* ARGSUSED */
    540 int
    541 raidopen(dev_t dev, int flags, int fmt, struct lwp *l)
    542 {
    543 	int     unit = raidunit(dev);
    544 	struct raid_softc *rs;
    545 	struct disklabel *lp;
    546 	int     part, pmask;
    547 	int     error = 0;
    548 
    549 	if (unit >= numraid)
    550 		return (ENXIO);
    551 	rs = &raid_softc[unit];
    552 
    553 	if ((error = raidlock(rs)) != 0)
    554 		return (error);
    555 	lp = rs->sc_dkdev.dk_label;
    556 
    557 	part = DISKPART(dev);
    558 	pmask = (1 << part);
    559 
    560 	if ((rs->sc_flags & RAIDF_INITED) &&
    561 	    (rs->sc_dkdev.dk_openmask == 0))
    562 		raidgetdisklabel(dev);
    563 
    564 	/* make sure that this partition exists */
    565 
    566 	if (part != RAW_PART) {
    567 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
    568 		    ((part >= lp->d_npartitions) ||
    569 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    570 			error = ENXIO;
    571 			raidunlock(rs);
    572 			return (error);
    573 		}
    574 	}
    575 	/* Prevent this unit from being unconfigured while open. */
    576 	switch (fmt) {
    577 	case S_IFCHR:
    578 		rs->sc_dkdev.dk_copenmask |= pmask;
    579 		break;
    580 
    581 	case S_IFBLK:
    582 		rs->sc_dkdev.dk_bopenmask |= pmask;
    583 		break;
    584 	}
    585 
    586 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    587 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    588 		/* First one... mark things as dirty... Note that we *MUST*
    589 		 have done a configure before this.  I DO NOT WANT TO BE
    590 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    591 		 THAT THEY BELONG TOGETHER!!!!! */
    592 		/* XXX should check to see if we're only open for reading
    593 		   here... If so, we needn't do this, but then need some
    594 		   other way of keeping track of what's happened.. */
    595 
    596 		rf_markalldirty( raidPtrs[unit] );
    597 	}
    598 
    599 
    600 	rs->sc_dkdev.dk_openmask =
    601 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    602 
    603 	raidunlock(rs);
    604 
    605 	return (error);
    606 
    607 
    608 }
    609 /* ARGSUSED */
    610 int
    611 raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
    612 {
    613 	int     unit = raidunit(dev);
    614 	struct raid_softc *rs;
    615 	int     error = 0;
    616 	int     part;
    617 
    618 	if (unit >= numraid)
    619 		return (ENXIO);
    620 	rs = &raid_softc[unit];
    621 
    622 	if ((error = raidlock(rs)) != 0)
    623 		return (error);
    624 
    625 	part = DISKPART(dev);
    626 
    627 	/* ...that much closer to allowing unconfiguration... */
    628 	switch (fmt) {
    629 	case S_IFCHR:
    630 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
    631 		break;
    632 
    633 	case S_IFBLK:
    634 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
    635 		break;
    636 	}
    637 	rs->sc_dkdev.dk_openmask =
    638 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    639 
    640 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    641 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    642 		/* Last one... device is not unconfigured yet.
    643 		   Device shutdown has taken care of setting the
    644 		   clean bits if RAIDF_INITED is not set
    645 		   mark things as clean... */
    646 
    647 		rf_update_component_labels(raidPtrs[unit],
    648 						 RF_FINAL_COMPONENT_UPDATE);
    649 		if (doing_shutdown) {
    650 			/* last one, and we're going down, so
    651 			   lights out for this RAID set too. */
    652 			error = rf_Shutdown(raidPtrs[unit]);
    653 
    654 			/* It's no longer initialized... */
    655 			rs->sc_flags &= ~RAIDF_INITED;
    656 
    657 			/* Detach the disk. */
    658 			pseudo_disk_detach(&rs->sc_dkdev);
    659 		}
    660 	}
    661 
    662 	raidunlock(rs);
    663 	return (0);
    664 
    665 }
    666 
    667 void
    668 raidstrategy(struct buf *bp)
    669 {
    670 	int s;
    671 
    672 	unsigned int raidID = raidunit(bp->b_dev);
    673 	RF_Raid_t *raidPtr;
    674 	struct raid_softc *rs = &raid_softc[raidID];
    675 	int     wlabel;
    676 
    677 	if ((rs->sc_flags & RAIDF_INITED) ==0) {
    678 		bp->b_error = ENXIO;
    679 		bp->b_flags |= B_ERROR;
    680 		goto done;
    681 	}
    682 	if (raidID >= numraid || !raidPtrs[raidID]) {
    683 		bp->b_error = ENODEV;
    684 		bp->b_flags |= B_ERROR;
    685 		goto done;
    686 	}
    687 	raidPtr = raidPtrs[raidID];
    688 	if (!raidPtr->valid) {
    689 		bp->b_error = ENODEV;
    690 		bp->b_flags |= B_ERROR;
    691 		goto done;
    692 	}
    693 	if (bp->b_bcount == 0) {
    694 		db1_printf(("b_bcount is zero..\n"));
    695 		goto done;
    696 	}
    697 
    698 	/*
    699 	 * Do bounds checking and adjust transfer.  If there's an
    700 	 * error, the bounds check will flag that for us.
    701 	 */
    702 
    703 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
    704 	if (DISKPART(bp->b_dev) == RAW_PART) {
    705 		uint64_t size; /* device size in DEV_BSIZE unit */
    706 
    707 		if (raidPtr->logBytesPerSector > DEV_BSHIFT) {
    708 			size = raidPtr->totalSectors <<
    709 			    (raidPtr->logBytesPerSector - DEV_BSHIFT);
    710 		} else {
    711 			size = raidPtr->totalSectors >>
    712 			    (DEV_BSHIFT - raidPtr->logBytesPerSector);
    713 		}
    714 		if (bounds_check_with_mediasize(bp, DEV_BSIZE, size) <= 0) {
    715 			goto done;
    716 		}
    717 	} else {
    718 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
    719 			db1_printf(("Bounds check failed!!:%d %d\n",
    720 				(int) bp->b_blkno, (int) wlabel));
    721 			goto done;
    722 		}
    723 	}
    724 	s = splbio();
    725 
    726 	bp->b_resid = 0;
    727 
    728 	/* stuff it onto our queue */
    729 	BUFQ_PUT(rs->buf_queue, bp);
    730 
    731 	/* scheduled the IO to happen at the next convenient time */
    732 	wakeup(&(raidPtrs[raidID]->iodone));
    733 
    734 	splx(s);
    735 	return;
    736 
    737 done:
    738 	bp->b_resid = bp->b_bcount;
    739 	biodone(bp);
    740 }
    741 /* ARGSUSED */
    742 int
    743 raidread(dev_t dev, struct uio *uio, int flags)
    744 {
    745 	int     unit = raidunit(dev);
    746 	struct raid_softc *rs;
    747 
    748 	if (unit >= numraid)
    749 		return (ENXIO);
    750 	rs = &raid_softc[unit];
    751 
    752 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    753 		return (ENXIO);
    754 
    755 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    756 
    757 }
    758 /* ARGSUSED */
    759 int
    760 raidwrite(dev_t dev, struct uio *uio, int flags)
    761 {
    762 	int     unit = raidunit(dev);
    763 	struct raid_softc *rs;
    764 
    765 	if (unit >= numraid)
    766 		return (ENXIO);
    767 	rs = &raid_softc[unit];
    768 
    769 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    770 		return (ENXIO);
    771 
    772 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    773 
    774 }
    775 
    776 int
    777 raidioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
    778 {
    779 	int     unit = raidunit(dev);
    780 	int     error = 0;
    781 	int     part, pmask;
    782 	struct raid_softc *rs;
    783 	RF_Config_t *k_cfg, *u_cfg;
    784 	RF_Raid_t *raidPtr;
    785 	RF_RaidDisk_t *diskPtr;
    786 	RF_AccTotals_t *totals;
    787 	RF_DeviceConfig_t *d_cfg, **ucfgp;
    788 	u_char *specific_buf;
    789 	int retcode = 0;
    790 	int column;
    791 	int raidid;
    792 	struct rf_recon_req *rrcopy, *rr;
    793 	RF_ComponentLabel_t *clabel;
    794 	RF_ComponentLabel_t ci_label;
    795 	RF_ComponentLabel_t **clabel_ptr;
    796 	RF_SingleComponent_t *sparePtr,*componentPtr;
    797 	RF_SingleComponent_t hot_spare;
    798 	RF_SingleComponent_t component;
    799 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
    800 	int i, j, d;
    801 #ifdef __HAVE_OLD_DISKLABEL
    802 	struct disklabel newlabel;
    803 #endif
    804 
    805 	if (unit >= numraid)
    806 		return (ENXIO);
    807 	rs = &raid_softc[unit];
    808 	raidPtr = raidPtrs[unit];
    809 
    810 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
    811 		(int) DISKPART(dev), (int) unit, (int) cmd));
    812 
    813 	/* Must be open for writes for these commands... */
    814 	switch (cmd) {
    815 	case DIOCSDINFO:
    816 	case DIOCWDINFO:
    817 #ifdef __HAVE_OLD_DISKLABEL
    818 	case ODIOCWDINFO:
    819 	case ODIOCSDINFO:
    820 #endif
    821 	case DIOCWLABEL:
    822 		if ((flag & FWRITE) == 0)
    823 			return (EBADF);
    824 	}
    825 
    826 	/* Must be initialized for these... */
    827 	switch (cmd) {
    828 	case DIOCGDINFO:
    829 	case DIOCSDINFO:
    830 	case DIOCWDINFO:
    831 #ifdef __HAVE_OLD_DISKLABEL
    832 	case ODIOCGDINFO:
    833 	case ODIOCWDINFO:
    834 	case ODIOCSDINFO:
    835 	case ODIOCGDEFLABEL:
    836 #endif
    837 	case DIOCGPART:
    838 	case DIOCWLABEL:
    839 	case DIOCGDEFLABEL:
    840 	case RAIDFRAME_SHUTDOWN:
    841 	case RAIDFRAME_REWRITEPARITY:
    842 	case RAIDFRAME_GET_INFO:
    843 	case RAIDFRAME_RESET_ACCTOTALS:
    844 	case RAIDFRAME_GET_ACCTOTALS:
    845 	case RAIDFRAME_KEEP_ACCTOTALS:
    846 	case RAIDFRAME_GET_SIZE:
    847 	case RAIDFRAME_FAIL_DISK:
    848 	case RAIDFRAME_COPYBACK:
    849 	case RAIDFRAME_CHECK_RECON_STATUS:
    850 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
    851 	case RAIDFRAME_GET_COMPONENT_LABEL:
    852 	case RAIDFRAME_SET_COMPONENT_LABEL:
    853 	case RAIDFRAME_ADD_HOT_SPARE:
    854 	case RAIDFRAME_REMOVE_HOT_SPARE:
    855 	case RAIDFRAME_INIT_LABELS:
    856 	case RAIDFRAME_REBUILD_IN_PLACE:
    857 	case RAIDFRAME_CHECK_PARITY:
    858 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
    859 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
    860 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
    861 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
    862 	case RAIDFRAME_SET_AUTOCONFIG:
    863 	case RAIDFRAME_SET_ROOT:
    864 	case RAIDFRAME_DELETE_COMPONENT:
    865 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
    866 		if ((rs->sc_flags & RAIDF_INITED) == 0)
    867 			return (ENXIO);
    868 	}
    869 
    870 	switch (cmd) {
    871 
    872 		/* configure the system */
    873 	case RAIDFRAME_CONFIGURE:
    874 
    875 		if (raidPtr->valid) {
    876 			/* There is a valid RAID set running on this unit! */
    877 			printf("raid%d: Device already configured!\n",unit);
    878 			return(EINVAL);
    879 		}
    880 
    881 		/* copy-in the configuration information */
    882 		/* data points to a pointer to the configuration structure */
    883 
    884 		u_cfg = *((RF_Config_t **) data);
    885 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
    886 		if (k_cfg == NULL) {
    887 			return (ENOMEM);
    888 		}
    889 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
    890 		if (retcode) {
    891 			RF_Free(k_cfg, sizeof(RF_Config_t));
    892 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
    893 				retcode));
    894 			return (retcode);
    895 		}
    896 		/* allocate a buffer for the layout-specific data, and copy it
    897 		 * in */
    898 		if (k_cfg->layoutSpecificSize) {
    899 			if (k_cfg->layoutSpecificSize > 10000) {
    900 				/* sanity check */
    901 				RF_Free(k_cfg, sizeof(RF_Config_t));
    902 				return (EINVAL);
    903 			}
    904 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
    905 			    (u_char *));
    906 			if (specific_buf == NULL) {
    907 				RF_Free(k_cfg, sizeof(RF_Config_t));
    908 				return (ENOMEM);
    909 			}
    910 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
    911 			    k_cfg->layoutSpecificSize);
    912 			if (retcode) {
    913 				RF_Free(k_cfg, sizeof(RF_Config_t));
    914 				RF_Free(specific_buf,
    915 					k_cfg->layoutSpecificSize);
    916 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
    917 					retcode));
    918 				return (retcode);
    919 			}
    920 		} else
    921 			specific_buf = NULL;
    922 		k_cfg->layoutSpecific = specific_buf;
    923 
    924 		/* should do some kind of sanity check on the configuration.
    925 		 * Store the sum of all the bytes in the last byte? */
    926 
    927 		/* configure the system */
    928 
    929 		/*
    930 		 * Clear the entire RAID descriptor, just to make sure
    931 		 *  there is no stale data left in the case of a
    932 		 *  reconfiguration
    933 		 */
    934 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
    935 		raidPtr->raidid = unit;
    936 
    937 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
    938 
    939 		if (retcode == 0) {
    940 
    941 			/* allow this many simultaneous IO's to
    942 			   this RAID device */
    943 			raidPtr->openings = RAIDOUTSTANDING;
    944 
    945 			raidinit(raidPtr);
    946 			rf_markalldirty(raidPtr);
    947 		}
    948 		/* free the buffers.  No return code here. */
    949 		if (k_cfg->layoutSpecificSize) {
    950 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
    951 		}
    952 		RF_Free(k_cfg, sizeof(RF_Config_t));
    953 
    954 		return (retcode);
    955 
    956 		/* shutdown the system */
    957 	case RAIDFRAME_SHUTDOWN:
    958 
    959 		if ((error = raidlock(rs)) != 0)
    960 			return (error);
    961 
    962 		/*
    963 		 * If somebody has a partition mounted, we shouldn't
    964 		 * shutdown.
    965 		 */
    966 
    967 		part = DISKPART(dev);
    968 		pmask = (1 << part);
    969 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
    970 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
    971 			(rs->sc_dkdev.dk_copenmask & pmask))) {
    972 			raidunlock(rs);
    973 			return (EBUSY);
    974 		}
    975 
    976 		retcode = rf_Shutdown(raidPtr);
    977 
    978 		/* It's no longer initialized... */
    979 		rs->sc_flags &= ~RAIDF_INITED;
    980 
    981 		/* Detach the disk. */
    982 		pseudo_disk_detach(&rs->sc_dkdev);
    983 
    984 		raidunlock(rs);
    985 
    986 		return (retcode);
    987 	case RAIDFRAME_GET_COMPONENT_LABEL:
    988 		clabel_ptr = (RF_ComponentLabel_t **) data;
    989 		/* need to read the component label for the disk indicated
    990 		   by row,column in clabel */
    991 
    992 		/* For practice, let's get it directly fromdisk, rather
    993 		   than from the in-core copy */
    994 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
    995 			   (RF_ComponentLabel_t *));
    996 		if (clabel == NULL)
    997 			return (ENOMEM);
    998 
    999 		retcode = copyin( *clabel_ptr, clabel,
   1000 				  sizeof(RF_ComponentLabel_t));
   1001 
   1002 		if (retcode) {
   1003 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1004 			return(retcode);
   1005 		}
   1006 
   1007 		clabel->row = 0; /* Don't allow looking at anything else.*/
   1008 
   1009 		column = clabel->column;
   1010 
   1011 		if ((column < 0) || (column >= raidPtr->numCol +
   1012 				     raidPtr->numSpare)) {
   1013 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1014 			return(EINVAL);
   1015 		}
   1016 
   1017 		retcode = raidread_component_label(raidPtr->Disks[column].dev,
   1018 				raidPtr->raid_cinfo[column].ci_vp,
   1019 				clabel );
   1020 
   1021 		if (retcode == 0) {
   1022 			retcode = copyout(clabel, *clabel_ptr,
   1023 					  sizeof(RF_ComponentLabel_t));
   1024 		}
   1025 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
   1026 		return (retcode);
   1027 
   1028 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1029 		clabel = (RF_ComponentLabel_t *) data;
   1030 
   1031 		/* XXX check the label for valid stuff... */
   1032 		/* Note that some things *should not* get modified --
   1033 		   the user should be re-initing the labels instead of
   1034 		   trying to patch things.
   1035 		   */
   1036 
   1037 		raidid = raidPtr->raidid;
   1038 #if DEBUG
   1039 		printf("raid%d: Got component label:\n", raidid);
   1040 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1041 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1042 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1043 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1044 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1045 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1046 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1047 #endif
   1048 		clabel->row = 0;
   1049 		column = clabel->column;
   1050 
   1051 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1052 			return(EINVAL);
   1053 		}
   1054 
   1055 		/* XXX this isn't allowed to do anything for now :-) */
   1056 
   1057 		/* XXX and before it is, we need to fill in the rest
   1058 		   of the fields!?!?!?! */
   1059 #if 0
   1060 		raidwrite_component_label(
   1061                             raidPtr->Disks[column].dev,
   1062 			    raidPtr->raid_cinfo[column].ci_vp,
   1063 			    clabel );
   1064 #endif
   1065 		return (0);
   1066 
   1067 	case RAIDFRAME_INIT_LABELS:
   1068 		clabel = (RF_ComponentLabel_t *) data;
   1069 		/*
   1070 		   we only want the serial number from
   1071 		   the above.  We get all the rest of the information
   1072 		   from the config that was used to create this RAID
   1073 		   set.
   1074 		   */
   1075 
   1076 		raidPtr->serial_number = clabel->serial_number;
   1077 
   1078 		raid_init_component_label(raidPtr, &ci_label);
   1079 		ci_label.serial_number = clabel->serial_number;
   1080 		ci_label.row = 0; /* we dont' pretend to support more */
   1081 
   1082 		for(column=0;column<raidPtr->numCol;column++) {
   1083 			diskPtr = &raidPtr->Disks[column];
   1084 			if (!RF_DEAD_DISK(diskPtr->status)) {
   1085 				ci_label.partitionSize = diskPtr->partitionSize;
   1086 				ci_label.column = column;
   1087 				raidwrite_component_label(
   1088 							  raidPtr->Disks[column].dev,
   1089 							  raidPtr->raid_cinfo[column].ci_vp,
   1090 							  &ci_label );
   1091 			}
   1092 		}
   1093 
   1094 		return (retcode);
   1095 	case RAIDFRAME_SET_AUTOCONFIG:
   1096 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1097 		printf("raid%d: New autoconfig value is: %d\n",
   1098 		       raidPtr->raidid, d);
   1099 		*(int *) data = d;
   1100 		return (retcode);
   1101 
   1102 	case RAIDFRAME_SET_ROOT:
   1103 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1104 		printf("raid%d: New rootpartition value is: %d\n",
   1105 		       raidPtr->raidid, d);
   1106 		*(int *) data = d;
   1107 		return (retcode);
   1108 
   1109 		/* initialize all parity */
   1110 	case RAIDFRAME_REWRITEPARITY:
   1111 
   1112 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1113 			/* Parity for RAID 0 is trivially correct */
   1114 			raidPtr->parity_good = RF_RAID_CLEAN;
   1115 			return(0);
   1116 		}
   1117 
   1118 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1119 			/* Re-write is already in progress! */
   1120 			return(EINVAL);
   1121 		}
   1122 
   1123 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1124 					   rf_RewriteParityThread,
   1125 					   raidPtr,"raid_parity");
   1126 		return (retcode);
   1127 
   1128 
   1129 	case RAIDFRAME_ADD_HOT_SPARE:
   1130 		sparePtr = (RF_SingleComponent_t *) data;
   1131 		memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
   1132 		retcode = rf_add_hot_spare(raidPtr, &hot_spare);
   1133 		return(retcode);
   1134 
   1135 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1136 		return(retcode);
   1137 
   1138 	case RAIDFRAME_DELETE_COMPONENT:
   1139 		componentPtr = (RF_SingleComponent_t *)data;
   1140 		memcpy( &component, componentPtr,
   1141 			sizeof(RF_SingleComponent_t));
   1142 		retcode = rf_delete_component(raidPtr, &component);
   1143 		return(retcode);
   1144 
   1145 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1146 		componentPtr = (RF_SingleComponent_t *)data;
   1147 		memcpy( &component, componentPtr,
   1148 			sizeof(RF_SingleComponent_t));
   1149 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1150 		return(retcode);
   1151 
   1152 	case RAIDFRAME_REBUILD_IN_PLACE:
   1153 
   1154 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1155 			/* Can't do this on a RAID 0!! */
   1156 			return(EINVAL);
   1157 		}
   1158 
   1159 		if (raidPtr->recon_in_progress == 1) {
   1160 			/* a reconstruct is already in progress! */
   1161 			return(EINVAL);
   1162 		}
   1163 
   1164 		componentPtr = (RF_SingleComponent_t *) data;
   1165 		memcpy( &component, componentPtr,
   1166 			sizeof(RF_SingleComponent_t));
   1167 		component.row = 0; /* we don't support any more */
   1168 		column = component.column;
   1169 
   1170 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1171 			return(EINVAL);
   1172 		}
   1173 
   1174 		RF_LOCK_MUTEX(raidPtr->mutex);
   1175 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
   1176 		    (raidPtr->numFailures > 0)) {
   1177 			/* XXX 0 above shouldn't be constant!!! */
   1178 			/* some component other than this has failed.
   1179 			   Let's not make things worse than they already
   1180 			   are... */
   1181 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1182 			       raidPtr->raidid);
   1183 			printf("raid%d:     Col: %d   Too many failures.\n",
   1184 			       raidPtr->raidid, column);
   1185 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1186 			return (EINVAL);
   1187 		}
   1188 		if (raidPtr->Disks[column].status ==
   1189 		    rf_ds_reconstructing) {
   1190 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1191 			       raidPtr->raidid);
   1192 			printf("raid%d:    Col: %d   Reconstruction already occuring!\n", raidPtr->raidid, column);
   1193 
   1194 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1195 			return (EINVAL);
   1196 		}
   1197 		if (raidPtr->Disks[column].status == rf_ds_spared) {
   1198 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1199 			return (EINVAL);
   1200 		}
   1201 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1202 
   1203 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1204 		if (rrcopy == NULL)
   1205 			return(ENOMEM);
   1206 
   1207 		rrcopy->raidPtr = (void *) raidPtr;
   1208 		rrcopy->col = column;
   1209 
   1210 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1211 					   rf_ReconstructInPlaceThread,
   1212 					   rrcopy,"raid_reconip");
   1213 		return(retcode);
   1214 
   1215 	case RAIDFRAME_GET_INFO:
   1216 		if (!raidPtr->valid)
   1217 			return (ENODEV);
   1218 		ucfgp = (RF_DeviceConfig_t **) data;
   1219 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1220 			  (RF_DeviceConfig_t *));
   1221 		if (d_cfg == NULL)
   1222 			return (ENOMEM);
   1223 		d_cfg->rows = 1; /* there is only 1 row now */
   1224 		d_cfg->cols = raidPtr->numCol;
   1225 		d_cfg->ndevs = raidPtr->numCol;
   1226 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1227 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1228 			return (ENOMEM);
   1229 		}
   1230 		d_cfg->nspares = raidPtr->numSpare;
   1231 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1232 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1233 			return (ENOMEM);
   1234 		}
   1235 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1236 		d = 0;
   1237 		for (j = 0; j < d_cfg->cols; j++) {
   1238 			d_cfg->devs[d] = raidPtr->Disks[j];
   1239 			d++;
   1240 		}
   1241 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1242 			d_cfg->spares[i] = raidPtr->Disks[j];
   1243 		}
   1244 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1245 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1246 
   1247 		return (retcode);
   1248 
   1249 	case RAIDFRAME_CHECK_PARITY:
   1250 		*(int *) data = raidPtr->parity_good;
   1251 		return (0);
   1252 
   1253 	case RAIDFRAME_RESET_ACCTOTALS:
   1254 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1255 		return (0);
   1256 
   1257 	case RAIDFRAME_GET_ACCTOTALS:
   1258 		totals = (RF_AccTotals_t *) data;
   1259 		*totals = raidPtr->acc_totals;
   1260 		return (0);
   1261 
   1262 	case RAIDFRAME_KEEP_ACCTOTALS:
   1263 		raidPtr->keep_acc_totals = *(int *)data;
   1264 		return (0);
   1265 
   1266 	case RAIDFRAME_GET_SIZE:
   1267 		*(int *) data = raidPtr->totalSectors;
   1268 		return (0);
   1269 
   1270 		/* fail a disk & optionally start reconstruction */
   1271 	case RAIDFRAME_FAIL_DISK:
   1272 
   1273 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1274 			/* Can't do this on a RAID 0!! */
   1275 			return(EINVAL);
   1276 		}
   1277 
   1278 		rr = (struct rf_recon_req *) data;
   1279 		rr->row = 0;
   1280 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
   1281 			return (EINVAL);
   1282 
   1283 
   1284 		RF_LOCK_MUTEX(raidPtr->mutex);
   1285 		if (raidPtr->status == rf_rs_reconstructing) {
   1286 			/* you can't fail a disk while we're reconstructing! */
   1287 			/* XXX wrong for RAID6 */
   1288 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1289 			return (EINVAL);
   1290 		}
   1291 		if ((raidPtr->Disks[rr->col].status ==
   1292 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1293 			/* some other component has failed.  Let's not make
   1294 			   things worse. XXX wrong for RAID6 */
   1295 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1296 			return (EINVAL);
   1297 		}
   1298 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
   1299 			/* Can't fail a spared disk! */
   1300 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1301 			return (EINVAL);
   1302 		}
   1303 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1304 
   1305 		/* make a copy of the recon request so that we don't rely on
   1306 		 * the user's buffer */
   1307 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1308 		if (rrcopy == NULL)
   1309 			return(ENOMEM);
   1310 		memcpy(rrcopy, rr, sizeof(*rr));
   1311 		rrcopy->raidPtr = (void *) raidPtr;
   1312 
   1313 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1314 					   rf_ReconThread,
   1315 					   rrcopy,"raid_recon");
   1316 		return (0);
   1317 
   1318 		/* invoke a copyback operation after recon on whatever disk
   1319 		 * needs it, if any */
   1320 	case RAIDFRAME_COPYBACK:
   1321 
   1322 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1323 			/* This makes no sense on a RAID 0!! */
   1324 			return(EINVAL);
   1325 		}
   1326 
   1327 		if (raidPtr->copyback_in_progress == 1) {
   1328 			/* Copyback is already in progress! */
   1329 			return(EINVAL);
   1330 		}
   1331 
   1332 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1333 					   rf_CopybackThread,
   1334 					   raidPtr,"raid_copyback");
   1335 		return (retcode);
   1336 
   1337 		/* return the percentage completion of reconstruction */
   1338 	case RAIDFRAME_CHECK_RECON_STATUS:
   1339 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1340 			/* This makes no sense on a RAID 0, so tell the
   1341 			   user it's done. */
   1342 			*(int *) data = 100;
   1343 			return(0);
   1344 		}
   1345 		if (raidPtr->status != rf_rs_reconstructing)
   1346 			*(int *) data = 100;
   1347 		else {
   1348 			if (raidPtr->reconControl->numRUsTotal > 0) {
   1349 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
   1350 			} else {
   1351 				*(int *) data = 0;
   1352 			}
   1353 		}
   1354 		return (0);
   1355 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1356 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1357 		if (raidPtr->status != rf_rs_reconstructing) {
   1358 			progressInfo.remaining = 0;
   1359 			progressInfo.completed = 100;
   1360 			progressInfo.total = 100;
   1361 		} else {
   1362 			progressInfo.total =
   1363 				raidPtr->reconControl->numRUsTotal;
   1364 			progressInfo.completed =
   1365 				raidPtr->reconControl->numRUsComplete;
   1366 			progressInfo.remaining = progressInfo.total -
   1367 				progressInfo.completed;
   1368 		}
   1369 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1370 				  sizeof(RF_ProgressInfo_t));
   1371 		return (retcode);
   1372 
   1373 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1374 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1375 			/* This makes no sense on a RAID 0, so tell the
   1376 			   user it's done. */
   1377 			*(int *) data = 100;
   1378 			return(0);
   1379 		}
   1380 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1381 			*(int *) data = 100 *
   1382 				raidPtr->parity_rewrite_stripes_done /
   1383 				raidPtr->Layout.numStripe;
   1384 		} else {
   1385 			*(int *) data = 100;
   1386 		}
   1387 		return (0);
   1388 
   1389 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1390 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1391 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1392 			progressInfo.total = raidPtr->Layout.numStripe;
   1393 			progressInfo.completed =
   1394 				raidPtr->parity_rewrite_stripes_done;
   1395 			progressInfo.remaining = progressInfo.total -
   1396 				progressInfo.completed;
   1397 		} else {
   1398 			progressInfo.remaining = 0;
   1399 			progressInfo.completed = 100;
   1400 			progressInfo.total = 100;
   1401 		}
   1402 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1403 				  sizeof(RF_ProgressInfo_t));
   1404 		return (retcode);
   1405 
   1406 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1407 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1408 			/* This makes no sense on a RAID 0 */
   1409 			*(int *) data = 100;
   1410 			return(0);
   1411 		}
   1412 		if (raidPtr->copyback_in_progress == 1) {
   1413 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1414 				raidPtr->Layout.numStripe;
   1415 		} else {
   1416 			*(int *) data = 100;
   1417 		}
   1418 		return (0);
   1419 
   1420 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1421 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1422 		if (raidPtr->copyback_in_progress == 1) {
   1423 			progressInfo.total = raidPtr->Layout.numStripe;
   1424 			progressInfo.completed =
   1425 				raidPtr->copyback_stripes_done;
   1426 			progressInfo.remaining = progressInfo.total -
   1427 				progressInfo.completed;
   1428 		} else {
   1429 			progressInfo.remaining = 0;
   1430 			progressInfo.completed = 100;
   1431 			progressInfo.total = 100;
   1432 		}
   1433 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1434 				  sizeof(RF_ProgressInfo_t));
   1435 		return (retcode);
   1436 
   1437 		/* the sparetable daemon calls this to wait for the kernel to
   1438 		 * need a spare table. this ioctl does not return until a
   1439 		 * spare table is needed. XXX -- calling mpsleep here in the
   1440 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1441 		 * -- I should either compute the spare table in the kernel,
   1442 		 * or have a different -- XXX XXX -- interface (a different
   1443 		 * character device) for delivering the table     -- XXX */
   1444 #if 0
   1445 	case RAIDFRAME_SPARET_WAIT:
   1446 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1447 		while (!rf_sparet_wait_queue)
   1448 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
   1449 		waitreq = rf_sparet_wait_queue;
   1450 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1451 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1452 
   1453 		/* structure assignment */
   1454 		*((RF_SparetWait_t *) data) = *waitreq;
   1455 
   1456 		RF_Free(waitreq, sizeof(*waitreq));
   1457 		return (0);
   1458 
   1459 		/* wakes up a process waiting on SPARET_WAIT and puts an error
   1460 		 * code in it that will cause the dameon to exit */
   1461 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1462 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1463 		waitreq->fcol = -1;
   1464 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1465 		waitreq->next = rf_sparet_wait_queue;
   1466 		rf_sparet_wait_queue = waitreq;
   1467 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1468 		wakeup(&rf_sparet_wait_queue);
   1469 		return (0);
   1470 
   1471 		/* used by the spare table daemon to deliver a spare table
   1472 		 * into the kernel */
   1473 	case RAIDFRAME_SEND_SPARET:
   1474 
   1475 		/* install the spare table */
   1476 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1477 
   1478 		/* respond to the requestor.  the return status of the spare
   1479 		 * table installation is passed in the "fcol" field */
   1480 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1481 		waitreq->fcol = retcode;
   1482 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1483 		waitreq->next = rf_sparet_resp_queue;
   1484 		rf_sparet_resp_queue = waitreq;
   1485 		wakeup(&rf_sparet_resp_queue);
   1486 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1487 
   1488 		return (retcode);
   1489 #endif
   1490 
   1491 	default:
   1492 		break; /* fall through to the os-specific code below */
   1493 
   1494 	}
   1495 
   1496 	if (!raidPtr->valid)
   1497 		return (EINVAL);
   1498 
   1499 	/*
   1500 	 * Add support for "regular" device ioctls here.
   1501 	 */
   1502 
   1503 	switch (cmd) {
   1504 	case DIOCGDINFO:
   1505 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
   1506 		break;
   1507 #ifdef __HAVE_OLD_DISKLABEL
   1508 	case ODIOCGDINFO:
   1509 		newlabel = *(rs->sc_dkdev.dk_label);
   1510 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1511 			return ENOTTY;
   1512 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1513 		break;
   1514 #endif
   1515 
   1516 	case DIOCGPART:
   1517 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
   1518 		((struct partinfo *) data)->part =
   1519 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1520 		break;
   1521 
   1522 	case DIOCWDINFO:
   1523 	case DIOCSDINFO:
   1524 #ifdef __HAVE_OLD_DISKLABEL
   1525 	case ODIOCWDINFO:
   1526 	case ODIOCSDINFO:
   1527 #endif
   1528 	{
   1529 		struct disklabel *lp;
   1530 #ifdef __HAVE_OLD_DISKLABEL
   1531 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1532 			memset(&newlabel, 0, sizeof newlabel);
   1533 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1534 			lp = &newlabel;
   1535 		} else
   1536 #endif
   1537 		lp = (struct disklabel *)data;
   1538 
   1539 		if ((error = raidlock(rs)) != 0)
   1540 			return (error);
   1541 
   1542 		rs->sc_flags |= RAIDF_LABELLING;
   1543 
   1544 		error = setdisklabel(rs->sc_dkdev.dk_label,
   1545 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
   1546 		if (error == 0) {
   1547 			if (cmd == DIOCWDINFO
   1548 #ifdef __HAVE_OLD_DISKLABEL
   1549 			    || cmd == ODIOCWDINFO
   1550 #endif
   1551 			   )
   1552 				error = writedisklabel(RAIDLABELDEV(dev),
   1553 				    raidstrategy, rs->sc_dkdev.dk_label,
   1554 				    rs->sc_dkdev.dk_cpulabel);
   1555 		}
   1556 		rs->sc_flags &= ~RAIDF_LABELLING;
   1557 
   1558 		raidunlock(rs);
   1559 
   1560 		if (error)
   1561 			return (error);
   1562 		break;
   1563 	}
   1564 
   1565 	case DIOCWLABEL:
   1566 		if (*(int *) data != 0)
   1567 			rs->sc_flags |= RAIDF_WLABEL;
   1568 		else
   1569 			rs->sc_flags &= ~RAIDF_WLABEL;
   1570 		break;
   1571 
   1572 	case DIOCGDEFLABEL:
   1573 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
   1574 		break;
   1575 
   1576 #ifdef __HAVE_OLD_DISKLABEL
   1577 	case ODIOCGDEFLABEL:
   1578 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
   1579 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1580 			return ENOTTY;
   1581 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1582 		break;
   1583 #endif
   1584 
   1585 	default:
   1586 		retcode = ENOTTY;
   1587 	}
   1588 	return (retcode);
   1589 
   1590 }
   1591 
   1592 
   1593 /* raidinit -- complete the rest of the initialization for the
   1594    RAIDframe device.  */
   1595 
   1596 
   1597 static void
   1598 raidinit(RF_Raid_t *raidPtr)
   1599 {
   1600 	struct raid_softc *rs;
   1601 	int     unit;
   1602 
   1603 	unit = raidPtr->raidid;
   1604 
   1605 	rs = &raid_softc[unit];
   1606 
   1607 	/* XXX should check return code first... */
   1608 	rs->sc_flags |= RAIDF_INITED;
   1609 
   1610 	/* XXX doesn't check bounds. */
   1611 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
   1612 
   1613 	rs->sc_dkdev.dk_name = rs->sc_xname;
   1614 
   1615 	/* disk_attach actually creates space for the CPU disklabel, among
   1616 	 * other things, so it's critical to call this *BEFORE* we try putzing
   1617 	 * with disklabels. */
   1618 
   1619 	pseudo_disk_attach(&rs->sc_dkdev);
   1620 
   1621 	/* XXX There may be a weird interaction here between this, and
   1622 	 * protectedSectors, as used in RAIDframe.  */
   1623 
   1624 	rs->sc_size = raidPtr->totalSectors;
   1625 }
   1626 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1627 /* wake up the daemon & tell it to get us a spare table
   1628  * XXX
   1629  * the entries in the queues should be tagged with the raidPtr
   1630  * so that in the extremely rare case that two recons happen at once,
   1631  * we know for which device were requesting a spare table
   1632  * XXX
   1633  *
   1634  * XXX This code is not currently used. GO
   1635  */
   1636 int
   1637 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
   1638 {
   1639 	int     retcode;
   1640 
   1641 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1642 	req->next = rf_sparet_wait_queue;
   1643 	rf_sparet_wait_queue = req;
   1644 	wakeup(&rf_sparet_wait_queue);
   1645 
   1646 	/* mpsleep unlocks the mutex */
   1647 	while (!rf_sparet_resp_queue) {
   1648 		tsleep(&rf_sparet_resp_queue, PRIBIO,
   1649 		    "raidframe getsparetable", 0);
   1650 	}
   1651 	req = rf_sparet_resp_queue;
   1652 	rf_sparet_resp_queue = req->next;
   1653 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1654 
   1655 	retcode = req->fcol;
   1656 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
   1657 					 * alloc'd */
   1658 	return (retcode);
   1659 }
   1660 #endif
   1661 
   1662 /* a wrapper around rf_DoAccess that extracts appropriate info from the
   1663  * bp & passes it down.
   1664  * any calls originating in the kernel must use non-blocking I/O
   1665  * do some extra sanity checking to return "appropriate" error values for
   1666  * certain conditions (to make some standard utilities work)
   1667  *
   1668  * Formerly known as: rf_DoAccessKernel
   1669  */
   1670 void
   1671 raidstart(RF_Raid_t *raidPtr)
   1672 {
   1673 	RF_SectorCount_t num_blocks, pb, sum;
   1674 	RF_RaidAddr_t raid_addr;
   1675 	struct partition *pp;
   1676 	daddr_t blocknum;
   1677 	int     unit;
   1678 	struct raid_softc *rs;
   1679 	int     do_async;
   1680 	struct buf *bp;
   1681 	int rc;
   1682 
   1683 	unit = raidPtr->raidid;
   1684 	rs = &raid_softc[unit];
   1685 
   1686 	/* quick check to see if anything has died recently */
   1687 	RF_LOCK_MUTEX(raidPtr->mutex);
   1688 	if (raidPtr->numNewFailures > 0) {
   1689 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1690 		rf_update_component_labels(raidPtr,
   1691 					   RF_NORMAL_COMPONENT_UPDATE);
   1692 		RF_LOCK_MUTEX(raidPtr->mutex);
   1693 		raidPtr->numNewFailures--;
   1694 	}
   1695 
   1696 	/* Check to see if we're at the limit... */
   1697 	while (raidPtr->openings > 0) {
   1698 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1699 
   1700 		/* get the next item, if any, from the queue */
   1701 		if ((bp = BUFQ_GET(rs->buf_queue)) == NULL) {
   1702 			/* nothing more to do */
   1703 			return;
   1704 		}
   1705 
   1706 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
   1707 		 * partition.. Need to make it absolute to the underlying
   1708 		 * device.. */
   1709 
   1710 		blocknum = bp->b_blkno;
   1711 		if (DISKPART(bp->b_dev) != RAW_PART) {
   1712 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
   1713 			blocknum += pp->p_offset;
   1714 		}
   1715 
   1716 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
   1717 			    (int) blocknum));
   1718 
   1719 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
   1720 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
   1721 
   1722 		/* *THIS* is where we adjust what block we're going to...
   1723 		 * but DO NOT TOUCH bp->b_blkno!!! */
   1724 		raid_addr = blocknum;
   1725 
   1726 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
   1727 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
   1728 		sum = raid_addr + num_blocks + pb;
   1729 		if (1 || rf_debugKernelAccess) {
   1730 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
   1731 				    (int) raid_addr, (int) sum, (int) num_blocks,
   1732 				    (int) pb, (int) bp->b_resid));
   1733 		}
   1734 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
   1735 		    || (sum < num_blocks) || (sum < pb)) {
   1736 			bp->b_error = ENOSPC;
   1737 			bp->b_flags |= B_ERROR;
   1738 			bp->b_resid = bp->b_bcount;
   1739 			biodone(bp);
   1740 			RF_LOCK_MUTEX(raidPtr->mutex);
   1741 			continue;
   1742 		}
   1743 		/*
   1744 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
   1745 		 */
   1746 
   1747 		if (bp->b_bcount & raidPtr->sectorMask) {
   1748 			bp->b_error = EINVAL;
   1749 			bp->b_flags |= B_ERROR;
   1750 			bp->b_resid = bp->b_bcount;
   1751 			biodone(bp);
   1752 			RF_LOCK_MUTEX(raidPtr->mutex);
   1753 			continue;
   1754 
   1755 		}
   1756 		db1_printf(("Calling DoAccess..\n"));
   1757 
   1758 
   1759 		RF_LOCK_MUTEX(raidPtr->mutex);
   1760 		raidPtr->openings--;
   1761 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1762 
   1763 		/*
   1764 		 * Everything is async.
   1765 		 */
   1766 		do_async = 1;
   1767 
   1768 		disk_busy(&rs->sc_dkdev);
   1769 
   1770 		/* XXX we're still at splbio() here... do we *really*
   1771 		   need to be? */
   1772 
   1773 		/* don't ever condition on bp->b_flags & B_WRITE.
   1774 		 * always condition on B_READ instead */
   1775 
   1776 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
   1777 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
   1778 				 do_async, raid_addr, num_blocks,
   1779 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
   1780 
   1781 		if (rc) {
   1782 			bp->b_error = rc;
   1783 			bp->b_flags |= B_ERROR;
   1784 			bp->b_resid = bp->b_bcount;
   1785 			biodone(bp);
   1786 			/* continue loop */
   1787 		}
   1788 
   1789 		RF_LOCK_MUTEX(raidPtr->mutex);
   1790 	}
   1791 	RF_UNLOCK_MUTEX(raidPtr->mutex);
   1792 }
   1793 
   1794 
   1795 
   1796 
   1797 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   1798 
   1799 int
   1800 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
   1801 {
   1802 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   1803 	struct buf *bp;
   1804 
   1805 	req->queue = queue;
   1806 
   1807 #if DIAGNOSTIC
   1808 	if (queue->raidPtr->raidid >= numraid) {
   1809 		printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
   1810 		    numraid);
   1811 		panic("Invalid Unit number in rf_DispatchKernelIO");
   1812 	}
   1813 #endif
   1814 
   1815 	bp = req->bp;
   1816 
   1817 	switch (req->type) {
   1818 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   1819 		/* XXX need to do something extra here.. */
   1820 		/* I'm leaving this in, as I've never actually seen it used,
   1821 		 * and I'd like folks to report it... GO */
   1822 		printf(("WAKEUP CALLED\n"));
   1823 		queue->numOutstanding++;
   1824 
   1825 		bp->b_flags = 0;
   1826 		bp->b_fspriv.bf_private = req;
   1827 
   1828 		KernelWakeupFunc(bp);
   1829 		break;
   1830 
   1831 	case RF_IO_TYPE_READ:
   1832 	case RF_IO_TYPE_WRITE:
   1833 #if RF_ACC_TRACE > 0
   1834 		if (req->tracerec) {
   1835 			RF_ETIMER_START(req->tracerec->timer);
   1836 		}
   1837 #endif
   1838 		InitBP(bp, queue->rf_cinfo->ci_vp,
   1839 		    op, queue->rf_cinfo->ci_dev,
   1840 		    req->sectorOffset, req->numSector,
   1841 		    req->buf, KernelWakeupFunc, (void *) req,
   1842 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   1843 
   1844 		if (rf_debugKernelAccess) {
   1845 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   1846 				(long) bp->b_blkno));
   1847 		}
   1848 		queue->numOutstanding++;
   1849 		queue->last_deq_sector = req->sectorOffset;
   1850 		/* acc wouldn't have been let in if there were any pending
   1851 		 * reqs at any other priority */
   1852 		queue->curPriority = req->priority;
   1853 
   1854 		db1_printf(("Going for %c to unit %d col %d\n",
   1855 			    req->type, queue->raidPtr->raidid,
   1856 			    queue->col));
   1857 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   1858 			(int) req->sectorOffset, (int) req->numSector,
   1859 			(int) (req->numSector <<
   1860 			    queue->raidPtr->logBytesPerSector),
   1861 			(int) queue->raidPtr->logBytesPerSector));
   1862 		VOP_STRATEGY(bp->b_vp, bp);
   1863 
   1864 		break;
   1865 
   1866 	default:
   1867 		panic("bad req->type in rf_DispatchKernelIO");
   1868 	}
   1869 	db1_printf(("Exiting from DispatchKernelIO\n"));
   1870 
   1871 	return (0);
   1872 }
   1873 /* this is the callback function associated with a I/O invoked from
   1874    kernel code.
   1875  */
   1876 static void
   1877 KernelWakeupFunc(struct buf *bp)
   1878 {
   1879 	RF_DiskQueueData_t *req = NULL;
   1880 	RF_DiskQueue_t *queue;
   1881 	int s;
   1882 
   1883 	s = splbio();
   1884 	db1_printf(("recovering the request queue:\n"));
   1885 	req = bp->b_fspriv.bf_private;
   1886 
   1887 	queue = (RF_DiskQueue_t *) req->queue;
   1888 
   1889 #if RF_ACC_TRACE > 0
   1890 	if (req->tracerec) {
   1891 		RF_ETIMER_STOP(req->tracerec->timer);
   1892 		RF_ETIMER_EVAL(req->tracerec->timer);
   1893 		RF_LOCK_MUTEX(rf_tracing_mutex);
   1894 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   1895 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   1896 		req->tracerec->num_phys_ios++;
   1897 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
   1898 	}
   1899 #endif
   1900 
   1901 	/* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
   1902 	 * ballistic, and mark the component as hosed... */
   1903 
   1904 	if (bp->b_flags & B_ERROR) {
   1905 		/* Mark the disk as dead */
   1906 		/* but only mark it once... */
   1907 		/* and only if it wouldn't leave this RAID set
   1908 		   completely broken */
   1909 		if (((queue->raidPtr->Disks[queue->col].status ==
   1910 		      rf_ds_optimal) ||
   1911 		     (queue->raidPtr->Disks[queue->col].status ==
   1912 		      rf_ds_used_spare)) &&
   1913 		     (queue->raidPtr->numFailures <
   1914 		         queue->raidPtr->Layout.map->faultsTolerated)) {
   1915 			printf("raid%d: IO Error.  Marking %s as failed.\n",
   1916 			       queue->raidPtr->raidid,
   1917 			       queue->raidPtr->Disks[queue->col].devname);
   1918 			queue->raidPtr->Disks[queue->col].status =
   1919 			    rf_ds_failed;
   1920 			queue->raidPtr->status = rf_rs_degraded;
   1921 			queue->raidPtr->numFailures++;
   1922 			queue->raidPtr->numNewFailures++;
   1923 		} else {	/* Disk is already dead... */
   1924 			/* printf("Disk already marked as dead!\n"); */
   1925 		}
   1926 
   1927 	}
   1928 
   1929 	/* Fill in the error value */
   1930 
   1931 	req->error = (bp->b_flags & B_ERROR) ? bp->b_error : 0;
   1932 
   1933 	simple_lock(&queue->raidPtr->iodone_lock);
   1934 
   1935 	/* Drop this one on the "finished" queue... */
   1936 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
   1937 
   1938 	/* Let the raidio thread know there is work to be done. */
   1939 	wakeup(&(queue->raidPtr->iodone));
   1940 
   1941 	simple_unlock(&queue->raidPtr->iodone_lock);
   1942 
   1943 	splx(s);
   1944 }
   1945 
   1946 
   1947 
   1948 /*
   1949  * initialize a buf structure for doing an I/O in the kernel.
   1950  */
   1951 static void
   1952 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
   1953        RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t bf,
   1954        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
   1955        struct proc *b_proc)
   1956 {
   1957 	/* bp->b_flags       = B_PHYS | rw_flag; */
   1958 	bp->b_flags = B_CALL | rw_flag;	/* XXX need B_PHYS here too??? */
   1959 	bp->b_bcount = numSect << logBytesPerSector;
   1960 	bp->b_bufsize = bp->b_bcount;
   1961 	bp->b_error = 0;
   1962 	bp->b_dev = dev;
   1963 	bp->b_data = bf;
   1964 	bp->b_blkno = startSect;
   1965 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
   1966 	if (bp->b_bcount == 0) {
   1967 		panic("bp->b_bcount is zero in InitBP!!");
   1968 	}
   1969 	bp->b_proc = b_proc;
   1970 	bp->b_iodone = cbFunc;
   1971 	bp->b_fspriv.bf_private = cbArg;
   1972 	bp->b_vp = b_vp;
   1973 	if ((bp->b_flags & B_READ) == 0) {
   1974 		bp->b_vp->v_numoutput++;
   1975 	}
   1976 
   1977 }
   1978 
   1979 static void
   1980 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
   1981 		    struct disklabel *lp)
   1982 {
   1983 	memset(lp, 0, sizeof(*lp));
   1984 
   1985 	/* fabricate a label... */
   1986 	lp->d_secperunit = raidPtr->totalSectors;
   1987 	lp->d_secsize = raidPtr->bytesPerSector;
   1988 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
   1989 	lp->d_ntracks = 4 * raidPtr->numCol;
   1990 	lp->d_ncylinders = raidPtr->totalSectors /
   1991 		(lp->d_nsectors * lp->d_ntracks);
   1992 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
   1993 
   1994 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
   1995 	lp->d_type = DTYPE_RAID;
   1996 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   1997 	lp->d_rpm = 3600;
   1998 	lp->d_interleave = 1;
   1999 	lp->d_flags = 0;
   2000 
   2001 	lp->d_partitions[RAW_PART].p_offset = 0;
   2002 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
   2003 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   2004 	lp->d_npartitions = RAW_PART + 1;
   2005 
   2006 	lp->d_magic = DISKMAGIC;
   2007 	lp->d_magic2 = DISKMAGIC;
   2008 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
   2009 
   2010 }
   2011 /*
   2012  * Read the disklabel from the raid device.  If one is not present, fake one
   2013  * up.
   2014  */
   2015 static void
   2016 raidgetdisklabel(dev_t dev)
   2017 {
   2018 	int     unit = raidunit(dev);
   2019 	struct raid_softc *rs = &raid_softc[unit];
   2020 	const char   *errstring;
   2021 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2022 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
   2023 	RF_Raid_t *raidPtr;
   2024 
   2025 	db1_printf(("Getting the disklabel...\n"));
   2026 
   2027 	memset(clp, 0, sizeof(*clp));
   2028 
   2029 	raidPtr = raidPtrs[unit];
   2030 
   2031 	raidgetdefaultlabel(raidPtr, rs, lp);
   2032 
   2033 	/*
   2034 	 * Call the generic disklabel extraction routine.
   2035 	 */
   2036 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
   2037 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
   2038 	if (errstring)
   2039 		raidmakedisklabel(rs);
   2040 	else {
   2041 		int     i;
   2042 		struct partition *pp;
   2043 
   2044 		/*
   2045 		 * Sanity check whether the found disklabel is valid.
   2046 		 *
   2047 		 * This is necessary since total size of the raid device
   2048 		 * may vary when an interleave is changed even though exactly
   2049 		 * same componets are used, and old disklabel may used
   2050 		 * if that is found.
   2051 		 */
   2052 		if (lp->d_secperunit != rs->sc_size)
   2053 			printf("raid%d: WARNING: %s: "
   2054 			    "total sector size in disklabel (%d) != "
   2055 			    "the size of raid (%ld)\n", unit, rs->sc_xname,
   2056 			    lp->d_secperunit, (long) rs->sc_size);
   2057 		for (i = 0; i < lp->d_npartitions; i++) {
   2058 			pp = &lp->d_partitions[i];
   2059 			if (pp->p_offset + pp->p_size > rs->sc_size)
   2060 				printf("raid%d: WARNING: %s: end of partition `%c' "
   2061 				       "exceeds the size of raid (%ld)\n",
   2062 				       unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
   2063 		}
   2064 	}
   2065 
   2066 }
   2067 /*
   2068  * Take care of things one might want to take care of in the event
   2069  * that a disklabel isn't present.
   2070  */
   2071 static void
   2072 raidmakedisklabel(struct raid_softc *rs)
   2073 {
   2074 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2075 	db1_printf(("Making a label..\n"));
   2076 
   2077 	/*
   2078 	 * For historical reasons, if there's no disklabel present
   2079 	 * the raw partition must be marked FS_BSDFFS.
   2080 	 */
   2081 
   2082 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
   2083 
   2084 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
   2085 
   2086 	lp->d_checksum = dkcksum(lp);
   2087 }
   2088 /*
   2089  * Lookup the provided name in the filesystem.  If the file exists,
   2090  * is a valid block device, and isn't being used by anyone else,
   2091  * set *vpp to the file's vnode.
   2092  * You'll find the original of this in ccd.c
   2093  */
   2094 int
   2095 raidlookup(char *path, struct lwp *l, struct vnode **vpp)
   2096 {
   2097 	struct nameidata nd;
   2098 	struct vnode *vp;
   2099 	struct proc *p;
   2100 	struct vattr va;
   2101 	int     error;
   2102 
   2103 	p = l ? l->l_proc : NULL;
   2104 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, l);
   2105 	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
   2106 		return (error);
   2107 	}
   2108 	vp = nd.ni_vp;
   2109 	if (vp->v_usecount > 1) {
   2110 		VOP_UNLOCK(vp, 0);
   2111 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, l);
   2112 		return (EBUSY);
   2113 	}
   2114 	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, l)) != 0) {
   2115 		VOP_UNLOCK(vp, 0);
   2116 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, l);
   2117 		return (error);
   2118 	}
   2119 	/* XXX: eventually we should handle VREG, too. */
   2120 	if (va.va_type != VBLK) {
   2121 		VOP_UNLOCK(vp, 0);
   2122 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, l);
   2123 		return (ENOTBLK);
   2124 	}
   2125 	VOP_UNLOCK(vp, 0);
   2126 	*vpp = vp;
   2127 	return (0);
   2128 }
   2129 /*
   2130  * Wait interruptibly for an exclusive lock.
   2131  *
   2132  * XXX
   2133  * Several drivers do this; it should be abstracted and made MP-safe.
   2134  * (Hmm... where have we seen this warning before :->  GO )
   2135  */
   2136 static int
   2137 raidlock(struct raid_softc *rs)
   2138 {
   2139 	int     error;
   2140 
   2141 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2142 		rs->sc_flags |= RAIDF_WANTED;
   2143 		if ((error =
   2144 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
   2145 			return (error);
   2146 	}
   2147 	rs->sc_flags |= RAIDF_LOCKED;
   2148 	return (0);
   2149 }
   2150 /*
   2151  * Unlock and wake up any waiters.
   2152  */
   2153 static void
   2154 raidunlock(struct raid_softc *rs)
   2155 {
   2156 
   2157 	rs->sc_flags &= ~RAIDF_LOCKED;
   2158 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2159 		rs->sc_flags &= ~RAIDF_WANTED;
   2160 		wakeup(rs);
   2161 	}
   2162 }
   2163 
   2164 
   2165 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2166 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
   2167 
   2168 int
   2169 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
   2170 {
   2171 	RF_ComponentLabel_t clabel;
   2172 	raidread_component_label(dev, b_vp, &clabel);
   2173 	clabel.mod_counter = mod_counter;
   2174 	clabel.clean = RF_RAID_CLEAN;
   2175 	raidwrite_component_label(dev, b_vp, &clabel);
   2176 	return(0);
   2177 }
   2178 
   2179 
   2180 int
   2181 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
   2182 {
   2183 	RF_ComponentLabel_t clabel;
   2184 	raidread_component_label(dev, b_vp, &clabel);
   2185 	clabel.mod_counter = mod_counter;
   2186 	clabel.clean = RF_RAID_DIRTY;
   2187 	raidwrite_component_label(dev, b_vp, &clabel);
   2188 	return(0);
   2189 }
   2190 
   2191 /* ARGSUSED */
   2192 int
   2193 raidread_component_label(dev_t dev, struct vnode *b_vp,
   2194 			 RF_ComponentLabel_t *clabel)
   2195 {
   2196 	struct buf *bp;
   2197 	const struct bdevsw *bdev;
   2198 	int error;
   2199 
   2200 	/* XXX should probably ensure that we don't try to do this if
   2201 	   someone has changed rf_protected_sectors. */
   2202 
   2203 	if (b_vp == NULL) {
   2204 		/* For whatever reason, this component is not valid.
   2205 		   Don't try to read a component label from it. */
   2206 		return(EINVAL);
   2207 	}
   2208 
   2209 	/* get a block of the appropriate size... */
   2210 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2211 	bp->b_dev = dev;
   2212 
   2213 	/* get our ducks in a row for the read */
   2214 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2215 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2216 	bp->b_flags |= B_READ;
   2217  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2218 
   2219 	bdev = bdevsw_lookup(bp->b_dev);
   2220 	if (bdev == NULL)
   2221 		return (ENXIO);
   2222 	(*bdev->d_strategy)(bp);
   2223 
   2224 	error = biowait(bp);
   2225 
   2226 	if (!error) {
   2227 		memcpy(clabel, bp->b_data,
   2228 		       sizeof(RF_ComponentLabel_t));
   2229         }
   2230 
   2231 	brelse(bp);
   2232 	return(error);
   2233 }
   2234 /* ARGSUSED */
   2235 int
   2236 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
   2237 			  RF_ComponentLabel_t *clabel)
   2238 {
   2239 	struct buf *bp;
   2240 	const struct bdevsw *bdev;
   2241 	int error;
   2242 
   2243 	/* get a block of the appropriate size... */
   2244 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2245 	bp->b_dev = dev;
   2246 
   2247 	/* get our ducks in a row for the write */
   2248 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2249 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2250 	bp->b_flags |= B_WRITE;
   2251  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2252 
   2253 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
   2254 
   2255 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
   2256 
   2257 	bdev = bdevsw_lookup(bp->b_dev);
   2258 	if (bdev == NULL)
   2259 		return (ENXIO);
   2260 	(*bdev->d_strategy)(bp);
   2261 	error = biowait(bp);
   2262 	brelse(bp);
   2263 	if (error) {
   2264 #if 1
   2265 		printf("Failed to write RAID component info!\n");
   2266 #endif
   2267 	}
   2268 
   2269 	return(error);
   2270 }
   2271 
   2272 void
   2273 rf_markalldirty(RF_Raid_t *raidPtr)
   2274 {
   2275 	RF_ComponentLabel_t clabel;
   2276 	int sparecol;
   2277 	int c;
   2278 	int j;
   2279 	int scol = -1;
   2280 
   2281 	raidPtr->mod_counter++;
   2282 	for (c = 0; c < raidPtr->numCol; c++) {
   2283 		/* we don't want to touch (at all) a disk that has
   2284 		   failed */
   2285 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
   2286 			raidread_component_label(
   2287 						 raidPtr->Disks[c].dev,
   2288 						 raidPtr->raid_cinfo[c].ci_vp,
   2289 						 &clabel);
   2290 			if (clabel.status == rf_ds_spared) {
   2291 				/* XXX do something special...
   2292 				   but whatever you do, don't
   2293 				   try to access it!! */
   2294 			} else {
   2295 				raidmarkdirty(
   2296 					      raidPtr->Disks[c].dev,
   2297 					      raidPtr->raid_cinfo[c].ci_vp,
   2298 					      raidPtr->mod_counter);
   2299 			}
   2300 		}
   2301 	}
   2302 
   2303 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2304 		sparecol = raidPtr->numCol + c;
   2305 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2306 			/*
   2307 
   2308 			   we claim this disk is "optimal" if it's
   2309 			   rf_ds_used_spare, as that means it should be
   2310 			   directly substitutable for the disk it replaced.
   2311 			   We note that too...
   2312 
   2313 			 */
   2314 
   2315 			for(j=0;j<raidPtr->numCol;j++) {
   2316 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2317 					scol = j;
   2318 					break;
   2319 				}
   2320 			}
   2321 
   2322 			raidread_component_label(
   2323 				 raidPtr->Disks[sparecol].dev,
   2324 				 raidPtr->raid_cinfo[sparecol].ci_vp,
   2325 				 &clabel);
   2326 			/* make sure status is noted */
   2327 
   2328 			raid_init_component_label(raidPtr, &clabel);
   2329 
   2330 			clabel.row = 0;
   2331 			clabel.column = scol;
   2332 			/* Note: we *don't* change status from rf_ds_used_spare
   2333 			   to rf_ds_optimal */
   2334 			/* clabel.status = rf_ds_optimal; */
   2335 
   2336 			raidmarkdirty(raidPtr->Disks[sparecol].dev,
   2337 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2338 				      raidPtr->mod_counter);
   2339 		}
   2340 	}
   2341 }
   2342 
   2343 
   2344 void
   2345 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
   2346 {
   2347 	RF_ComponentLabel_t clabel;
   2348 	int sparecol;
   2349 	int c;
   2350 	int j;
   2351 	int scol;
   2352 
   2353 	scol = -1;
   2354 
   2355 	/* XXX should do extra checks to make sure things really are clean,
   2356 	   rather than blindly setting the clean bit... */
   2357 
   2358 	raidPtr->mod_counter++;
   2359 
   2360 	for (c = 0; c < raidPtr->numCol; c++) {
   2361 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   2362 			raidread_component_label(
   2363 						 raidPtr->Disks[c].dev,
   2364 						 raidPtr->raid_cinfo[c].ci_vp,
   2365 						 &clabel);
   2366 			/* make sure status is noted */
   2367 			clabel.status = rf_ds_optimal;
   2368 
   2369 			/* bump the counter */
   2370 			clabel.mod_counter = raidPtr->mod_counter;
   2371 
   2372 			raidwrite_component_label(
   2373 						  raidPtr->Disks[c].dev,
   2374 						  raidPtr->raid_cinfo[c].ci_vp,
   2375 						  &clabel);
   2376 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2377 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2378 					raidmarkclean(
   2379 						      raidPtr->Disks[c].dev,
   2380 						      raidPtr->raid_cinfo[c].ci_vp,
   2381 						      raidPtr->mod_counter);
   2382 				}
   2383 			}
   2384 		}
   2385 		/* else we don't touch it.. */
   2386 	}
   2387 
   2388 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2389 		sparecol = raidPtr->numCol + c;
   2390 		/* Need to ensure that the reconstruct actually completed! */
   2391 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2392 			/*
   2393 
   2394 			   we claim this disk is "optimal" if it's
   2395 			   rf_ds_used_spare, as that means it should be
   2396 			   directly substitutable for the disk it replaced.
   2397 			   We note that too...
   2398 
   2399 			 */
   2400 
   2401 			for(j=0;j<raidPtr->numCol;j++) {
   2402 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2403 					scol = j;
   2404 					break;
   2405 				}
   2406 			}
   2407 
   2408 			/* XXX shouldn't *really* need this... */
   2409 			raidread_component_label(
   2410 				      raidPtr->Disks[sparecol].dev,
   2411 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2412 				      &clabel);
   2413 			/* make sure status is noted */
   2414 
   2415 			raid_init_component_label(raidPtr, &clabel);
   2416 
   2417 			clabel.mod_counter = raidPtr->mod_counter;
   2418 			clabel.column = scol;
   2419 			clabel.status = rf_ds_optimal;
   2420 
   2421 			raidwrite_component_label(
   2422 				      raidPtr->Disks[sparecol].dev,
   2423 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2424 				      &clabel);
   2425 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2426 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2427 					raidmarkclean( raidPtr->Disks[sparecol].dev,
   2428 						       raidPtr->raid_cinfo[sparecol].ci_vp,
   2429 						       raidPtr->mod_counter);
   2430 				}
   2431 			}
   2432 		}
   2433 	}
   2434 }
   2435 
   2436 void
   2437 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
   2438 {
   2439 	struct proc *p;
   2440 	struct lwp *l;
   2441 
   2442 	p = raidPtr->engine_thread;
   2443 	l = LIST_FIRST(&p->p_lwps);
   2444 
   2445 	if (vp != NULL) {
   2446 		if (auto_configured == 1) {
   2447 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2448 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2449 			vput(vp);
   2450 
   2451 		} else {
   2452 			(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, l);
   2453 		}
   2454 	}
   2455 }
   2456 
   2457 
   2458 void
   2459 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
   2460 {
   2461 	int r,c;
   2462 	struct vnode *vp;
   2463 	int acd;
   2464 
   2465 
   2466 	/* We take this opportunity to close the vnodes like we should.. */
   2467 
   2468 	for (c = 0; c < raidPtr->numCol; c++) {
   2469 		vp = raidPtr->raid_cinfo[c].ci_vp;
   2470 		acd = raidPtr->Disks[c].auto_configured;
   2471 		rf_close_component(raidPtr, vp, acd);
   2472 		raidPtr->raid_cinfo[c].ci_vp = NULL;
   2473 		raidPtr->Disks[c].auto_configured = 0;
   2474 	}
   2475 
   2476 	for (r = 0; r < raidPtr->numSpare; r++) {
   2477 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
   2478 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
   2479 		rf_close_component(raidPtr, vp, acd);
   2480 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
   2481 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
   2482 	}
   2483 }
   2484 
   2485 
   2486 void
   2487 rf_ReconThread(struct rf_recon_req *req)
   2488 {
   2489 	int     s;
   2490 	RF_Raid_t *raidPtr;
   2491 
   2492 	s = splbio();
   2493 	raidPtr = (RF_Raid_t *) req->raidPtr;
   2494 	raidPtr->recon_in_progress = 1;
   2495 
   2496 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
   2497 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
   2498 
   2499 	RF_Free(req, sizeof(*req));
   2500 
   2501 	raidPtr->recon_in_progress = 0;
   2502 	splx(s);
   2503 
   2504 	/* That's all... */
   2505 	kthread_exit(0);        /* does not return */
   2506 }
   2507 
   2508 void
   2509 rf_RewriteParityThread(RF_Raid_t *raidPtr)
   2510 {
   2511 	int retcode;
   2512 	int s;
   2513 
   2514 	raidPtr->parity_rewrite_stripes_done = 0;
   2515 	raidPtr->parity_rewrite_in_progress = 1;
   2516 	s = splbio();
   2517 	retcode = rf_RewriteParity(raidPtr);
   2518 	splx(s);
   2519 	if (retcode) {
   2520 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
   2521 	} else {
   2522 		/* set the clean bit!  If we shutdown correctly,
   2523 		   the clean bit on each component label will get
   2524 		   set */
   2525 		raidPtr->parity_good = RF_RAID_CLEAN;
   2526 	}
   2527 	raidPtr->parity_rewrite_in_progress = 0;
   2528 
   2529 	/* Anyone waiting for us to stop?  If so, inform them... */
   2530 	if (raidPtr->waitShutdown) {
   2531 		wakeup(&raidPtr->parity_rewrite_in_progress);
   2532 	}
   2533 
   2534 	/* That's all... */
   2535 	kthread_exit(0);        /* does not return */
   2536 }
   2537 
   2538 
   2539 void
   2540 rf_CopybackThread(RF_Raid_t *raidPtr)
   2541 {
   2542 	int s;
   2543 
   2544 	raidPtr->copyback_in_progress = 1;
   2545 	s = splbio();
   2546 	rf_CopybackReconstructedData(raidPtr);
   2547 	splx(s);
   2548 	raidPtr->copyback_in_progress = 0;
   2549 
   2550 	/* That's all... */
   2551 	kthread_exit(0);        /* does not return */
   2552 }
   2553 
   2554 
   2555 void
   2556 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
   2557 {
   2558 	int s;
   2559 	RF_Raid_t *raidPtr;
   2560 
   2561 	s = splbio();
   2562 	raidPtr = req->raidPtr;
   2563 	raidPtr->recon_in_progress = 1;
   2564 	rf_ReconstructInPlace(raidPtr, req->col);
   2565 	RF_Free(req, sizeof(*req));
   2566 	raidPtr->recon_in_progress = 0;
   2567 	splx(s);
   2568 
   2569 	/* That's all... */
   2570 	kthread_exit(0);        /* does not return */
   2571 }
   2572 
   2573 RF_AutoConfig_t *
   2574 rf_find_raid_components()
   2575 {
   2576 	struct vnode *vp;
   2577 	struct disklabel label;
   2578 	struct device *dv;
   2579 	dev_t dev;
   2580 	int bmajor;
   2581 	int error;
   2582 	int i;
   2583 	int good_one;
   2584 	RF_ComponentLabel_t *clabel;
   2585 	RF_AutoConfig_t *ac_list;
   2586 	RF_AutoConfig_t *ac;
   2587 
   2588 
   2589 	/* initialize the AutoConfig list */
   2590 	ac_list = NULL;
   2591 
   2592 	/* we begin by trolling through *all* the devices on the system */
   2593 
   2594 	for (dv = alldevs.tqh_first; dv != NULL;
   2595 	     dv = dv->dv_list.tqe_next) {
   2596 
   2597 		/* we are only interested in disks... */
   2598 		if (device_class(dv) != DV_DISK)
   2599 			continue;
   2600 
   2601 		/* we don't care about floppies... */
   2602 		if (!strcmp(dv->dv_cfdata->cf_name,"fd")) {
   2603 			continue;
   2604 		}
   2605 
   2606 		/* we don't care about CD's... */
   2607 		if (!strcmp(dv->dv_cfdata->cf_name,"cd")) {
   2608 			continue;
   2609 		}
   2610 
   2611 		/* hdfd is the Atari/Hades floppy driver */
   2612 		if (!strcmp(dv->dv_cfdata->cf_name,"hdfd")) {
   2613 			continue;
   2614 		}
   2615 		/* fdisa is the Atari/Milan floppy driver */
   2616 		if (!strcmp(dv->dv_cfdata->cf_name,"fdisa")) {
   2617 			continue;
   2618 		}
   2619 
   2620 		/* need to find the device_name_to_block_device_major stuff */
   2621 		bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
   2622 
   2623 		/* get a vnode for the raw partition of this disk */
   2624 
   2625 		dev = MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART);
   2626 		if (bdevvp(dev, &vp))
   2627 			panic("RAID can't alloc vnode");
   2628 
   2629 		error = VOP_OPEN(vp, FREAD, NOCRED, 0);
   2630 
   2631 		if (error) {
   2632 			/* "Who cares."  Continue looking
   2633 			   for something that exists*/
   2634 			vput(vp);
   2635 			continue;
   2636 		}
   2637 
   2638 		/* Ok, the disk exists.  Go get the disklabel. */
   2639 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED, 0);
   2640 		if (error) {
   2641 			/*
   2642 			 * XXX can't happen - open() would
   2643 			 * have errored out (or faked up one)
   2644 			 */
   2645 			if (error != ENOTTY)
   2646 				printf("RAIDframe: can't get label for dev "
   2647 				    "%s (%d)\n", dv->dv_xname, error);
   2648 		}
   2649 
   2650 		/* don't need this any more.  We'll allocate it again
   2651 		   a little later if we really do... */
   2652 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2653 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2654 		vput(vp);
   2655 
   2656 		if (error)
   2657 			continue;
   2658 
   2659 		for (i=0; i < label.d_npartitions; i++) {
   2660 			/* We only support partitions marked as RAID */
   2661 			if (label.d_partitions[i].p_fstype != FS_RAID)
   2662 				continue;
   2663 
   2664 			dev = MAKEDISKDEV(bmajor, dv->dv_unit, i);
   2665 			if (bdevvp(dev, &vp))
   2666 				panic("RAID can't alloc vnode");
   2667 
   2668 			error = VOP_OPEN(vp, FREAD, NOCRED, 0);
   2669 			if (error) {
   2670 				/* Whatever... */
   2671 				vput(vp);
   2672 				continue;
   2673 			}
   2674 
   2675 			good_one = 0;
   2676 
   2677 			clabel = (RF_ComponentLabel_t *)
   2678 				malloc(sizeof(RF_ComponentLabel_t),
   2679 				       M_RAIDFRAME, M_NOWAIT);
   2680 			if (clabel == NULL) {
   2681 				/* XXX CLEANUP HERE */
   2682 				printf("RAID auto config: out of memory!\n");
   2683 				return(NULL); /* XXX probably should panic? */
   2684 			}
   2685 
   2686 			if (!raidread_component_label(dev, vp, clabel)) {
   2687 				/* Got the label.  Does it look reasonable? */
   2688 				if (rf_reasonable_label(clabel) &&
   2689 				    (clabel->partitionSize <=
   2690 				     label.d_partitions[i].p_size)) {
   2691 #if DEBUG
   2692 					printf("Component on: %s%c: %d\n",
   2693 					       dv->dv_xname, 'a'+i,
   2694 					       label.d_partitions[i].p_size);
   2695 					rf_print_component_label(clabel);
   2696 #endif
   2697 					/* if it's reasonable, add it,
   2698 					   else ignore it. */
   2699 					ac = (RF_AutoConfig_t *)
   2700 						malloc(sizeof(RF_AutoConfig_t),
   2701 						       M_RAIDFRAME,
   2702 						       M_NOWAIT);
   2703 					if (ac == NULL) {
   2704 						/* XXX should panic?? */
   2705 						return(NULL);
   2706 					}
   2707 
   2708 					snprintf(ac->devname,
   2709 					    sizeof(ac->devname), "%s%c",
   2710 					    dv->dv_xname, 'a'+i);
   2711 					ac->dev = dev;
   2712 					ac->vp = vp;
   2713 					ac->clabel = clabel;
   2714 					ac->next = ac_list;
   2715 					ac_list = ac;
   2716 					good_one = 1;
   2717 				}
   2718 			}
   2719 			if (!good_one) {
   2720 				/* cleanup */
   2721 				free(clabel, M_RAIDFRAME);
   2722 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2723 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2724 				vput(vp);
   2725 			}
   2726 		}
   2727 	}
   2728 	return(ac_list);
   2729 }
   2730 
   2731 static int
   2732 rf_reasonable_label(RF_ComponentLabel_t *clabel)
   2733 {
   2734 
   2735 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   2736 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   2737 	    ((clabel->clean == RF_RAID_CLEAN) ||
   2738 	     (clabel->clean == RF_RAID_DIRTY)) &&
   2739 	    clabel->row >=0 &&
   2740 	    clabel->column >= 0 &&
   2741 	    clabel->num_rows > 0 &&
   2742 	    clabel->num_columns > 0 &&
   2743 	    clabel->row < clabel->num_rows &&
   2744 	    clabel->column < clabel->num_columns &&
   2745 	    clabel->blockSize > 0 &&
   2746 	    clabel->numBlocks > 0) {
   2747 		/* label looks reasonable enough... */
   2748 		return(1);
   2749 	}
   2750 	return(0);
   2751 }
   2752 
   2753 
   2754 #if DEBUG
   2755 void
   2756 rf_print_component_label(RF_ComponentLabel_t *clabel)
   2757 {
   2758 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
   2759 	       clabel->row, clabel->column,
   2760 	       clabel->num_rows, clabel->num_columns);
   2761 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
   2762 	       clabel->version, clabel->serial_number,
   2763 	       clabel->mod_counter);
   2764 	printf("   Clean: %s Status: %d\n",
   2765 	       clabel->clean ? "Yes" : "No", clabel->status );
   2766 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
   2767 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
   2768 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
   2769 	       (char) clabel->parityConfig, clabel->blockSize,
   2770 	       clabel->numBlocks);
   2771 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
   2772 	printf("   Contains root partition: %s\n",
   2773 	       clabel->root_partition ? "Yes" : "No" );
   2774 	printf("   Last configured as: raid%d\n", clabel->last_unit );
   2775 #if 0
   2776 	   printf("   Config order: %d\n", clabel->config_order);
   2777 #endif
   2778 
   2779 }
   2780 #endif
   2781 
   2782 RF_ConfigSet_t *
   2783 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
   2784 {
   2785 	RF_AutoConfig_t *ac;
   2786 	RF_ConfigSet_t *config_sets;
   2787 	RF_ConfigSet_t *cset;
   2788 	RF_AutoConfig_t *ac_next;
   2789 
   2790 
   2791 	config_sets = NULL;
   2792 
   2793 	/* Go through the AutoConfig list, and figure out which components
   2794 	   belong to what sets.  */
   2795 	ac = ac_list;
   2796 	while(ac!=NULL) {
   2797 		/* we're going to putz with ac->next, so save it here
   2798 		   for use at the end of the loop */
   2799 		ac_next = ac->next;
   2800 
   2801 		if (config_sets == NULL) {
   2802 			/* will need at least this one... */
   2803 			config_sets = (RF_ConfigSet_t *)
   2804 				malloc(sizeof(RF_ConfigSet_t),
   2805 				       M_RAIDFRAME, M_NOWAIT);
   2806 			if (config_sets == NULL) {
   2807 				panic("rf_create_auto_sets: No memory!");
   2808 			}
   2809 			/* this one is easy :) */
   2810 			config_sets->ac = ac;
   2811 			config_sets->next = NULL;
   2812 			config_sets->rootable = 0;
   2813 			ac->next = NULL;
   2814 		} else {
   2815 			/* which set does this component fit into? */
   2816 			cset = config_sets;
   2817 			while(cset!=NULL) {
   2818 				if (rf_does_it_fit(cset, ac)) {
   2819 					/* looks like it matches... */
   2820 					ac->next = cset->ac;
   2821 					cset->ac = ac;
   2822 					break;
   2823 				}
   2824 				cset = cset->next;
   2825 			}
   2826 			if (cset==NULL) {
   2827 				/* didn't find a match above... new set..*/
   2828 				cset = (RF_ConfigSet_t *)
   2829 					malloc(sizeof(RF_ConfigSet_t),
   2830 					       M_RAIDFRAME, M_NOWAIT);
   2831 				if (cset == NULL) {
   2832 					panic("rf_create_auto_sets: No memory!");
   2833 				}
   2834 				cset->ac = ac;
   2835 				ac->next = NULL;
   2836 				cset->next = config_sets;
   2837 				cset->rootable = 0;
   2838 				config_sets = cset;
   2839 			}
   2840 		}
   2841 		ac = ac_next;
   2842 	}
   2843 
   2844 
   2845 	return(config_sets);
   2846 }
   2847 
   2848 static int
   2849 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
   2850 {
   2851 	RF_ComponentLabel_t *clabel1, *clabel2;
   2852 
   2853 	/* If this one matches the *first* one in the set, that's good
   2854 	   enough, since the other members of the set would have been
   2855 	   through here too... */
   2856 	/* note that we are not checking partitionSize here..
   2857 
   2858 	   Note that we are also not checking the mod_counters here.
   2859 	   If everything else matches execpt the mod_counter, that's
   2860 	   good enough for this test.  We will deal with the mod_counters
   2861 	   a little later in the autoconfiguration process.
   2862 
   2863 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   2864 
   2865 	   The reason we don't check for this is that failed disks
   2866 	   will have lower modification counts.  If those disks are
   2867 	   not added to the set they used to belong to, then they will
   2868 	   form their own set, which may result in 2 different sets,
   2869 	   for example, competing to be configured at raid0, and
   2870 	   perhaps competing to be the root filesystem set.  If the
   2871 	   wrong ones get configured, or both attempt to become /,
   2872 	   weird behaviour and or serious lossage will occur.  Thus we
   2873 	   need to bring them into the fold here, and kick them out at
   2874 	   a later point.
   2875 
   2876 	*/
   2877 
   2878 	clabel1 = cset->ac->clabel;
   2879 	clabel2 = ac->clabel;
   2880 	if ((clabel1->version == clabel2->version) &&
   2881 	    (clabel1->serial_number == clabel2->serial_number) &&
   2882 	    (clabel1->num_rows == clabel2->num_rows) &&
   2883 	    (clabel1->num_columns == clabel2->num_columns) &&
   2884 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   2885 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   2886 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   2887 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   2888 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   2889 	    (clabel1->blockSize == clabel2->blockSize) &&
   2890 	    (clabel1->numBlocks == clabel2->numBlocks) &&
   2891 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   2892 	    (clabel1->root_partition == clabel2->root_partition) &&
   2893 	    (clabel1->last_unit == clabel2->last_unit) &&
   2894 	    (clabel1->config_order == clabel2->config_order)) {
   2895 		/* if it get's here, it almost *has* to be a match */
   2896 	} else {
   2897 		/* it's not consistent with somebody in the set..
   2898 		   punt */
   2899 		return(0);
   2900 	}
   2901 	/* all was fine.. it must fit... */
   2902 	return(1);
   2903 }
   2904 
   2905 int
   2906 rf_have_enough_components(RF_ConfigSet_t *cset)
   2907 {
   2908 	RF_AutoConfig_t *ac;
   2909 	RF_AutoConfig_t *auto_config;
   2910 	RF_ComponentLabel_t *clabel;
   2911 	int c;
   2912 	int num_cols;
   2913 	int num_missing;
   2914 	int mod_counter;
   2915 	int mod_counter_found;
   2916 	int even_pair_failed;
   2917 	char parity_type;
   2918 
   2919 
   2920 	/* check to see that we have enough 'live' components
   2921 	   of this set.  If so, we can configure it if necessary */
   2922 
   2923 	num_cols = cset->ac->clabel->num_columns;
   2924 	parity_type = cset->ac->clabel->parityConfig;
   2925 
   2926 	/* XXX Check for duplicate components!?!?!? */
   2927 
   2928 	/* Determine what the mod_counter is supposed to be for this set. */
   2929 
   2930 	mod_counter_found = 0;
   2931 	mod_counter = 0;
   2932 	ac = cset->ac;
   2933 	while(ac!=NULL) {
   2934 		if (mod_counter_found==0) {
   2935 			mod_counter = ac->clabel->mod_counter;
   2936 			mod_counter_found = 1;
   2937 		} else {
   2938 			if (ac->clabel->mod_counter > mod_counter) {
   2939 				mod_counter = ac->clabel->mod_counter;
   2940 			}
   2941 		}
   2942 		ac = ac->next;
   2943 	}
   2944 
   2945 	num_missing = 0;
   2946 	auto_config = cset->ac;
   2947 
   2948 	even_pair_failed = 0;
   2949 	for(c=0; c<num_cols; c++) {
   2950 		ac = auto_config;
   2951 		while(ac!=NULL) {
   2952 			if ((ac->clabel->column == c) &&
   2953 			    (ac->clabel->mod_counter == mod_counter)) {
   2954 				/* it's this one... */
   2955 #if DEBUG
   2956 				printf("Found: %s at %d\n",
   2957 				       ac->devname,c);
   2958 #endif
   2959 				break;
   2960 			}
   2961 			ac=ac->next;
   2962 		}
   2963 		if (ac==NULL) {
   2964 				/* Didn't find one here! */
   2965 				/* special case for RAID 1, especially
   2966 				   where there are more than 2
   2967 				   components (where RAIDframe treats
   2968 				   things a little differently :( ) */
   2969 			if (parity_type == '1') {
   2970 				if (c%2 == 0) { /* even component */
   2971 					even_pair_failed = 1;
   2972 				} else { /* odd component.  If
   2973 					    we're failed, and
   2974 					    so is the even
   2975 					    component, it's
   2976 					    "Good Night, Charlie" */
   2977 					if (even_pair_failed == 1) {
   2978 						return(0);
   2979 					}
   2980 				}
   2981 			} else {
   2982 				/* normal accounting */
   2983 				num_missing++;
   2984 			}
   2985 		}
   2986 		if ((parity_type == '1') && (c%2 == 1)) {
   2987 				/* Just did an even component, and we didn't
   2988 				   bail.. reset the even_pair_failed flag,
   2989 				   and go on to the next component.... */
   2990 			even_pair_failed = 0;
   2991 		}
   2992 	}
   2993 
   2994 	clabel = cset->ac->clabel;
   2995 
   2996 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
   2997 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
   2998 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
   2999 		/* XXX this needs to be made *much* more general */
   3000 		/* Too many failures */
   3001 		return(0);
   3002 	}
   3003 	/* otherwise, all is well, and we've got enough to take a kick
   3004 	   at autoconfiguring this set */
   3005 	return(1);
   3006 }
   3007 
   3008 void
   3009 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
   3010 			RF_Raid_t *raidPtr)
   3011 {
   3012 	RF_ComponentLabel_t *clabel;
   3013 	int i;
   3014 
   3015 	clabel = ac->clabel;
   3016 
   3017 	/* 1. Fill in the common stuff */
   3018 	config->numRow = clabel->num_rows = 1;
   3019 	config->numCol = clabel->num_columns;
   3020 	config->numSpare = 0; /* XXX should this be set here? */
   3021 	config->sectPerSU = clabel->sectPerSU;
   3022 	config->SUsPerPU = clabel->SUsPerPU;
   3023 	config->SUsPerRU = clabel->SUsPerRU;
   3024 	config->parityConfig = clabel->parityConfig;
   3025 	/* XXX... */
   3026 	strcpy(config->diskQueueType,"fifo");
   3027 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3028 	config->layoutSpecificSize = 0; /* XXX ?? */
   3029 
   3030 	while(ac!=NULL) {
   3031 		/* row/col values will be in range due to the checks
   3032 		   in reasonable_label() */
   3033 		strcpy(config->devnames[0][ac->clabel->column],
   3034 		       ac->devname);
   3035 		ac = ac->next;
   3036 	}
   3037 
   3038 	for(i=0;i<RF_MAXDBGV;i++) {
   3039 		config->debugVars[i][0] = 0;
   3040 	}
   3041 }
   3042 
   3043 int
   3044 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
   3045 {
   3046 	RF_ComponentLabel_t clabel;
   3047 	struct vnode *vp;
   3048 	dev_t dev;
   3049 	int column;
   3050 	int sparecol;
   3051 
   3052 	raidPtr->autoconfigure = new_value;
   3053 
   3054 	for(column=0; column<raidPtr->numCol; column++) {
   3055 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3056 			dev = raidPtr->Disks[column].dev;
   3057 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3058 			raidread_component_label(dev, vp, &clabel);
   3059 			clabel.autoconfigure = new_value;
   3060 			raidwrite_component_label(dev, vp, &clabel);
   3061 		}
   3062 	}
   3063 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3064 		sparecol = raidPtr->numCol + column;
   3065 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3066 			dev = raidPtr->Disks[sparecol].dev;
   3067 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3068 			raidread_component_label(dev, vp, &clabel);
   3069 			clabel.autoconfigure = new_value;
   3070 			raidwrite_component_label(dev, vp, &clabel);
   3071 		}
   3072 	}
   3073 	return(new_value);
   3074 }
   3075 
   3076 int
   3077 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
   3078 {
   3079 	RF_ComponentLabel_t clabel;
   3080 	struct vnode *vp;
   3081 	dev_t dev;
   3082 	int column;
   3083 	int sparecol;
   3084 
   3085 	raidPtr->root_partition = new_value;
   3086 	for(column=0; column<raidPtr->numCol; column++) {
   3087 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3088 			dev = raidPtr->Disks[column].dev;
   3089 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3090 			raidread_component_label(dev, vp, &clabel);
   3091 			clabel.root_partition = new_value;
   3092 			raidwrite_component_label(dev, vp, &clabel);
   3093 		}
   3094 	}
   3095 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3096 		sparecol = raidPtr->numCol + column;
   3097 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3098 			dev = raidPtr->Disks[sparecol].dev;
   3099 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3100 			raidread_component_label(dev, vp, &clabel);
   3101 			clabel.root_partition = new_value;
   3102 			raidwrite_component_label(dev, vp, &clabel);
   3103 		}
   3104 	}
   3105 	return(new_value);
   3106 }
   3107 
   3108 void
   3109 rf_release_all_vps(RF_ConfigSet_t *cset)
   3110 {
   3111 	RF_AutoConfig_t *ac;
   3112 
   3113 	ac = cset->ac;
   3114 	while(ac!=NULL) {
   3115 		/* Close the vp, and give it back */
   3116 		if (ac->vp) {
   3117 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3118 			VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
   3119 			vput(ac->vp);
   3120 			ac->vp = NULL;
   3121 		}
   3122 		ac = ac->next;
   3123 	}
   3124 }
   3125 
   3126 
   3127 void
   3128 rf_cleanup_config_set(RF_ConfigSet_t *cset)
   3129 {
   3130 	RF_AutoConfig_t *ac;
   3131 	RF_AutoConfig_t *next_ac;
   3132 
   3133 	ac = cset->ac;
   3134 	while(ac!=NULL) {
   3135 		next_ac = ac->next;
   3136 		/* nuke the label */
   3137 		free(ac->clabel, M_RAIDFRAME);
   3138 		/* cleanup the config structure */
   3139 		free(ac, M_RAIDFRAME);
   3140 		/* "next.." */
   3141 		ac = next_ac;
   3142 	}
   3143 	/* and, finally, nuke the config set */
   3144 	free(cset, M_RAIDFRAME);
   3145 }
   3146 
   3147 
   3148 void
   3149 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
   3150 {
   3151 	/* current version number */
   3152 	clabel->version = RF_COMPONENT_LABEL_VERSION;
   3153 	clabel->serial_number = raidPtr->serial_number;
   3154 	clabel->mod_counter = raidPtr->mod_counter;
   3155 	clabel->num_rows = 1;
   3156 	clabel->num_columns = raidPtr->numCol;
   3157 	clabel->clean = RF_RAID_DIRTY; /* not clean */
   3158 	clabel->status = rf_ds_optimal; /* "It's good!" */
   3159 
   3160 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
   3161 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
   3162 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
   3163 
   3164 	clabel->blockSize = raidPtr->bytesPerSector;
   3165 	clabel->numBlocks = raidPtr->sectorsPerDisk;
   3166 
   3167 	/* XXX not portable */
   3168 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
   3169 	clabel->maxOutstanding = raidPtr->maxOutstanding;
   3170 	clabel->autoconfigure = raidPtr->autoconfigure;
   3171 	clabel->root_partition = raidPtr->root_partition;
   3172 	clabel->last_unit = raidPtr->raidid;
   3173 	clabel->config_order = raidPtr->config_order;
   3174 }
   3175 
   3176 int
   3177 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
   3178 {
   3179 	RF_Raid_t *raidPtr;
   3180 	RF_Config_t *config;
   3181 	int raidID;
   3182 	int retcode;
   3183 
   3184 #if DEBUG
   3185 	printf("RAID autoconfigure\n");
   3186 #endif
   3187 
   3188 	retcode = 0;
   3189 	*unit = -1;
   3190 
   3191 	/* 1. Create a config structure */
   3192 
   3193 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
   3194 				       M_RAIDFRAME,
   3195 				       M_NOWAIT);
   3196 	if (config==NULL) {
   3197 		printf("Out of mem!?!?\n");
   3198 				/* XXX do something more intelligent here. */
   3199 		return(1);
   3200 	}
   3201 
   3202 	memset(config, 0, sizeof(RF_Config_t));
   3203 
   3204 	/*
   3205 	   2. Figure out what RAID ID this one is supposed to live at
   3206 	   See if we can get the same RAID dev that it was configured
   3207 	   on last time..
   3208 	*/
   3209 
   3210 	raidID = cset->ac->clabel->last_unit;
   3211 	if ((raidID < 0) || (raidID >= numraid)) {
   3212 		/* let's not wander off into lala land. */
   3213 		raidID = numraid - 1;
   3214 	}
   3215 	if (raidPtrs[raidID]->valid != 0) {
   3216 
   3217 		/*
   3218 		   Nope... Go looking for an alternative...
   3219 		   Start high so we don't immediately use raid0 if that's
   3220 		   not taken.
   3221 		*/
   3222 
   3223 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
   3224 			if (raidPtrs[raidID]->valid == 0) {
   3225 				/* can use this one! */
   3226 				break;
   3227 			}
   3228 		}
   3229 	}
   3230 
   3231 	if (raidID < 0) {
   3232 		/* punt... */
   3233 		printf("Unable to auto configure this set!\n");
   3234 		printf("(Out of RAID devs!)\n");
   3235 		return(1);
   3236 	}
   3237 
   3238 #if DEBUG
   3239 	printf("Configuring raid%d:\n",raidID);
   3240 #endif
   3241 
   3242 	raidPtr = raidPtrs[raidID];
   3243 
   3244 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
   3245 	raidPtr->raidid = raidID;
   3246 	raidPtr->openings = RAIDOUTSTANDING;
   3247 
   3248 	/* 3. Build the configuration structure */
   3249 	rf_create_configuration(cset->ac, config, raidPtr);
   3250 
   3251 	/* 4. Do the configuration */
   3252 	retcode = rf_Configure(raidPtr, config, cset->ac);
   3253 
   3254 	if (retcode == 0) {
   3255 
   3256 		raidinit(raidPtrs[raidID]);
   3257 
   3258 		rf_markalldirty(raidPtrs[raidID]);
   3259 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
   3260 		if (cset->ac->clabel->root_partition==1) {
   3261 			/* everything configured just fine.  Make a note
   3262 			   that this set is eligible to be root. */
   3263 			cset->rootable = 1;
   3264 			/* XXX do this here? */
   3265 			raidPtrs[raidID]->root_partition = 1;
   3266 		}
   3267 	}
   3268 
   3269 	/* 5. Cleanup */
   3270 	free(config, M_RAIDFRAME);
   3271 
   3272 	*unit = raidID;
   3273 	return(retcode);
   3274 }
   3275 
   3276 void
   3277 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
   3278 {
   3279 	struct buf *bp;
   3280 
   3281 	bp = (struct buf *)desc->bp;
   3282 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
   3283 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
   3284 }
   3285 
   3286 void
   3287 rf_pool_init(struct pool *p, size_t size, const char *w_chan,
   3288 	     size_t xmin, size_t xmax)
   3289 {
   3290 	pool_init(p, size, 0, 0, 0, w_chan, NULL);
   3291 	pool_sethiwat(p, xmax);
   3292 	pool_prime(p, xmin);
   3293 	pool_setlowat(p, xmin);
   3294 }
   3295 
   3296 /*
   3297  * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
   3298  * if there is IO pending and if that IO could possibly be done for a
   3299  * given RAID set.  Returns 0 if IO is waiting and can be done, 1
   3300  * otherwise.
   3301  *
   3302  */
   3303 
   3304 int
   3305 rf_buf_queue_check(int raidid)
   3306 {
   3307 	if ((BUFQ_PEEK(raid_softc[raidid].buf_queue) != NULL) &&
   3308 	    raidPtrs[raidid]->openings > 0) {
   3309 		/* there is work to do */
   3310 		return 0;
   3311 	}
   3312 	/* default is nothing to do */
   3313 	return 1;
   3314 }
   3315