      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.193 2006/01/04 04:56:41 oster Exp $	*/
      2 /*-
      3  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster; Jason R. Thorpe.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *        This product includes software developed by the NetBSD
     20  *        Foundation, Inc. and its contributors.
     21  * 4. Neither the name of The NetBSD Foundation nor the names of its
     22  *    contributors may be used to endorse or promote products derived
     23  *    from this software without specific prior written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright (c) 1990, 1993
     40  *      The Regents of the University of California.  All rights reserved.
     41  *
     42  * This code is derived from software contributed to Berkeley by
     43  * the Systems Programming Group of the University of Utah Computer
     44  * Science Department.
     45  *
     46  * Redistribution and use in source and binary forms, with or without
     47  * modification, are permitted provided that the following conditions
     48  * are met:
     49  * 1. Redistributions of source code must retain the above copyright
     50  *    notice, this list of conditions and the following disclaimer.
     51  * 2. Redistributions in binary form must reproduce the above copyright
     52  *    notice, this list of conditions and the following disclaimer in the
     53  *    documentation and/or other materials provided with the distribution.
     54  * 3. Neither the name of the University nor the names of its contributors
     55  *    may be used to endorse or promote products derived from this software
     56  *    without specific prior written permission.
     57  *
     58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     68  * SUCH DAMAGE.
     69  *
     70  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     71  *
     72  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     73  */
     74 
     75 /*
     76  * Copyright (c) 1988 University of Utah.
     77  *
     78  * This code is derived from software contributed to Berkeley by
     79  * the Systems Programming Group of the University of Utah Computer
     80  * Science Department.
     81  *
     82  * Redistribution and use in source and binary forms, with or without
     83  * modification, are permitted provided that the following conditions
     84  * are met:
     85  * 1. Redistributions of source code must retain the above copyright
     86  *    notice, this list of conditions and the following disclaimer.
     87  * 2. Redistributions in binary form must reproduce the above copyright
     88  *    notice, this list of conditions and the following disclaimer in the
     89  *    documentation and/or other materials provided with the distribution.
     90  * 3. All advertising materials mentioning features or use of this software
     91  *    must display the following acknowledgement:
     92  *      This product includes software developed by the University of
     93  *      California, Berkeley and its contributors.
     94  * 4. Neither the name of the University nor the names of its contributors
     95  *    may be used to endorse or promote products derived from this software
     96  *    without specific prior written permission.
     97  *
     98  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     99  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    100  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    101  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
    102  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    103  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
    104  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    105  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    106  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    107  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    108  * SUCH DAMAGE.
    109  *
    110  * from: Utah $Hdr: cd.c 1.6 90/11/28$
    111  *
    112  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
    113  */
    114 
    115 /*
    116  * Copyright (c) 1995 Carnegie-Mellon University.
    117  * All rights reserved.
    118  *
    119  * Authors: Mark Holland, Jim Zelenka
    120  *
    121  * Permission to use, copy, modify and distribute this software and
    122  * its documentation is hereby granted, provided that both the copyright
    123  * notice and this permission notice appear in all copies of the
    124  * software, derivative works or modified versions, and any portions
    125  * thereof, and that both notices appear in supporting documentation.
    126  *
    127  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
    128  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
    129  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
    130  *
    131  * Carnegie Mellon requests users of this software to return to
    132  *
     133  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
    134  *  School of Computer Science
    135  *  Carnegie Mellon University
    136  *  Pittsburgh PA 15213-3890
    137  *
    138  * any improvements or extensions that they make and grant Carnegie the
    139  * rights to redistribute these changes.
    140  */
    141 
    142 /***********************************************************
    143  *
    144  * rf_kintf.c -- the kernel interface routines for RAIDframe
    145  *
    146  ***********************************************************/
    147 
    148 #include <sys/cdefs.h>
    149 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.193 2006/01/04 04:56:41 oster Exp $");
    150 
    151 #include <sys/param.h>
    152 #include <sys/errno.h>
    153 #include <sys/pool.h>
    154 #include <sys/proc.h>
    155 #include <sys/queue.h>
    156 #include <sys/disk.h>
    157 #include <sys/device.h>
    158 #include <sys/stat.h>
    159 #include <sys/ioctl.h>
    160 #include <sys/fcntl.h>
    161 #include <sys/systm.h>
    162 #include <sys/namei.h>
    163 #include <sys/vnode.h>
    164 #include <sys/disklabel.h>
    165 #include <sys/conf.h>
    166 #include <sys/lock.h>
    167 #include <sys/buf.h>
    168 #include <sys/bufq.h>
    169 #include <sys/user.h>
    170 #include <sys/reboot.h>
    171 
    172 #include <dev/raidframe/raidframevar.h>
    173 #include <dev/raidframe/raidframeio.h>
    174 #include "raid.h"
    175 #include "opt_raid_autoconfig.h"
    176 #include "rf_raid.h"
    177 #include "rf_copyback.h"
    178 #include "rf_dag.h"
    179 #include "rf_dagflags.h"
    180 #include "rf_desc.h"
    181 #include "rf_diskqueue.h"
    182 #include "rf_etimer.h"
    183 #include "rf_general.h"
    184 #include "rf_kintf.h"
    185 #include "rf_options.h"
    186 #include "rf_driver.h"
    187 #include "rf_parityscan.h"
    188 #include "rf_threadstuff.h"
    189 
    190 #ifdef DEBUG
    191 int     rf_kdebug_level = 0;
    192 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    193 #else				/* DEBUG */
    194 #define db1_printf(a) { }
    195 #endif				/* DEBUG */
    196 
    197 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
    198 
    199 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
    200 
    201 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    202 						 * spare table */
    203 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    204 						 * installation process */
    205 
    206 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    207 
    208 /* prototypes */
    209 static void KernelWakeupFunc(struct buf *);
    210 static void InitBP(struct buf *, struct vnode *, unsigned,
    211     dev_t, RF_SectorNum_t, RF_SectorCount_t, caddr_t, void (*) (struct buf *),
    212     void *, int, struct proc *);
    213 static void raidinit(RF_Raid_t *);
    214 
    215 void raidattach(int);
    216 
    217 dev_type_open(raidopen);
    218 dev_type_close(raidclose);
    219 dev_type_read(raidread);
    220 dev_type_write(raidwrite);
    221 dev_type_ioctl(raidioctl);
    222 dev_type_strategy(raidstrategy);
    223 dev_type_dump(raiddump);
    224 dev_type_size(raidsize);
    225 
    226 const struct bdevsw raid_bdevsw = {
    227 	raidopen, raidclose, raidstrategy, raidioctl,
    228 	raiddump, raidsize, D_DISK
    229 };
    230 
    231 const struct cdevsw raid_cdevsw = {
    232 	raidopen, raidclose, raidread, raidwrite, raidioctl,
    233 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    234 };
    235 
    236 /*
    237  * Pilfered from ccd.c
    238  */
    239 
    240 struct raidbuf {
    241 	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! */
    242 	struct buf *rf_obp;	/* ptr. to original I/O buf */
    243 	RF_DiskQueueData_t *req;/* the request that this was part of.. */
    244 };
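/*
 * An illustrative sketch (not part of the driver) of why rf_buf must stay
 * the first member: the I/O completion path only gets a "struct buf *"
 * back, and recovers the enclosing raidbuf with a plain cast, which works
 * only while rf_buf sits at offset zero.
 *
 *	static void
 *	example_iodone(struct buf *bp)
 *	{
 *		// valid only because rf_buf is the first member
 *		struct raidbuf *raidbp = (struct raidbuf *)bp;
 *
 *		biodone(raidbp->rf_obp);	// complete the original request
 *	}
 */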
    245 
    246 /* XXX Not sure if the following should be replacing the raidPtrs above,
    247    or if it should be used in conjunction with that...
    248 */
    249 
    250 struct raid_softc {
    251 	int     sc_flags;	/* flags */
    252 	int     sc_cflags;	/* configuration flags */
    253 	size_t  sc_size;        /* size of the raid device */
    254 	char    sc_xname[20];	/* XXX external name */
    255 	struct disk sc_dkdev;	/* generic disk device info */
    256 	struct bufq_state *buf_queue;	/* used for the device queue */
    257 };
    258 /* sc_flags */
    259 #define RAIDF_INITED	0x01	/* unit has been initialized */
    260 #define RAIDF_WLABEL	0x02	/* label area is writable */
    261 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    262 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    263 #define RAIDF_LOCKED	0x80	/* unit is locked */
    264 
    265 #define	raidunit(x)	DISKUNIT(x)
    266 int numraid = 0;
    267 
    268 /*
    269  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    270  * Be aware that large numbers can allow the driver to consume a lot of
    271  * kernel memory, especially on writes, and in degraded mode reads.
    272  *
    273  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    274  * a single 64K write will typically require 64K for the old data,
    275  * 64K for the old parity, and 64K for the new parity, for a total
    276  * of 192K (if the parity buffer is not re-used immediately).
     277  * Even if it is used immediately, that's still 128K, which when multiplied
    278  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    279  *
    280  * Now in degraded mode, for example, a 64K read on the above setup may
    281  * require data reconstruction, which will require *all* of the 4 remaining
    282  * disks to participate -- 4 * 32K/disk == 128K again.
    283  */
    284 
    285 #ifndef RAIDOUTSTANDING
    286 #define RAIDOUTSTANDING   6
    287 #endif
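/*
 * A rough worst-case figure for the example above (illustrative only, not
 * a bound enforced anywhere): with the default RAIDOUTSTANDING of 6 and a
 * 5-disk set using 32k stripe units,
 *
 *	6 requests * (64K old data + 64K old parity + 64K new parity)
 *	    = 6 * 192K = 1152K of internal buffers,
 *
 * on top of the 6 * 64K = 384K of incoming data itself.
 */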
    288 
    289 #define RAIDLABELDEV(dev)	\
    290 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
    291 
    292 /* declared here, and made public, for the benefit of KVM stuff.. */
    293 struct raid_softc *raid_softc;
    294 
    295 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
    296 				     struct disklabel *);
    297 static void raidgetdisklabel(dev_t);
    298 static void raidmakedisklabel(struct raid_softc *);
    299 
    300 static int raidlock(struct raid_softc *);
    301 static void raidunlock(struct raid_softc *);
    302 
    303 static void rf_markalldirty(RF_Raid_t *);
    304 
    305 struct device *raidrootdev;
    306 
    307 void rf_ReconThread(struct rf_recon_req *);
    308 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    309 void rf_CopybackThread(RF_Raid_t *raidPtr);
    310 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    311 int rf_autoconfig(struct device *self);
    312 void rf_buildroothack(RF_ConfigSet_t *);
    313 
    314 RF_AutoConfig_t *rf_find_raid_components(void);
    315 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    316 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    317 static int rf_reasonable_label(RF_ComponentLabel_t *);
    318 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    319 int rf_set_autoconfig(RF_Raid_t *, int);
    320 int rf_set_rootpartition(RF_Raid_t *, int);
    321 void rf_release_all_vps(RF_ConfigSet_t *);
    322 void rf_cleanup_config_set(RF_ConfigSet_t *);
    323 int rf_have_enough_components(RF_ConfigSet_t *);
    324 int rf_auto_config_set(RF_ConfigSet_t *, int *);
    325 
    326 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
    327 				  allow autoconfig to take place.
    328 			          Note that this is overridden by having
    329 			          RAID_AUTOCONFIG as an option in the
    330 			          kernel config file.  */
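/*
 * To force autoconfiguration on, build a kernel with the line below in its
 * configuration file; that defines RAID_AUTOCONFIG, which the #ifdef in
 * raidattach() uses to set raidautoconfig to 1:
 *
 *	options 	RAID_AUTOCONFIG
 */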
    331 
    332 struct RF_Pools_s rf_pools;
    333 
    334 void
    335 raidattach(int num)
    336 {
    337 	int raidID;
    338 	int i, rc;
    339 
    340 #ifdef DEBUG
    341 	printf("raidattach: Asked for %d units\n", num);
    342 #endif
    343 
    344 	if (num <= 0) {
    345 #ifdef DIAGNOSTIC
    346 		panic("raidattach: count <= 0");
    347 #endif
    348 		return;
    349 	}
    350 	/* This is where all the initialization stuff gets done. */
    351 
    352 	numraid = num;
    353 
    354 	/* Make some space for requested number of units... */
    355 
    356 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
    357 	if (raidPtrs == NULL) {
    358 		panic("raidPtrs is NULL!!");
    359 	}
    360 
    361 	/* Initialize the component buffer pool. */
    362 	rf_pool_init(&rf_pools.cbuf, sizeof(struct raidbuf),
    363 		     "raidpl", num * RAIDOUTSTANDING,
    364 		     2 * num * RAIDOUTSTANDING);
    365 
    366 	rf_mutex_init(&rf_sparet_wait_mutex);
    367 
    368 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
    369 
    370 	for (i = 0; i < num; i++)
    371 		raidPtrs[i] = NULL;
    372 	rc = rf_BootRaidframe();
    373 	if (rc == 0)
    374 		printf("Kernelized RAIDframe activated\n");
    375 	else
    376 		panic("Serious error booting RAID!!");
    377 
     378 	/* put together some data structures like the CCD device does.  This
    379 	 * lets us lock the device and what-not when it gets opened. */
    380 
    381 	raid_softc = (struct raid_softc *)
    382 		malloc(num * sizeof(struct raid_softc),
    383 		       M_RAIDFRAME, M_NOWAIT);
    384 	if (raid_softc == NULL) {
    385 		printf("WARNING: no memory for RAIDframe driver\n");
    386 		return;
    387 	}
    388 
    389 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
    390 
    391 	raidrootdev = (struct device *)malloc(num * sizeof(struct device),
    392 					      M_RAIDFRAME, M_NOWAIT);
    393 	if (raidrootdev == NULL) {
    394 		panic("No memory for RAIDframe driver!!?!?!");
    395 	}
    396 
    397 	for (raidID = 0; raidID < num; raidID++) {
    398 		bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);
    399 		pseudo_disk_init(&raid_softc[raidID].sc_dkdev);
    400 
    401 		raidrootdev[raidID].dv_class  = DV_DISK;
    402 		raidrootdev[raidID].dv_cfdata = NULL;
    403 		raidrootdev[raidID].dv_unit   = raidID;
    404 		raidrootdev[raidID].dv_parent = NULL;
    405 		raidrootdev[raidID].dv_flags  = 0;
    406 		snprintf(raidrootdev[raidID].dv_xname,
    407 		    sizeof(raidrootdev[raidID].dv_xname), "raid%d", raidID);
    408 
    409 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
    410 			  (RF_Raid_t *));
    411 		if (raidPtrs[raidID] == NULL) {
    412 			printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
    413 			numraid = raidID;
    414 			return;
    415 		}
    416 	}
    417 
    418 #ifdef RAID_AUTOCONFIG
    419 	raidautoconfig = 1;
    420 #endif
    421 
    422 	/*
    423 	 * Register a finalizer which will be used to auto-config RAID
    424 	 * sets once all real hardware devices have been found.
    425 	 */
    426 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
    427 		printf("WARNING: unable to register RAIDframe finalizer\n");
    428 }
    429 
    430 int
    431 rf_autoconfig(struct device *self)
    432 {
    433 	RF_AutoConfig_t *ac_list;
    434 	RF_ConfigSet_t *config_sets;
    435 
    436 	if (raidautoconfig == 0)
    437 		return (0);
    438 
    439 	/* XXX This code can only be run once. */
    440 	raidautoconfig = 0;
    441 
    442 	/* 1. locate all RAID components on the system */
    443 #ifdef DEBUG
    444 	printf("Searching for RAID components...\n");
    445 #endif
    446 	ac_list = rf_find_raid_components();
    447 
    448 	/* 2. Sort them into their respective sets. */
    449 	config_sets = rf_create_auto_sets(ac_list);
    450 
    451 	/*
     452 	 * 3. Evaluate each set and configure the valid ones.
    453 	 * This gets done in rf_buildroothack().
    454 	 */
    455 	rf_buildroothack(config_sets);
    456 
    457 	return (1);
    458 }
    459 
    460 void
    461 rf_buildroothack(RF_ConfigSet_t *config_sets)
    462 {
    463 	RF_ConfigSet_t *cset;
    464 	RF_ConfigSet_t *next_cset;
    465 	int retcode;
    466 	int raidID;
    467 	int rootID;
    468 	int num_root;
    469 
    470 	rootID = 0;
    471 	num_root = 0;
    472 	cset = config_sets;
    473 	while(cset != NULL ) {
    474 		next_cset = cset->next;
    475 		if (rf_have_enough_components(cset) &&
    476 		    cset->ac->clabel->autoconfigure==1) {
    477 			retcode = rf_auto_config_set(cset,&raidID);
    478 			if (!retcode) {
    479 				if (cset->rootable) {
    480 					rootID = raidID;
    481 					num_root++;
    482 				}
    483 			} else {
    484 				/* The autoconfig didn't work :( */
    485 #if DEBUG
    486 				printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
    487 #endif
    488 				rf_release_all_vps(cset);
    489 			}
    490 		} else {
    491 			/* we're not autoconfiguring this set...
    492 			   release the associated resources */
    493 			rf_release_all_vps(cset);
    494 		}
    495 		/* cleanup */
    496 		rf_cleanup_config_set(cset);
    497 		cset = next_cset;
    498 	}
    499 
    500 	/* we found something bootable... */
    501 
    502 	if (num_root == 1) {
    503 		booted_device = &raidrootdev[rootID];
    504 	} else if (num_root > 1) {
    505 		/* we can't guess.. require the user to answer... */
    506 		boothowto |= RB_ASKNAME;
    507 	}
    508 }
    509 
    510 
    511 int
    512 raidsize(dev_t dev)
    513 {
    514 	struct raid_softc *rs;
    515 	struct disklabel *lp;
    516 	int     part, unit, omask, size;
    517 
    518 	unit = raidunit(dev);
    519 	if (unit >= numraid)
    520 		return (-1);
    521 	rs = &raid_softc[unit];
    522 
    523 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    524 		return (-1);
    525 
    526 	part = DISKPART(dev);
    527 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
    528 	lp = rs->sc_dkdev.dk_label;
    529 
    530 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curlwp))
    531 		return (-1);
    532 
    533 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    534 		size = -1;
    535 	else
    536 		size = lp->d_partitions[part].p_size *
    537 		    (lp->d_secsize / DEV_BSIZE);
    538 
    539 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curlwp))
    540 		return (-1);
    541 
    542 	return (size);
    543 
    544 }
    545 
    546 int
    547 raiddump(dev_t dev, daddr_t blkno, caddr_t va, size_t  size)
    548 {
    549 	/* Not implemented. */
    550 	return ENXIO;
    551 }
    552 /* ARGSUSED */
    553 int
    554 raidopen(dev_t dev, int flags, int fmt, struct lwp *l)
    555 {
    556 	int     unit = raidunit(dev);
    557 	struct raid_softc *rs;
    558 	struct disklabel *lp;
    559 	int     part, pmask;
    560 	int     error = 0;
    561 
    562 	if (unit >= numraid)
    563 		return (ENXIO);
    564 	rs = &raid_softc[unit];
    565 
    566 	if ((error = raidlock(rs)) != 0)
    567 		return (error);
    568 	lp = rs->sc_dkdev.dk_label;
    569 
    570 	part = DISKPART(dev);
    571 	pmask = (1 << part);
    572 
    573 	if ((rs->sc_flags & RAIDF_INITED) &&
    574 	    (rs->sc_dkdev.dk_openmask == 0))
    575 		raidgetdisklabel(dev);
    576 
    577 	/* make sure that this partition exists */
    578 
    579 	if (part != RAW_PART) {
    580 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
    581 		    ((part >= lp->d_npartitions) ||
    582 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    583 			error = ENXIO;
    584 			raidunlock(rs);
    585 			return (error);
    586 		}
    587 	}
    588 	/* Prevent this unit from being unconfigured while open. */
    589 	switch (fmt) {
    590 	case S_IFCHR:
    591 		rs->sc_dkdev.dk_copenmask |= pmask;
    592 		break;
    593 
    594 	case S_IFBLK:
    595 		rs->sc_dkdev.dk_bopenmask |= pmask;
    596 		break;
    597 	}
    598 
    599 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    600 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    601 		/* First one... mark things as dirty... Note that we *MUST*
    602 		 have done a configure before this.  I DO NOT WANT TO BE
    603 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    604 		 THAT THEY BELONG TOGETHER!!!!! */
    605 		/* XXX should check to see if we're only open for reading
    606 		   here... If so, we needn't do this, but then need some
    607 		   other way of keeping track of what's happened.. */
    608 
    609 		rf_markalldirty( raidPtrs[unit] );
    610 	}
    611 
    612 
    613 	rs->sc_dkdev.dk_openmask =
    614 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    615 
    616 	raidunlock(rs);
    617 
    618 	return (error);
    619 
    620 
    621 }
    622 /* ARGSUSED */
    623 int
    624 raidclose(dev_t dev, int flags, int fmt, struct lwp *l)
    625 {
    626 	int     unit = raidunit(dev);
    627 	struct raid_softc *rs;
    628 	int     error = 0;
    629 	int     part;
    630 
    631 	if (unit >= numraid)
    632 		return (ENXIO);
    633 	rs = &raid_softc[unit];
    634 
    635 	if ((error = raidlock(rs)) != 0)
    636 		return (error);
    637 
    638 	part = DISKPART(dev);
    639 
    640 	/* ...that much closer to allowing unconfiguration... */
    641 	switch (fmt) {
    642 	case S_IFCHR:
    643 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
    644 		break;
    645 
    646 	case S_IFBLK:
    647 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
    648 		break;
    649 	}
    650 	rs->sc_dkdev.dk_openmask =
    651 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    652 
    653 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    654 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
     655 		/* Last one... the device is not unconfigured yet.
     656 		   Mark things as clean.  (Device shutdown will have
     657 		   already taken care of setting the clean bits if
     658 		   RAIDF_INITED is not set.) */
    659 
    660 		rf_update_component_labels(raidPtrs[unit],
    661 						 RF_FINAL_COMPONENT_UPDATE);
    662 		if (doing_shutdown) {
    663 			/* last one, and we're going down, so
    664 			   lights out for this RAID set too. */
    665 			error = rf_Shutdown(raidPtrs[unit]);
    666 
    667 			/* It's no longer initialized... */
    668 			rs->sc_flags &= ~RAIDF_INITED;
    669 
    670 			/* Detach the disk. */
    671 			pseudo_disk_detach(&rs->sc_dkdev);
    672 		}
    673 	}
    674 
    675 	raidunlock(rs);
    676 	return (0);
    677 
    678 }
    679 
    680 void
    681 raidstrategy(struct buf *bp)
    682 {
    683 	int s;
    684 
    685 	unsigned int raidID = raidunit(bp->b_dev);
    686 	RF_Raid_t *raidPtr;
    687 	struct raid_softc *rs = &raid_softc[raidID];
    688 	int     wlabel;
    689 
     690 	if (raidID >= numraid || !raidPtrs[raidID]) {
     691 		bp->b_error = ENODEV;
     692 		bp->b_flags |= B_ERROR;
     693 		bp->b_resid = bp->b_bcount;
     694 		biodone(bp);
     695 		return;
     696 	}
     697 	if ((rs->sc_flags & RAIDF_INITED) == 0) {
     698 		bp->b_error = ENXIO;
     699 		bp->b_flags |= B_ERROR;
     700 		bp->b_resid = bp->b_bcount;
     701 		biodone(bp);
     702 		return;
     703 	}
    704 	raidPtr = raidPtrs[raidID];
    705 	if (!raidPtr->valid) {
    706 		bp->b_error = ENODEV;
    707 		bp->b_flags |= B_ERROR;
    708 		bp->b_resid = bp->b_bcount;
    709 		biodone(bp);
    710 		return;
    711 	}
    712 	if (bp->b_bcount == 0) {
    713 		db1_printf(("b_bcount is zero..\n"));
    714 		biodone(bp);
    715 		return;
    716 	}
    717 
    718 	/*
    719 	 * Do bounds checking and adjust transfer.  If there's an
    720 	 * error, the bounds check will flag that for us.
    721 	 */
    722 
    723 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
    724 	if (DISKPART(bp->b_dev) != RAW_PART)
    725 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
    726 			db1_printf(("Bounds check failed!!:%d %d\n",
    727 				(int) bp->b_blkno, (int) wlabel));
    728 			biodone(bp);
    729 			return;
    730 		}
    731 	s = splbio();
    732 
    733 	bp->b_resid = 0;
    734 
    735 	/* stuff it onto our queue */
    736 	BUFQ_PUT(rs->buf_queue, bp);
    737 
     738 	/* schedule the I/O to happen at the next convenient time */
    739 	wakeup(&(raidPtrs[raidID]->iodone));
    740 
    741 	splx(s);
    742 }
    743 /* ARGSUSED */
    744 int
    745 raidread(dev_t dev, struct uio *uio, int flags)
    746 {
    747 	int     unit = raidunit(dev);
    748 	struct raid_softc *rs;
    749 
    750 	if (unit >= numraid)
    751 		return (ENXIO);
    752 	rs = &raid_softc[unit];
    753 
    754 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    755 		return (ENXIO);
    756 
    757 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    758 
    759 }
    760 /* ARGSUSED */
    761 int
    762 raidwrite(dev_t dev, struct uio *uio, int flags)
    763 {
    764 	int     unit = raidunit(dev);
    765 	struct raid_softc *rs;
    766 
    767 	if (unit >= numraid)
    768 		return (ENXIO);
    769 	rs = &raid_softc[unit];
    770 
    771 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    772 		return (ENXIO);
    773 
    774 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    775 
    776 }
    777 
    778 int
    779 raidioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
    780 {
    781 	int     unit = raidunit(dev);
    782 	int     error = 0;
    783 	int     part, pmask;
    784 	struct raid_softc *rs;
    785 	RF_Config_t *k_cfg, *u_cfg;
    786 	RF_Raid_t *raidPtr;
    787 	RF_RaidDisk_t *diskPtr;
    788 	RF_AccTotals_t *totals;
    789 	RF_DeviceConfig_t *d_cfg, **ucfgp;
    790 	u_char *specific_buf;
    791 	int retcode = 0;
    792 	int column;
    793 	int raidid;
    794 	struct rf_recon_req *rrcopy, *rr;
    795 	RF_ComponentLabel_t *clabel;
    796 	RF_ComponentLabel_t ci_label;
    797 	RF_ComponentLabel_t **clabel_ptr;
    798 	RF_SingleComponent_t *sparePtr,*componentPtr;
    799 	RF_SingleComponent_t hot_spare;
    800 	RF_SingleComponent_t component;
    801 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
    802 	int i, j, d;
    803 #ifdef __HAVE_OLD_DISKLABEL
    804 	struct disklabel newlabel;
    805 #endif
    806 
    807 	if (unit >= numraid)
    808 		return (ENXIO);
    809 	rs = &raid_softc[unit];
    810 	raidPtr = raidPtrs[unit];
    811 
    812 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
    813 		(int) DISKPART(dev), (int) unit, (int) cmd));
    814 
    815 	/* Must be open for writes for these commands... */
    816 	switch (cmd) {
    817 	case DIOCSDINFO:
    818 	case DIOCWDINFO:
    819 #ifdef __HAVE_OLD_DISKLABEL
    820 	case ODIOCWDINFO:
    821 	case ODIOCSDINFO:
    822 #endif
    823 	case DIOCWLABEL:
    824 		if ((flag & FWRITE) == 0)
    825 			return (EBADF);
    826 	}
    827 
    828 	/* Must be initialized for these... */
    829 	switch (cmd) {
    830 	case DIOCGDINFO:
    831 	case DIOCSDINFO:
    832 	case DIOCWDINFO:
    833 #ifdef __HAVE_OLD_DISKLABEL
    834 	case ODIOCGDINFO:
    835 	case ODIOCWDINFO:
    836 	case ODIOCSDINFO:
    837 	case ODIOCGDEFLABEL:
    838 #endif
    839 	case DIOCGPART:
    840 	case DIOCWLABEL:
    841 	case DIOCGDEFLABEL:
    842 	case RAIDFRAME_SHUTDOWN:
    843 	case RAIDFRAME_REWRITEPARITY:
    844 	case RAIDFRAME_GET_INFO:
    845 	case RAIDFRAME_RESET_ACCTOTALS:
    846 	case RAIDFRAME_GET_ACCTOTALS:
    847 	case RAIDFRAME_KEEP_ACCTOTALS:
    848 	case RAIDFRAME_GET_SIZE:
    849 	case RAIDFRAME_FAIL_DISK:
    850 	case RAIDFRAME_COPYBACK:
    851 	case RAIDFRAME_CHECK_RECON_STATUS:
    852 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
    853 	case RAIDFRAME_GET_COMPONENT_LABEL:
    854 	case RAIDFRAME_SET_COMPONENT_LABEL:
    855 	case RAIDFRAME_ADD_HOT_SPARE:
    856 	case RAIDFRAME_REMOVE_HOT_SPARE:
    857 	case RAIDFRAME_INIT_LABELS:
    858 	case RAIDFRAME_REBUILD_IN_PLACE:
    859 	case RAIDFRAME_CHECK_PARITY:
    860 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
    861 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
    862 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
    863 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
    864 	case RAIDFRAME_SET_AUTOCONFIG:
    865 	case RAIDFRAME_SET_ROOT:
    866 	case RAIDFRAME_DELETE_COMPONENT:
    867 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
    868 		if ((rs->sc_flags & RAIDF_INITED) == 0)
    869 			return (ENXIO);
    870 	}
    871 
    872 	switch (cmd) {
    873 
    874 		/* configure the system */
    875 	case RAIDFRAME_CONFIGURE:
    876 
    877 		if (raidPtr->valid) {
    878 			/* There is a valid RAID set running on this unit! */
    879 			printf("raid%d: Device already configured!\n",unit);
    880 			return(EINVAL);
    881 		}
    882 
    883 		/* copy-in the configuration information */
    884 		/* data points to a pointer to the configuration structure */
    885 
    886 		u_cfg = *((RF_Config_t **) data);
    887 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
    888 		if (k_cfg == NULL) {
    889 			return (ENOMEM);
    890 		}
    891 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
    892 		if (retcode) {
    893 			RF_Free(k_cfg, sizeof(RF_Config_t));
    894 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
    895 				retcode));
    896 			return (retcode);
    897 		}
    898 		/* allocate a buffer for the layout-specific data, and copy it
    899 		 * in */
    900 		if (k_cfg->layoutSpecificSize) {
    901 			if (k_cfg->layoutSpecificSize > 10000) {
    902 				/* sanity check */
    903 				RF_Free(k_cfg, sizeof(RF_Config_t));
    904 				return (EINVAL);
    905 			}
    906 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
    907 			    (u_char *));
    908 			if (specific_buf == NULL) {
    909 				RF_Free(k_cfg, sizeof(RF_Config_t));
    910 				return (ENOMEM);
    911 			}
    912 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
    913 			    k_cfg->layoutSpecificSize);
    914 			if (retcode) {
    915 				RF_Free(k_cfg, sizeof(RF_Config_t));
    916 				RF_Free(specific_buf,
    917 					k_cfg->layoutSpecificSize);
    918 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
    919 					retcode));
    920 				return (retcode);
    921 			}
    922 		} else
    923 			specific_buf = NULL;
    924 		k_cfg->layoutSpecific = specific_buf;
    925 
    926 		/* should do some kind of sanity check on the configuration.
    927 		 * Store the sum of all the bytes in the last byte? */
    928 
    929 		/* configure the system */
    930 
    931 		/*
    932 		 * Clear the entire RAID descriptor, just to make sure
    933 		 *  there is no stale data left in the case of a
    934 		 *  reconfiguration
    935 		 */
    936 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
    937 		raidPtr->raidid = unit;
    938 
    939 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
    940 
    941 		if (retcode == 0) {
    942 
    943 			/* allow this many simultaneous IO's to
    944 			   this RAID device */
    945 			raidPtr->openings = RAIDOUTSTANDING;
    946 
    947 			raidinit(raidPtr);
    948 			rf_markalldirty(raidPtr);
    949 		}
    950 		/* free the buffers.  No return code here. */
    951 		if (k_cfg->layoutSpecificSize) {
    952 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
    953 		}
    954 		RF_Free(k_cfg, sizeof(RF_Config_t));
    955 
    956 		return (retcode);
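/*
 * An illustrative sketch (not part of this driver) of how a userland tool
 * such as raidctl(8) drives the case above.  Note the double indirection:
 * the ioctl argument is a pointer to an RF_Config_t pointer, matching the
 * "*((RF_Config_t **) data)" copy-in.
 *
 *	RF_Config_t cfg, *cfgp = &cfg;
 *
 *	// fill in cfg (numCol, parityConfig, devnames[][], ...) first
 *	if (ioctl(fd, RAIDFRAME_CONFIGURE, &cfgp) == -1)
 *		err(1, "RAIDFRAME_CONFIGURE");
 */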
    957 
    958 		/* shutdown the system */
    959 	case RAIDFRAME_SHUTDOWN:
    960 
    961 		if ((error = raidlock(rs)) != 0)
    962 			return (error);
    963 
    964 		/*
    965 		 * If somebody has a partition mounted, we shouldn't
    966 		 * shutdown.
    967 		 */
    968 
    969 		part = DISKPART(dev);
    970 		pmask = (1 << part);
    971 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
    972 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
    973 			(rs->sc_dkdev.dk_copenmask & pmask))) {
    974 			raidunlock(rs);
    975 			return (EBUSY);
    976 		}
    977 
    978 		retcode = rf_Shutdown(raidPtr);
    979 
    980 		/* It's no longer initialized... */
    981 		rs->sc_flags &= ~RAIDF_INITED;
    982 
    983 		/* Detach the disk. */
    984 		pseudo_disk_detach(&rs->sc_dkdev);
    985 
    986 		raidunlock(rs);
    987 
    988 		return (retcode);
    989 	case RAIDFRAME_GET_COMPONENT_LABEL:
    990 		clabel_ptr = (RF_ComponentLabel_t **) data;
    991 		/* need to read the component label for the disk indicated
    992 		   by row,column in clabel */
    993 
     994 		/* For practice, let's get it directly from disk, rather
    995 		   than from the in-core copy */
    996 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
    997 			   (RF_ComponentLabel_t *));
    998 		if (clabel == NULL)
    999 			return (ENOMEM);
   1000 
   1001 		memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
   1002 
   1003 		retcode = copyin( *clabel_ptr, clabel,
   1004 				  sizeof(RF_ComponentLabel_t));
   1005 
   1006 		if (retcode) {
   1007 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1008 			return(retcode);
   1009 		}
   1010 
   1011 		clabel->row = 0; /* Don't allow looking at anything else.*/
   1012 
   1013 		column = clabel->column;
   1014 
   1015 		if ((column < 0) || (column >= raidPtr->numCol +
   1016 				     raidPtr->numSpare)) {
   1017 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1018 			return(EINVAL);
   1019 		}
   1020 
   1021 		raidread_component_label(raidPtr->Disks[column].dev,
   1022 				raidPtr->raid_cinfo[column].ci_vp,
   1023 				clabel );
   1024 
   1025 		retcode = copyout(clabel, *clabel_ptr,
   1026 				  sizeof(RF_ComponentLabel_t));
   1027 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
   1028 		return (retcode);
   1029 
   1030 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1031 		clabel = (RF_ComponentLabel_t *) data;
   1032 
   1033 		/* XXX check the label for valid stuff... */
   1034 		/* Note that some things *should not* get modified --
   1035 		   the user should be re-initing the labels instead of
   1036 		   trying to patch things.
   1037 		   */
   1038 
   1039 		raidid = raidPtr->raidid;
   1040 #if DEBUG
   1041 		printf("raid%d: Got component label:\n", raidid);
   1042 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1043 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1044 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1045 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1046 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1047 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1048 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1049 #endif
   1050 		clabel->row = 0;
   1051 		column = clabel->column;
   1052 
   1053 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1054 			return(EINVAL);
   1055 		}
   1056 
   1057 		/* XXX this isn't allowed to do anything for now :-) */
   1058 
   1059 		/* XXX and before it is, we need to fill in the rest
   1060 		   of the fields!?!?!?! */
   1061 #if 0
   1062 		raidwrite_component_label(
   1063                             raidPtr->Disks[column].dev,
   1064 			    raidPtr->raid_cinfo[column].ci_vp,
   1065 			    clabel );
   1066 #endif
   1067 		return (0);
   1068 
   1069 	case RAIDFRAME_INIT_LABELS:
   1070 		clabel = (RF_ComponentLabel_t *) data;
   1071 		/*
   1072 		   we only want the serial number from
   1073 		   the above.  We get all the rest of the information
   1074 		   from the config that was used to create this RAID
   1075 		   set.
   1076 		   */
   1077 
   1078 		raidPtr->serial_number = clabel->serial_number;
   1079 
   1080 		raid_init_component_label(raidPtr, &ci_label);
   1081 		ci_label.serial_number = clabel->serial_number;
    1082 		ci_label.row = 0; /* we don't pretend to support more */
   1083 
   1084 		for(column=0;column<raidPtr->numCol;column++) {
   1085 			diskPtr = &raidPtr->Disks[column];
   1086 			if (!RF_DEAD_DISK(diskPtr->status)) {
   1087 				ci_label.partitionSize = diskPtr->partitionSize;
   1088 				ci_label.column = column;
   1089 				raidwrite_component_label(
   1090 							  raidPtr->Disks[column].dev,
   1091 							  raidPtr->raid_cinfo[column].ci_vp,
   1092 							  &ci_label );
   1093 			}
   1094 		}
   1095 
   1096 		return (retcode);
   1097 	case RAIDFRAME_SET_AUTOCONFIG:
   1098 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1099 		printf("raid%d: New autoconfig value is: %d\n",
   1100 		       raidPtr->raidid, d);
   1101 		*(int *) data = d;
   1102 		return (retcode);
   1103 
   1104 	case RAIDFRAME_SET_ROOT:
   1105 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1106 		printf("raid%d: New rootpartition value is: %d\n",
   1107 		       raidPtr->raidid, d);
   1108 		*(int *) data = d;
   1109 		return (retcode);
   1110 
   1111 		/* initialize all parity */
   1112 	case RAIDFRAME_REWRITEPARITY:
   1113 
   1114 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1115 			/* Parity for RAID 0 is trivially correct */
   1116 			raidPtr->parity_good = RF_RAID_CLEAN;
   1117 			return(0);
   1118 		}
   1119 
   1120 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1121 			/* Re-write is already in progress! */
   1122 			return(EINVAL);
   1123 		}
   1124 
   1125 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1126 					   rf_RewriteParityThread,
   1127 					   raidPtr,"raid_parity");
   1128 		return (retcode);
   1129 
   1130 
   1131 	case RAIDFRAME_ADD_HOT_SPARE:
   1132 		sparePtr = (RF_SingleComponent_t *) data;
   1133 		memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
   1134 		retcode = rf_add_hot_spare(raidPtr, &hot_spare);
   1135 		return(retcode);
   1136 
   1137 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1138 		return(retcode);
   1139 
   1140 	case RAIDFRAME_DELETE_COMPONENT:
   1141 		componentPtr = (RF_SingleComponent_t *)data;
   1142 		memcpy( &component, componentPtr,
   1143 			sizeof(RF_SingleComponent_t));
   1144 		retcode = rf_delete_component(raidPtr, &component);
   1145 		return(retcode);
   1146 
   1147 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1148 		componentPtr = (RF_SingleComponent_t *)data;
   1149 		memcpy( &component, componentPtr,
   1150 			sizeof(RF_SingleComponent_t));
   1151 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1152 		return(retcode);
   1153 
   1154 	case RAIDFRAME_REBUILD_IN_PLACE:
   1155 
   1156 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1157 			/* Can't do this on a RAID 0!! */
   1158 			return(EINVAL);
   1159 		}
   1160 
   1161 		if (raidPtr->recon_in_progress == 1) {
   1162 			/* a reconstruct is already in progress! */
   1163 			return(EINVAL);
   1164 		}
   1165 
   1166 		componentPtr = (RF_SingleComponent_t *) data;
   1167 		memcpy( &component, componentPtr,
   1168 			sizeof(RF_SingleComponent_t));
   1169 		component.row = 0; /* we don't support any more */
   1170 		column = component.column;
   1171 
   1172 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1173 			return(EINVAL);
   1174 		}
   1175 
   1176 		RF_LOCK_MUTEX(raidPtr->mutex);
   1177 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
   1178 		    (raidPtr->numFailures > 0)) {
   1179 			/* XXX 0 above shouldn't be constant!!! */
   1180 			/* some component other than this has failed.
   1181 			   Let's not make things worse than they already
   1182 			   are... */
   1183 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1184 			       raidPtr->raidid);
   1185 			printf("raid%d:     Col: %d   Too many failures.\n",
   1186 			       raidPtr->raidid, column);
   1187 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1188 			return (EINVAL);
   1189 		}
   1190 		if (raidPtr->Disks[column].status ==
   1191 		    rf_ds_reconstructing) {
   1192 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1193 			       raidPtr->raidid);
    1194 			printf("raid%d:    Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
   1195 
   1196 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1197 			return (EINVAL);
   1198 		}
   1199 		if (raidPtr->Disks[column].status == rf_ds_spared) {
   1200 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1201 			return (EINVAL);
   1202 		}
   1203 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1204 
   1205 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1206 		if (rrcopy == NULL)
   1207 			return(ENOMEM);
   1208 
   1209 		rrcopy->raidPtr = (void *) raidPtr;
   1210 		rrcopy->col = column;
   1211 
   1212 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1213 					   rf_ReconstructInPlaceThread,
   1214 					   rrcopy,"raid_reconip");
   1215 		return(retcode);
   1216 
   1217 	case RAIDFRAME_GET_INFO:
   1218 		if (!raidPtr->valid)
   1219 			return (ENODEV);
   1220 		ucfgp = (RF_DeviceConfig_t **) data;
   1221 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1222 			  (RF_DeviceConfig_t *));
   1223 		if (d_cfg == NULL)
   1224 			return (ENOMEM);
   1225 		memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
   1226 		d_cfg->rows = 1; /* there is only 1 row now */
   1227 		d_cfg->cols = raidPtr->numCol;
   1228 		d_cfg->ndevs = raidPtr->numCol;
   1229 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1230 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1231 			return (ENOMEM);
   1232 		}
   1233 		d_cfg->nspares = raidPtr->numSpare;
   1234 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1235 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1236 			return (ENOMEM);
   1237 		}
   1238 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1239 		d = 0;
   1240 		for (j = 0; j < d_cfg->cols; j++) {
   1241 			d_cfg->devs[d] = raidPtr->Disks[j];
   1242 			d++;
   1243 		}
   1244 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1245 			d_cfg->spares[i] = raidPtr->Disks[j];
   1246 		}
   1247 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1248 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1249 
   1250 		return (retcode);
   1251 
   1252 	case RAIDFRAME_CHECK_PARITY:
   1253 		*(int *) data = raidPtr->parity_good;
   1254 		return (0);
   1255 
   1256 	case RAIDFRAME_RESET_ACCTOTALS:
   1257 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1258 		return (0);
   1259 
   1260 	case RAIDFRAME_GET_ACCTOTALS:
   1261 		totals = (RF_AccTotals_t *) data;
   1262 		*totals = raidPtr->acc_totals;
   1263 		return (0);
   1264 
   1265 	case RAIDFRAME_KEEP_ACCTOTALS:
   1266 		raidPtr->keep_acc_totals = *(int *)data;
   1267 		return (0);
   1268 
   1269 	case RAIDFRAME_GET_SIZE:
   1270 		*(int *) data = raidPtr->totalSectors;
   1271 		return (0);
   1272 
   1273 		/* fail a disk & optionally start reconstruction */
   1274 	case RAIDFRAME_FAIL_DISK:
   1275 
   1276 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1277 			/* Can't do this on a RAID 0!! */
   1278 			return(EINVAL);
   1279 		}
   1280 
   1281 		rr = (struct rf_recon_req *) data;
   1282 		rr->row = 0;
   1283 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
   1284 			return (EINVAL);
   1285 
   1286 
   1287 		RF_LOCK_MUTEX(raidPtr->mutex);
   1288 		if (raidPtr->status == rf_rs_reconstructing) {
   1289 			/* you can't fail a disk while we're reconstructing! */
   1290 			/* XXX wrong for RAID6 */
   1291 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1292 			return (EINVAL);
   1293 		}
   1294 		if ((raidPtr->Disks[rr->col].status ==
   1295 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1296 			/* some other component has failed.  Let's not make
   1297 			   things worse. XXX wrong for RAID6 */
   1298 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1299 			return (EINVAL);
   1300 		}
   1301 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
   1302 			/* Can't fail a spared disk! */
   1303 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1304 			return (EINVAL);
   1305 		}
   1306 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1307 
   1308 		/* make a copy of the recon request so that we don't rely on
   1309 		 * the user's buffer */
   1310 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1311 		if (rrcopy == NULL)
   1312 			return(ENOMEM);
   1313 		memcpy(rrcopy, rr, sizeof(*rr));
   1314 		rrcopy->raidPtr = (void *) raidPtr;
   1315 
   1316 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1317 					   rf_ReconThread,
   1318 					   rrcopy,"raid_recon");
   1319 		return (0);
   1320 
   1321 		/* invoke a copyback operation after recon on whatever disk
   1322 		 * needs it, if any */
   1323 	case RAIDFRAME_COPYBACK:
   1324 
   1325 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1326 			/* This makes no sense on a RAID 0!! */
   1327 			return(EINVAL);
   1328 		}
   1329 
   1330 		if (raidPtr->copyback_in_progress == 1) {
   1331 			/* Copyback is already in progress! */
   1332 			return(EINVAL);
   1333 		}
   1334 
   1335 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1336 					   rf_CopybackThread,
   1337 					   raidPtr,"raid_copyback");
   1338 		return (retcode);
   1339 
   1340 		/* return the percentage completion of reconstruction */
   1341 	case RAIDFRAME_CHECK_RECON_STATUS:
   1342 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1343 			/* This makes no sense on a RAID 0, so tell the
   1344 			   user it's done. */
   1345 			*(int *) data = 100;
   1346 			return(0);
   1347 		}
   1348 		if (raidPtr->status != rf_rs_reconstructing)
   1349 			*(int *) data = 100;
   1350 		else {
   1351 			if (raidPtr->reconControl->numRUsTotal > 0) {
   1352 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
   1353 			} else {
   1354 				*(int *) data = 0;
   1355 			}
   1356 		}
   1357 		return (0);
   1358 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1359 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1360 		if (raidPtr->status != rf_rs_reconstructing) {
   1361 			progressInfo.remaining = 0;
   1362 			progressInfo.completed = 100;
   1363 			progressInfo.total = 100;
   1364 		} else {
   1365 			progressInfo.total =
   1366 				raidPtr->reconControl->numRUsTotal;
   1367 			progressInfo.completed =
   1368 				raidPtr->reconControl->numRUsComplete;
   1369 			progressInfo.remaining = progressInfo.total -
   1370 				progressInfo.completed;
   1371 		}
   1372 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1373 				  sizeof(RF_ProgressInfo_t));
   1374 		return (retcode);
   1375 
   1376 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1377 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1378 			/* This makes no sense on a RAID 0, so tell the
   1379 			   user it's done. */
   1380 			*(int *) data = 100;
   1381 			return(0);
   1382 		}
   1383 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1384 			*(int *) data = 100 *
   1385 				raidPtr->parity_rewrite_stripes_done /
   1386 				raidPtr->Layout.numStripe;
   1387 		} else {
   1388 			*(int *) data = 100;
   1389 		}
   1390 		return (0);
   1391 
   1392 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1393 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1394 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1395 			progressInfo.total = raidPtr->Layout.numStripe;
   1396 			progressInfo.completed =
   1397 				raidPtr->parity_rewrite_stripes_done;
   1398 			progressInfo.remaining = progressInfo.total -
   1399 				progressInfo.completed;
   1400 		} else {
   1401 			progressInfo.remaining = 0;
   1402 			progressInfo.completed = 100;
   1403 			progressInfo.total = 100;
   1404 		}
   1405 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1406 				  sizeof(RF_ProgressInfo_t));
   1407 		return (retcode);
   1408 
   1409 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1410 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1411 			/* This makes no sense on a RAID 0 */
   1412 			*(int *) data = 100;
   1413 			return(0);
   1414 		}
   1415 		if (raidPtr->copyback_in_progress == 1) {
   1416 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1417 				raidPtr->Layout.numStripe;
   1418 		} else {
   1419 			*(int *) data = 100;
   1420 		}
   1421 		return (0);
   1422 
   1423 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1424 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1425 		if (raidPtr->copyback_in_progress == 1) {
   1426 			progressInfo.total = raidPtr->Layout.numStripe;
   1427 			progressInfo.completed =
   1428 				raidPtr->copyback_stripes_done;
   1429 			progressInfo.remaining = progressInfo.total -
   1430 				progressInfo.completed;
   1431 		} else {
   1432 			progressInfo.remaining = 0;
   1433 			progressInfo.completed = 100;
   1434 			progressInfo.total = 100;
   1435 		}
   1436 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1437 				  sizeof(RF_ProgressInfo_t));
   1438 		return (retcode);
   1439 
   1440 		/* the sparetable daemon calls this to wait for the kernel to
   1441 		 * need a spare table. this ioctl does not return until a
   1442 		 * spare table is needed. XXX -- calling mpsleep here in the
   1443 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1444 		 * -- I should either compute the spare table in the kernel,
   1445 		 * or have a different -- XXX XXX -- interface (a different
   1446 		 * character device) for delivering the table     -- XXX */
   1447 #if 0
   1448 	case RAIDFRAME_SPARET_WAIT:
   1449 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1450 		while (!rf_sparet_wait_queue)
   1451 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
   1452 		waitreq = rf_sparet_wait_queue;
   1453 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1454 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1455 
   1456 		/* structure assignment */
   1457 		*((RF_SparetWait_t *) data) = *waitreq;
   1458 
   1459 		RF_Free(waitreq, sizeof(*waitreq));
   1460 		return (0);
   1461 
   1462 		/* wakes up a process waiting on SPARET_WAIT and puts an error
    1463 		 * code in it that will cause the daemon to exit */
   1464 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1465 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1466 		waitreq->fcol = -1;
   1467 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1468 		waitreq->next = rf_sparet_wait_queue;
   1469 		rf_sparet_wait_queue = waitreq;
   1470 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1471 		wakeup(&rf_sparet_wait_queue);
   1472 		return (0);
   1473 
   1474 		/* used by the spare table daemon to deliver a spare table
   1475 		 * into the kernel */
   1476 	case RAIDFRAME_SEND_SPARET:
   1477 
   1478 		/* install the spare table */
   1479 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1480 
   1481 		/* respond to the requestor.  the return status of the spare
   1482 		 * table installation is passed in the "fcol" field */
   1483 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1484 		waitreq->fcol = retcode;
   1485 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1486 		waitreq->next = rf_sparet_resp_queue;
   1487 		rf_sparet_resp_queue = waitreq;
   1488 		wakeup(&rf_sparet_resp_queue);
   1489 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1490 
   1491 		return (retcode);
   1492 #endif
   1493 
   1494 	default:
   1495 		break; /* fall through to the os-specific code below */
   1496 
   1497 	}
   1498 
   1499 	if (!raidPtr->valid)
   1500 		return (EINVAL);
   1501 
   1502 	/*
   1503 	 * Add support for "regular" device ioctls here.
   1504 	 */
   1505 
   1506 	switch (cmd) {
   1507 	case DIOCGDINFO:
   1508 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
   1509 		break;
   1510 #ifdef __HAVE_OLD_DISKLABEL
   1511 	case ODIOCGDINFO:
   1512 		newlabel = *(rs->sc_dkdev.dk_label);
   1513 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1514 			return ENOTTY;
   1515 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1516 		break;
   1517 #endif
   1518 
   1519 	case DIOCGPART:
   1520 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
   1521 		((struct partinfo *) data)->part =
   1522 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1523 		break;
   1524 
   1525 	case DIOCWDINFO:
   1526 	case DIOCSDINFO:
   1527 #ifdef __HAVE_OLD_DISKLABEL
   1528 	case ODIOCWDINFO:
   1529 	case ODIOCSDINFO:
   1530 #endif
   1531 	{
   1532 		struct disklabel *lp;
   1533 #ifdef __HAVE_OLD_DISKLABEL
   1534 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1535 			memset(&newlabel, 0, sizeof newlabel);
   1536 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1537 			lp = &newlabel;
   1538 		} else
   1539 #endif
   1540 		lp = (struct disklabel *)data;
   1541 
   1542 		if ((error = raidlock(rs)) != 0)
   1543 			return (error);
   1544 
   1545 		rs->sc_flags |= RAIDF_LABELLING;
   1546 
   1547 		error = setdisklabel(rs->sc_dkdev.dk_label,
   1548 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
   1549 		if (error == 0) {
   1550 			if (cmd == DIOCWDINFO
   1551 #ifdef __HAVE_OLD_DISKLABEL
   1552 			    || cmd == ODIOCWDINFO
   1553 #endif
   1554 			   )
   1555 				error = writedisklabel(RAIDLABELDEV(dev),
   1556 				    raidstrategy, rs->sc_dkdev.dk_label,
   1557 				    rs->sc_dkdev.dk_cpulabel);
   1558 		}
   1559 		rs->sc_flags &= ~RAIDF_LABELLING;
   1560 
   1561 		raidunlock(rs);
   1562 
   1563 		if (error)
   1564 			return (error);
   1565 		break;
   1566 	}
   1567 
   1568 	case DIOCWLABEL:
   1569 		if (*(int *) data != 0)
   1570 			rs->sc_flags |= RAIDF_WLABEL;
   1571 		else
   1572 			rs->sc_flags &= ~RAIDF_WLABEL;
   1573 		break;
   1574 
   1575 	case DIOCGDEFLABEL:
   1576 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
   1577 		break;
   1578 
   1579 #ifdef __HAVE_OLD_DISKLABEL
   1580 	case ODIOCGDEFLABEL:
   1581 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
   1582 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1583 			return ENOTTY;
   1584 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1585 		break;
   1586 #endif
   1587 
   1588 	default:
   1589 		retcode = ENOTTY;
   1590 	}
   1591 	return (retcode);
   1592 
   1593 }
   1594 
   1595 
   1596 /* raidinit -- complete the rest of the initialization for the
   1597    RAIDframe device.  */
   1598 
   1599 
   1600 static void
   1601 raidinit(RF_Raid_t *raidPtr)
   1602 {
   1603 	struct raid_softc *rs;
   1604 	int     unit;
   1605 
   1606 	unit = raidPtr->raidid;
   1607 
   1608 	rs = &raid_softc[unit];
   1609 
   1610 	/* XXX should check return code first... */
   1611 	rs->sc_flags |= RAIDF_INITED;
   1612 
   1613 	/* XXX doesn't check bounds. */
   1614 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
   1615 
   1616 	rs->sc_dkdev.dk_name = rs->sc_xname;
   1617 
   1618 	/* disk_attach actually creates space for the CPU disklabel, among
   1619 	 * other things, so it's critical to call this *BEFORE* we try putzing
   1620 	 * with disklabels. */
   1621 
   1622 	pseudo_disk_attach(&rs->sc_dkdev);
   1623 
   1624 	/* XXX There may be a weird interaction here between this, and
   1625 	 * protectedSectors, as used in RAIDframe.  */
   1626 
   1627 	rs->sc_size = raidPtr->totalSectors;
   1628 }
   1629 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1630 /* wake up the daemon & tell it to get us a spare table
   1631  * XXX
   1632  * the entries in the queues should be tagged with the raidPtr
   1633  * so that in the extremely rare case that two recons happen at once,
    1634  * we know for which device we're requesting a spare table
   1635  * XXX
   1636  *
   1637  * XXX This code is not currently used. GO
   1638  */
   1639 int
   1640 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
   1641 {
   1642 	int     retcode;
   1643 
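         	/*
         	 * Protocol sketch (as implemented below): the request is queued
         	 * on rf_sparet_wait_queue and the user-level daemon (blocked in
         	 * RAIDFRAME_SPARET_WAIT) is woken; the daemon answers through
         	 * RAIDFRAME_SEND_SPARET, which installs the table and posts a
         	 * response on rf_sparet_resp_queue with the status in fcol.
         	 */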
   1644 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1645 	req->next = rf_sparet_wait_queue;
   1646 	rf_sparet_wait_queue = req;
   1647 	wakeup(&rf_sparet_wait_queue);
   1648 
    1649 	/* XXX stale: mpsleep() used to drop the mutex; tsleep() does not */
   1650 	while (!rf_sparet_resp_queue) {
   1651 		tsleep(&rf_sparet_resp_queue, PRIBIO,
   1652 		    "raidframe getsparetable", 0);
   1653 	}
   1654 	req = rf_sparet_resp_queue;
   1655 	rf_sparet_resp_queue = req->next;
   1656 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1657 
   1658 	retcode = req->fcol;
   1659 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
   1660 					 * alloc'd */
   1661 	return (retcode);
   1662 }
   1663 #endif
   1664 
   1665 /* a wrapper around rf_DoAccess that extracts appropriate info from the
   1666  * bp & passes it down.
   1667  * any calls originating in the kernel must use non-blocking I/O
   1668  * do some extra sanity checking to return "appropriate" error values for
   1669  * certain conditions (to make some standard utilities work)
   1670  *
   1671  * Formerly known as: rf_DoAccessKernel
   1672  */
   1673 void
   1674 raidstart(RF_Raid_t *raidPtr)
   1675 {
   1676 	RF_SectorCount_t num_blocks, pb, sum;
   1677 	RF_RaidAddr_t raid_addr;
   1678 	struct partition *pp;
   1679 	daddr_t blocknum;
   1680 	int     unit;
   1681 	struct raid_softc *rs;
   1682 	int     do_async;
   1683 	struct buf *bp;
   1684 	int rc;
   1685 
   1686 	unit = raidPtr->raidid;
   1687 	rs = &raid_softc[unit];
   1688 
   1689 	/* quick check to see if anything has died recently */
   1690 	RF_LOCK_MUTEX(raidPtr->mutex);
   1691 	if (raidPtr->numNewFailures > 0) {
   1692 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1693 		rf_update_component_labels(raidPtr,
   1694 					   RF_NORMAL_COMPONENT_UPDATE);
   1695 		RF_LOCK_MUTEX(raidPtr->mutex);
   1696 		raidPtr->numNewFailures--;
   1697 	}
   1698 
   1699 	/* Check to see if we're at the limit... */
   1700 	while (raidPtr->openings > 0) {
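         		/*
         		 * Locking note: raidPtr->mutex is held each time the loop
         		 * condition is evaluated, dropped while a buffer is
         		 * dequeued and dispatched, and re-acquired before the
         		 * bottom of the loop (or before "continue" on the error
         		 * paths).
         		 */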
   1701 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1702 
   1703 		/* get the next item, if any, from the queue */
   1704 		if ((bp = BUFQ_GET(rs->buf_queue)) == NULL) {
   1705 			/* nothing more to do */
   1706 			return;
   1707 		}
   1708 
   1709 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
   1710 		 * partition.. Need to make it absolute to the underlying
   1711 		 * device.. */
   1712 
   1713 		blocknum = bp->b_blkno;
   1714 		if (DISKPART(bp->b_dev) != RAW_PART) {
   1715 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
   1716 			blocknum += pp->p_offset;
   1717 		}
   1718 
   1719 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
   1720 			    (int) blocknum));
   1721 
   1722 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
   1723 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
   1724 
   1725 		/* *THIS* is where we adjust what block we're going to...
   1726 		 * but DO NOT TOUCH bp->b_blkno!!! */
   1727 		raid_addr = blocknum;
   1728 
   1729 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
   1730 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
   1731 		sum = raid_addr + num_blocks + pb;
   1732 		if (1 || rf_debugKernelAccess) {
   1733 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
   1734 				    (int) raid_addr, (int) sum, (int) num_blocks,
   1735 				    (int) pb, (int) bp->b_resid));
   1736 		}
   1737 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
   1738 		    || (sum < num_blocks) || (sum < pb)) {
   1739 			bp->b_error = ENOSPC;
   1740 			bp->b_flags |= B_ERROR;
   1741 			bp->b_resid = bp->b_bcount;
   1742 			biodone(bp);
   1743 			RF_LOCK_MUTEX(raidPtr->mutex);
   1744 			continue;
   1745 		}
   1746 		/*
   1747 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
   1748 		 */
   1749 
   1750 		if (bp->b_bcount & raidPtr->sectorMask) {
   1751 			bp->b_error = EINVAL;
   1752 			bp->b_flags |= B_ERROR;
   1753 			bp->b_resid = bp->b_bcount;
   1754 			biodone(bp);
   1755 			RF_LOCK_MUTEX(raidPtr->mutex);
   1756 			continue;
   1757 
   1758 		}
   1759 		db1_printf(("Calling DoAccess..\n"));
   1760 
   1761 
   1762 		RF_LOCK_MUTEX(raidPtr->mutex);
   1763 		raidPtr->openings--;
   1764 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1765 
   1766 		/*
   1767 		 * Everything is async.
   1768 		 */
   1769 		do_async = 1;
   1770 
   1771 		disk_busy(&rs->sc_dkdev);
   1772 
   1773 		/* XXX we're still at splbio() here... do we *really*
   1774 		   need to be? */
   1775 
   1776 		/* don't ever condition on bp->b_flags & B_WRITE.
   1777 		 * always condition on B_READ instead */
   1778 
   1779 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
   1780 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
   1781 				 do_async, raid_addr, num_blocks,
   1782 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
   1783 
   1784 		if (rc) {
   1785 			bp->b_error = rc;
   1786 			bp->b_flags |= B_ERROR;
   1787 			bp->b_resid = bp->b_bcount;
   1788 			biodone(bp);
   1789 			/* continue loop */
   1790 		}
   1791 
   1792 		RF_LOCK_MUTEX(raidPtr->mutex);
   1793 	}
   1794 	RF_UNLOCK_MUTEX(raidPtr->mutex);
   1795 }
   1796 
   1797 
   1798 
   1799 
   1800 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   1801 
   1802 int
   1803 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
   1804 {
   1805 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   1806 	struct buf *bp;
   1807 	struct raidbuf *raidbp = NULL;
   1808 
   1809 	req->queue = queue;
   1810 
   1811 #if DIAGNOSTIC
   1812 	if (queue->raidPtr->raidid >= numraid) {
   1813 		printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
   1814 		    numraid);
   1815 		panic("Invalid Unit number in rf_DispatchKernelIO");
   1816 	}
   1817 #endif
   1818 
   1819 	bp = req->bp;
   1820 #if 1
   1821 	/* XXX when there is a physical disk failure, someone is passing us a
   1822 	 * buffer that contains old stuff!!  Attempt to deal with this problem
   1823 	 * without taking a performance hit... (not sure where the real bug
   1824 	 * is.  It's buried in RAIDframe somewhere) :-(  GO ) */
   1825 
   1826 	if (bp->b_flags & B_ERROR) {
   1827 		bp->b_flags &= ~B_ERROR;
   1828 	}
   1829 	if (bp->b_error != 0) {
   1830 		bp->b_error = 0;
   1831 	}
   1832 #endif
   1833 	raidbp = pool_get(&rf_pools.cbuf, PR_NOWAIT);
   1834 	if (raidbp == NULL) {
   1835 		bp->b_flags |= B_ERROR;
   1836 		bp->b_error = ENOMEM;
   1837 		return (ENOMEM);
   1838 	}
   1839 	BUF_INIT(&raidbp->rf_buf);
   1840 
   1841 	/*
   1842 	 * context for raidiodone
   1843 	 */
   1844 	raidbp->rf_obp = bp;
   1845 	raidbp->req = req;
   1846 
   1847 	BIO_COPYPRIO(&raidbp->rf_buf, bp);
   1848 
   1849 	switch (req->type) {
   1850 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   1851 		/* XXX need to do something extra here.. */
   1852 		/* I'm leaving this in, as I've never actually seen it used,
   1853 		 * and I'd like folks to report it... GO */
    1854 		printf("WAKEUP CALLED\n");
   1855 		queue->numOutstanding++;
   1856 
   1857 		/* XXX need to glue the original buffer into this??  */
   1858 
   1859 		KernelWakeupFunc(&raidbp->rf_buf);
   1860 		break;
   1861 
   1862 	case RF_IO_TYPE_READ:
   1863 	case RF_IO_TYPE_WRITE:
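         		/*
         		 * Build a private buf (raidbp->rf_buf) aimed at the
         		 * underlying component's vnode/device, with b_iodone set
         		 * to KernelWakeupFunc, and hand it to the component
         		 * driver via VOP_STRATEGY below.
         		 */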
   1864 #if RF_ACC_TRACE > 0
   1865 		if (req->tracerec) {
   1866 			RF_ETIMER_START(req->tracerec->timer);
   1867 		}
   1868 #endif
   1869 		InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
   1870 		    op | bp->b_flags, queue->rf_cinfo->ci_dev,
   1871 		    req->sectorOffset, req->numSector,
   1872 		    req->buf, KernelWakeupFunc, (void *) req,
   1873 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   1874 
   1875 		if (rf_debugKernelAccess) {
   1876 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   1877 				(long) bp->b_blkno));
   1878 		}
   1879 		queue->numOutstanding++;
   1880 		queue->last_deq_sector = req->sectorOffset;
   1881 		/* acc wouldn't have been let in if there were any pending
   1882 		 * reqs at any other priority */
   1883 		queue->curPriority = req->priority;
   1884 
   1885 		db1_printf(("Going for %c to unit %d col %d\n",
   1886 			    req->type, queue->raidPtr->raidid,
   1887 			    queue->col));
   1888 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   1889 			(int) req->sectorOffset, (int) req->numSector,
   1890 			(int) (req->numSector <<
   1891 			    queue->raidPtr->logBytesPerSector),
   1892 			(int) queue->raidPtr->logBytesPerSector));
   1893 		if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
   1894 			raidbp->rf_buf.b_vp->v_numoutput++;
   1895 		}
   1896 		VOP_STRATEGY(raidbp->rf_buf.b_vp, &raidbp->rf_buf);
   1897 
   1898 		break;
   1899 
   1900 	default:
   1901 		panic("bad req->type in rf_DispatchKernelIO");
   1902 	}
   1903 	db1_printf(("Exiting from DispatchKernelIO\n"));
   1904 
   1905 	return (0);
   1906 }
    1907 /* this is the callback function associated with an I/O invoked from
   1908    kernel code.
   1909  */
   1910 static void
   1911 KernelWakeupFunc(struct buf *vbp)
   1912 {
   1913 	RF_DiskQueueData_t *req = NULL;
   1914 	RF_DiskQueue_t *queue;
   1915 	struct raidbuf *raidbp = (struct raidbuf *) vbp;
   1916 	struct buf *bp;
   1917 	int s;
   1918 
   1919 	s = splbio();
   1920 	db1_printf(("recovering the request queue:\n"));
   1921 	req = raidbp->req;
   1922 
   1923 	bp = raidbp->rf_obp;
   1924 
   1925 	queue = (RF_DiskQueue_t *) req->queue;
   1926 
   1927 	if (raidbp->rf_buf.b_flags & B_ERROR) {
   1928 		bp->b_flags |= B_ERROR;
   1929 		bp->b_error = raidbp->rf_buf.b_error ?
   1930 		    raidbp->rf_buf.b_error : EIO;
   1931 	}
   1932 
   1933 	/* XXX methinks this could be wrong... */
   1934 #if 1
   1935 	bp->b_resid = raidbp->rf_buf.b_resid;
   1936 #endif
   1937 #if RF_ACC_TRACE > 0
   1938 	if (req->tracerec) {
   1939 		RF_ETIMER_STOP(req->tracerec->timer);
   1940 		RF_ETIMER_EVAL(req->tracerec->timer);
   1941 		RF_LOCK_MUTEX(rf_tracing_mutex);
   1942 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   1943 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   1944 		req->tracerec->num_phys_ios++;
   1945 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
   1946 	}
   1947 #endif
   1948 	bp->b_bcount = raidbp->rf_buf.b_bcount;	/* XXXX ?? */
   1949 
   1950 	/* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
   1951 	 * ballistic, and mark the component as hosed... */
   1952 
   1953 	if (bp->b_flags & B_ERROR) {
   1954 		/* Mark the disk as dead */
   1955 		/* but only mark it once... */
   1956 		/* and only if it wouldn't leave this RAID set
   1957 		   completely broken */
   1958 		if (((queue->raidPtr->Disks[queue->col].status ==
   1959 		      rf_ds_optimal) ||
   1960 		     (queue->raidPtr->Disks[queue->col].status ==
   1961 		      rf_ds_used_spare)) &&
   1962 		     (queue->raidPtr->numFailures <
   1963 		         queue->raidPtr->Layout.map->faultsTolerated)) {
   1964 			printf("raid%d: IO Error.  Marking %s as failed.\n",
   1965 			       queue->raidPtr->raidid,
   1966 			       queue->raidPtr->Disks[queue->col].devname);
   1967 			queue->raidPtr->Disks[queue->col].status =
   1968 			    rf_ds_failed;
   1969 			queue->raidPtr->status = rf_rs_degraded;
   1970 			queue->raidPtr->numFailures++;
   1971 			queue->raidPtr->numNewFailures++;
   1972 		} else {	/* Disk is already dead... */
   1973 			/* printf("Disk already marked as dead!\n"); */
   1974 		}
   1975 
   1976 	}
   1977 
   1978 	pool_put(&rf_pools.cbuf, raidbp);
   1979 
   1980 	/* Fill in the error value */
   1981 
   1982 	req->error = (bp->b_flags & B_ERROR) ? bp->b_error : 0;
   1983 
   1984 	simple_lock(&queue->raidPtr->iodone_lock);
   1985 
   1986 	/* Drop this one on the "finished" queue... */
   1987 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
   1988 
   1989 	/* Let the raidio thread know there is work to be done. */
   1990 	wakeup(&(queue->raidPtr->iodone));
   1991 
   1992 	simple_unlock(&queue->raidPtr->iodone_lock);
   1993 
   1994 	splx(s);
   1995 }
   1996 
   1997 
   1998 
   1999 /*
   2000  * initialize a buf structure for doing an I/O in the kernel.
   2001  */
   2002 static void
   2003 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
   2004        RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t bf,
   2005        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
   2006        struct proc *b_proc)
   2007 {
   2008 	/* bp->b_flags       = B_PHYS | rw_flag; */
   2009 	bp->b_flags = B_CALL | rw_flag;	/* XXX need B_PHYS here too??? */
   2010 	bp->b_bcount = numSect << logBytesPerSector;
   2011 	bp->b_bufsize = bp->b_bcount;
   2012 	bp->b_error = 0;
   2013 	bp->b_dev = dev;
   2014 	bp->b_data = bf;
   2015 	bp->b_blkno = startSect;
   2016 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
   2017 	if (bp->b_bcount == 0) {
   2018 		panic("bp->b_bcount is zero in InitBP!!");
   2019 	}
   2020 	bp->b_proc = b_proc;
   2021 	bp->b_iodone = cbFunc;
   2022 	bp->b_vp = b_vp;
   2023 
   2024 }
   2025 
   2026 static void
   2027 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
   2028 		    struct disklabel *lp)
   2029 {
   2030 	memset(lp, 0, sizeof(*lp));
   2031 
   2032 	/* fabricate a label... */
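         	/*
         	 * The geometry below is entirely fictitious: a RAID set has no
         	 * real tracks or cylinders, so plausible values are derived
         	 * from the stripe and column counts purely to keep disklabel
         	 * consumers happy.
         	 */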
   2033 	lp->d_secperunit = raidPtr->totalSectors;
   2034 	lp->d_secsize = raidPtr->bytesPerSector;
   2035 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
   2036 	lp->d_ntracks = 4 * raidPtr->numCol;
   2037 	lp->d_ncylinders = raidPtr->totalSectors /
   2038 		(lp->d_nsectors * lp->d_ntracks);
   2039 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
   2040 
   2041 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
   2042 	lp->d_type = DTYPE_RAID;
   2043 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   2044 	lp->d_rpm = 3600;
   2045 	lp->d_interleave = 1;
   2046 	lp->d_flags = 0;
   2047 
   2048 	lp->d_partitions[RAW_PART].p_offset = 0;
   2049 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
   2050 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   2051 	lp->d_npartitions = RAW_PART + 1;
   2052 
   2053 	lp->d_magic = DISKMAGIC;
   2054 	lp->d_magic2 = DISKMAGIC;
   2055 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
   2056 
   2057 }
   2058 /*
   2059  * Read the disklabel from the raid device.  If one is not present, fake one
   2060  * up.
   2061  */
   2062 static void
   2063 raidgetdisklabel(dev_t dev)
   2064 {
   2065 	int     unit = raidunit(dev);
   2066 	struct raid_softc *rs = &raid_softc[unit];
   2067 	const char   *errstring;
   2068 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2069 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
   2070 	RF_Raid_t *raidPtr;
   2071 
   2072 	db1_printf(("Getting the disklabel...\n"));
   2073 
   2074 	memset(clp, 0, sizeof(*clp));
   2075 
   2076 	raidPtr = raidPtrs[unit];
   2077 
   2078 	raidgetdefaultlabel(raidPtr, rs, lp);
   2079 
   2080 	/*
   2081 	 * Call the generic disklabel extraction routine.
   2082 	 */
   2083 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
   2084 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
   2085 	if (errstring)
   2086 		raidmakedisklabel(rs);
   2087 	else {
   2088 		int     i;
   2089 		struct partition *pp;
   2090 
   2091 		/*
   2092 		 * Sanity check whether the found disklabel is valid.
   2093 		 *
    2094 		 * This is necessary since the total size of the raid device
    2095 		 * may vary when the interleave is changed even though exactly
    2096 		 * the same components are used, and an old disklabel may be
    2097 		 * used if one is found.
   2098 		 */
   2099 		if (lp->d_secperunit != rs->sc_size)
   2100 			printf("raid%d: WARNING: %s: "
   2101 			    "total sector size in disklabel (%d) != "
   2102 			    "the size of raid (%ld)\n", unit, rs->sc_xname,
   2103 			    lp->d_secperunit, (long) rs->sc_size);
   2104 		for (i = 0; i < lp->d_npartitions; i++) {
   2105 			pp = &lp->d_partitions[i];
   2106 			if (pp->p_offset + pp->p_size > rs->sc_size)
   2107 				printf("raid%d: WARNING: %s: end of partition `%c' "
   2108 				       "exceeds the size of raid (%ld)\n",
   2109 				       unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
   2110 		}
   2111 	}
   2112 
   2113 }
   2114 /*
   2115  * Take care of things one might want to take care of in the event
   2116  * that a disklabel isn't present.
   2117  */
   2118 static void
   2119 raidmakedisklabel(struct raid_softc *rs)
   2120 {
   2121 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2122 	db1_printf(("Making a label..\n"));
   2123 
   2124 	/*
   2125 	 * For historical reasons, if there's no disklabel present
   2126 	 * the raw partition must be marked FS_BSDFFS.
   2127 	 */
   2128 
   2129 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
   2130 
   2131 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
   2132 
   2133 	lp->d_checksum = dkcksum(lp);
   2134 }
   2135 /*
   2136  * Lookup the provided name in the filesystem.  If the file exists,
   2137  * is a valid block device, and isn't being used by anyone else,
   2138  * set *vpp to the file's vnode.
   2139  * You'll find the original of this in ccd.c
   2140  */
   2141 int
   2142 raidlookup(char *path, struct lwp *l, struct vnode **vpp)
   2143 {
   2144 	struct nameidata nd;
   2145 	struct vnode *vp;
   2146 	struct proc *p;
   2147 	struct vattr va;
   2148 	int     error;
   2149 
   2150 	p = l ? l->l_proc : NULL;
   2151 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, l);
   2152 	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
   2153 		return (error);
   2154 	}
   2155 	vp = nd.ni_vp;
   2156 	if (vp->v_usecount > 1) {
   2157 		VOP_UNLOCK(vp, 0);
   2158 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, l);
   2159 		return (EBUSY);
   2160 	}
   2161 	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, l)) != 0) {
   2162 		VOP_UNLOCK(vp, 0);
   2163 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, l);
   2164 		return (error);
   2165 	}
   2166 	/* XXX: eventually we should handle VREG, too. */
   2167 	if (va.va_type != VBLK) {
   2168 		VOP_UNLOCK(vp, 0);
   2169 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, l);
   2170 		return (ENOTBLK);
   2171 	}
   2172 	VOP_UNLOCK(vp, 0);
   2173 	*vpp = vp;
   2174 	return (0);
   2175 }
   2176 /*
   2177  * Wait interruptibly for an exclusive lock.
   2178  *
   2179  * XXX
   2180  * Several drivers do this; it should be abstracted and made MP-safe.
   2181  * (Hmm... where have we seen this warning before :->  GO )
   2182  */
   2183 static int
   2184 raidlock(struct raid_softc *rs)
   2185 {
   2186 	int     error;
   2187 
   2188 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2189 		rs->sc_flags |= RAIDF_WANTED;
   2190 		if ((error =
   2191 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
   2192 			return (error);
   2193 	}
   2194 	rs->sc_flags |= RAIDF_LOCKED;
   2195 	return (0);
   2196 }
   2197 /*
   2198  * Unlock and wake up any waiters.
   2199  */
   2200 static void
   2201 raidunlock(struct raid_softc *rs)
   2202 {
   2203 
   2204 	rs->sc_flags &= ~RAIDF_LOCKED;
   2205 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2206 		rs->sc_flags &= ~RAIDF_WANTED;
   2207 		wakeup(rs);
   2208 	}
   2209 }
   2210 
   2211 
   2212 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2213 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
   2214 
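         /*
          * raidmarkclean/raidmarkdirty: read-modify-write the component label
          * (stored RF_COMPONENT_INFO_OFFSET bytes into each component) to
          * update its clean/dirty flag and modification counter.
          */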
   2215 int
   2216 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
   2217 {
   2218 	RF_ComponentLabel_t clabel;
   2219 	raidread_component_label(dev, b_vp, &clabel);
   2220 	clabel.mod_counter = mod_counter;
   2221 	clabel.clean = RF_RAID_CLEAN;
   2222 	raidwrite_component_label(dev, b_vp, &clabel);
   2223 	return(0);
   2224 }
   2225 
   2226 
   2227 int
   2228 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
   2229 {
   2230 	RF_ComponentLabel_t clabel;
   2231 	raidread_component_label(dev, b_vp, &clabel);
   2232 	clabel.mod_counter = mod_counter;
   2233 	clabel.clean = RF_RAID_DIRTY;
   2234 	raidwrite_component_label(dev, b_vp, &clabel);
   2235 	return(0);
   2236 }
   2237 
   2238 /* ARGSUSED */
   2239 int
   2240 raidread_component_label(dev_t dev, struct vnode *b_vp,
   2241 			 RF_ComponentLabel_t *clabel)
   2242 {
   2243 	struct buf *bp;
   2244 	const struct bdevsw *bdev;
   2245 	int error;
   2246 
   2247 	/* XXX should probably ensure that we don't try to do this if
   2248 	   someone has changed rf_protected_sectors. */
   2249 
   2250 	if (b_vp == NULL) {
   2251 		/* For whatever reason, this component is not valid.
   2252 		   Don't try to read a component label from it. */
   2253 		return(EINVAL);
   2254 	}
   2255 
   2256 	/* get a block of the appropriate size... */
   2257 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2258 	bp->b_dev = dev;
   2259 
   2260 	/* get our ducks in a row for the read */
   2261 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2262 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2263 	bp->b_flags |= B_READ;
   2264  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2265 
   2266 	bdev = bdevsw_lookup(bp->b_dev);
   2267 	if (bdev == NULL)
   2268 		return (ENXIO);
   2269 	(*bdev->d_strategy)(bp);
   2270 
   2271 	error = biowait(bp);
   2272 
   2273 	if (!error) {
   2274 		memcpy(clabel, bp->b_data,
   2275 		       sizeof(RF_ComponentLabel_t));
   2276         }
   2277 
   2278 	brelse(bp);
   2279 	return(error);
   2280 }
   2281 /* ARGSUSED */
   2282 int
   2283 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
   2284 			  RF_ComponentLabel_t *clabel)
   2285 {
   2286 	struct buf *bp;
   2287 	const struct bdevsw *bdev;
   2288 	int error;
   2289 
   2290 	/* get a block of the appropriate size... */
   2291 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2292 	bp->b_dev = dev;
   2293 
   2294 	/* get our ducks in a row for the write */
   2295 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2296 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2297 	bp->b_flags |= B_WRITE;
   2298  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2299 
   2300 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
   2301 
   2302 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
   2303 
   2304 	bdev = bdevsw_lookup(bp->b_dev);
   2305 	if (bdev == NULL)
   2306 		return (ENXIO);
   2307 	(*bdev->d_strategy)(bp);
   2308 	error = biowait(bp);
   2309 	brelse(bp);
   2310 	if (error) {
   2311 #if 1
   2312 		printf("Failed to write RAID component info!\n");
   2313 #endif
   2314 	}
   2315 
   2316 	return(error);
   2317 }
   2318 
   2319 void
   2320 rf_markalldirty(RF_Raid_t *raidPtr)
   2321 {
   2322 	RF_ComponentLabel_t clabel;
   2323 	int sparecol;
   2324 	int c;
   2325 	int j;
   2326 	int scol = -1;
   2327 
   2328 	raidPtr->mod_counter++;
   2329 	for (c = 0; c < raidPtr->numCol; c++) {
   2330 		/* we don't want to touch (at all) a disk that has
   2331 		   failed */
   2332 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
   2333 			raidread_component_label(
   2334 						 raidPtr->Disks[c].dev,
   2335 						 raidPtr->raid_cinfo[c].ci_vp,
   2336 						 &clabel);
   2337 			if (clabel.status == rf_ds_spared) {
   2338 				/* XXX do something special...
   2339 				   but whatever you do, don't
   2340 				   try to access it!! */
   2341 			} else {
   2342 				raidmarkdirty(
   2343 					      raidPtr->Disks[c].dev,
   2344 					      raidPtr->raid_cinfo[c].ci_vp,
   2345 					      raidPtr->mod_counter);
   2346 			}
   2347 		}
   2348 	}
   2349 
   2350 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2351 		sparecol = raidPtr->numCol + c;
   2352 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2353 			/*
   2354 
   2355 			   we claim this disk is "optimal" if it's
   2356 			   rf_ds_used_spare, as that means it should be
   2357 			   directly substitutable for the disk it replaced.
   2358 			   We note that too...
   2359 
   2360 			 */
   2361 
   2362 			for(j=0;j<raidPtr->numCol;j++) {
   2363 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2364 					scol = j;
   2365 					break;
   2366 				}
   2367 			}
   2368 
   2369 			raidread_component_label(
   2370 				 raidPtr->Disks[sparecol].dev,
   2371 				 raidPtr->raid_cinfo[sparecol].ci_vp,
   2372 				 &clabel);
   2373 			/* make sure status is noted */
   2374 
   2375 			raid_init_component_label(raidPtr, &clabel);
   2376 
   2377 			clabel.row = 0;
   2378 			clabel.column = scol;
   2379 			/* Note: we *don't* change status from rf_ds_used_spare
   2380 			   to rf_ds_optimal */
   2381 			/* clabel.status = rf_ds_optimal; */
   2382 
   2383 			raidmarkdirty(raidPtr->Disks[sparecol].dev,
   2384 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2385 				      raidPtr->mod_counter);
   2386 		}
   2387 	}
   2388 }
   2389 
   2390 
   2391 void
   2392 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
   2393 {
   2394 	RF_ComponentLabel_t clabel;
   2395 	int sparecol;
   2396 	int c;
   2397 	int j;
   2398 	int scol;
   2399 
   2400 	scol = -1;
   2401 
   2402 	/* XXX should do extra checks to make sure things really are clean,
   2403 	   rather than blindly setting the clean bit... */
   2404 
   2405 	raidPtr->mod_counter++;
   2406 
   2407 	for (c = 0; c < raidPtr->numCol; c++) {
   2408 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   2409 			raidread_component_label(
   2410 						 raidPtr->Disks[c].dev,
   2411 						 raidPtr->raid_cinfo[c].ci_vp,
   2412 						 &clabel);
   2413 				/* make sure status is noted */
   2414 			clabel.status = rf_ds_optimal;
   2415 				/* bump the counter */
   2416 			clabel.mod_counter = raidPtr->mod_counter;
   2417 
   2418 			raidwrite_component_label(
   2419 						  raidPtr->Disks[c].dev,
   2420 						  raidPtr->raid_cinfo[c].ci_vp,
   2421 						  &clabel);
   2422 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2423 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2424 					raidmarkclean(
   2425 						      raidPtr->Disks[c].dev,
   2426 						      raidPtr->raid_cinfo[c].ci_vp,
   2427 						      raidPtr->mod_counter);
   2428 				}
   2429 			}
   2430 		}
   2431 		/* else we don't touch it.. */
   2432 	}
   2433 
   2434 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2435 		sparecol = raidPtr->numCol + c;
   2436 		/* Need to ensure that the reconstruct actually completed! */
   2437 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2438 			/*
   2439 
   2440 			   we claim this disk is "optimal" if it's
   2441 			   rf_ds_used_spare, as that means it should be
   2442 			   directly substitutable for the disk it replaced.
   2443 			   We note that too...
   2444 
   2445 			 */
   2446 
   2447 			for(j=0;j<raidPtr->numCol;j++) {
   2448 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2449 					scol = j;
   2450 					break;
   2451 				}
   2452 			}
   2453 
   2454 			/* XXX shouldn't *really* need this... */
   2455 			raidread_component_label(
   2456 				      raidPtr->Disks[sparecol].dev,
   2457 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2458 				      &clabel);
   2459 			/* make sure status is noted */
   2460 
   2461 			raid_init_component_label(raidPtr, &clabel);
   2462 
   2463 			clabel.mod_counter = raidPtr->mod_counter;
   2464 			clabel.column = scol;
   2465 			clabel.status = rf_ds_optimal;
   2466 
   2467 			raidwrite_component_label(
   2468 				      raidPtr->Disks[sparecol].dev,
   2469 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2470 				      &clabel);
   2471 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2472 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2473 					raidmarkclean( raidPtr->Disks[sparecol].dev,
   2474 						       raidPtr->raid_cinfo[sparecol].ci_vp,
   2475 						       raidPtr->mod_counter);
   2476 				}
   2477 			}
   2478 		}
   2479 	}
   2480 }
   2481 
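         /*
          * rf_close_component: release a component's vnode.  Auto-configured
          * components were opened by the kernel during autoconfiguration and
          * are closed with VOP_CLOSE()/vput(); manually configured ones are
          * closed via vn_close() using the engine thread's credentials.
          */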
   2482 void
   2483 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
   2484 {
   2485 	struct proc *p;
   2486 	struct lwp *l;
   2487 
   2488 	p = raidPtr->engine_thread;
   2489 	l = LIST_FIRST(&p->p_lwps);
   2490 
   2491 	if (vp != NULL) {
   2492 		if (auto_configured == 1) {
   2493 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2494 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2495 			vput(vp);
   2496 
   2497 		} else {
   2498 			(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, l);
   2499 		}
   2500 	}
   2501 }
   2502 
   2503 
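         /*
          * rf_UnconfigureVnodes: close the vnode of every component and spare
          * in this RAID set and clear the per-component bookkeeping.
          */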
   2504 void
   2505 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
   2506 {
   2507 	int r,c;
   2508 	struct vnode *vp;
   2509 	int acd;
   2510 
   2511 
   2512 	/* We take this opportunity to close the vnodes like we should.. */
   2513 
   2514 	for (c = 0; c < raidPtr->numCol; c++) {
   2515 		vp = raidPtr->raid_cinfo[c].ci_vp;
   2516 		acd = raidPtr->Disks[c].auto_configured;
   2517 		rf_close_component(raidPtr, vp, acd);
   2518 		raidPtr->raid_cinfo[c].ci_vp = NULL;
   2519 		raidPtr->Disks[c].auto_configured = 0;
   2520 	}
   2521 
   2522 	for (r = 0; r < raidPtr->numSpare; r++) {
   2523 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
   2524 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
   2525 		rf_close_component(raidPtr, vp, acd);
   2526 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
   2527 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
   2528 	}
   2529 }
   2530 
   2531 
   2532 void
   2533 rf_ReconThread(struct rf_recon_req *req)
   2534 {
   2535 	int     s;
   2536 	RF_Raid_t *raidPtr;
   2537 
   2538 	s = splbio();
   2539 	raidPtr = (RF_Raid_t *) req->raidPtr;
   2540 	raidPtr->recon_in_progress = 1;
   2541 
   2542 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
   2543 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
   2544 
   2545 	RF_Free(req, sizeof(*req));
   2546 
   2547 	raidPtr->recon_in_progress = 0;
   2548 	splx(s);
   2549 
   2550 	/* That's all... */
   2551 	kthread_exit(0);        /* does not return */
   2552 }
   2553 
   2554 void
   2555 rf_RewriteParityThread(RF_Raid_t *raidPtr)
   2556 {
   2557 	int retcode;
   2558 	int s;
   2559 
   2560 	raidPtr->parity_rewrite_stripes_done = 0;
   2561 	raidPtr->parity_rewrite_in_progress = 1;
   2562 	s = splbio();
   2563 	retcode = rf_RewriteParity(raidPtr);
   2564 	splx(s);
   2565 	if (retcode) {
   2566 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
   2567 	} else {
   2568 		/* set the clean bit!  If we shutdown correctly,
   2569 		   the clean bit on each component label will get
   2570 		   set */
   2571 		raidPtr->parity_good = RF_RAID_CLEAN;
   2572 	}
   2573 	raidPtr->parity_rewrite_in_progress = 0;
   2574 
   2575 	/* Anyone waiting for us to stop?  If so, inform them... */
   2576 	if (raidPtr->waitShutdown) {
   2577 		wakeup(&raidPtr->parity_rewrite_in_progress);
   2578 	}
   2579 
   2580 	/* That's all... */
   2581 	kthread_exit(0);        /* does not return */
   2582 }
   2583 
   2584 
   2585 void
   2586 rf_CopybackThread(RF_Raid_t *raidPtr)
   2587 {
   2588 	int s;
   2589 
   2590 	raidPtr->copyback_in_progress = 1;
   2591 	s = splbio();
   2592 	rf_CopybackReconstructedData(raidPtr);
   2593 	splx(s);
   2594 	raidPtr->copyback_in_progress = 0;
   2595 
   2596 	/* That's all... */
   2597 	kthread_exit(0);        /* does not return */
   2598 }
   2599 
   2600 
   2601 void
   2602 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
   2603 {
   2604 	int s;
   2605 	RF_Raid_t *raidPtr;
   2606 
   2607 	s = splbio();
   2608 	raidPtr = req->raidPtr;
   2609 	raidPtr->recon_in_progress = 1;
   2610 	rf_ReconstructInPlace(raidPtr, req->col);
   2611 	RF_Free(req, sizeof(*req));
   2612 	raidPtr->recon_in_progress = 0;
   2613 	splx(s);
   2614 
   2615 	/* That's all... */
   2616 	kthread_exit(0);        /* does not return */
   2617 }
   2618 
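         /*
          * rf_find_raid_components: scan every disk-class device in the
          * system, read its disklabel, and build an RF_AutoConfig_t list
          * entry for each FS_RAID partition that carries a plausible
          * RAIDframe component label.
          */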
   2619 RF_AutoConfig_t *
    2620 rf_find_raid_components(void)
   2621 {
   2622 	struct vnode *vp;
   2623 	struct disklabel label;
   2624 	struct device *dv;
   2625 	dev_t dev;
   2626 	int bmajor;
   2627 	int error;
   2628 	int i;
   2629 	int good_one;
   2630 	RF_ComponentLabel_t *clabel;
   2631 	RF_AutoConfig_t *ac_list;
   2632 	RF_AutoConfig_t *ac;
   2633 
   2634 
   2635 	/* initialize the AutoConfig list */
   2636 	ac_list = NULL;
   2637 
   2638 	/* we begin by trolling through *all* the devices on the system */
   2639 
   2640 	for (dv = alldevs.tqh_first; dv != NULL;
   2641 	     dv = dv->dv_list.tqe_next) {
   2642 
   2643 		/* we are only interested in disks... */
   2644 		if (dv->dv_class != DV_DISK)
   2645 			continue;
   2646 
   2647 		/* we don't care about floppies... */
   2648 		if (!strcmp(dv->dv_cfdata->cf_name,"fd")) {
   2649 			continue;
   2650 		}
   2651 
   2652 		/* we don't care about CD's... */
   2653 		if (!strcmp(dv->dv_cfdata->cf_name,"cd")) {
   2654 			continue;
   2655 		}
   2656 
   2657 		/* hdfd is the Atari/Hades floppy driver */
   2658 		if (!strcmp(dv->dv_cfdata->cf_name,"hdfd")) {
   2659 			continue;
   2660 		}
   2661 		/* fdisa is the Atari/Milan floppy driver */
   2662 		if (!strcmp(dv->dv_cfdata->cf_name,"fdisa")) {
   2663 			continue;
   2664 		}
   2665 
   2666 		/* need to find the device_name_to_block_device_major stuff */
   2667 		bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
   2668 
   2669 		/* get a vnode for the raw partition of this disk */
   2670 
   2671 		dev = MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART);
   2672 		if (bdevvp(dev, &vp))
   2673 			panic("RAID can't alloc vnode");
   2674 
   2675 		error = VOP_OPEN(vp, FREAD, NOCRED, 0);
   2676 
   2677 		if (error) {
   2678 			/* "Who cares."  Continue looking
    2679 			   for something that exists */
   2680 			vput(vp);
   2681 			continue;
   2682 		}
   2683 
   2684 		/* Ok, the disk exists.  Go get the disklabel. */
   2685 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED, 0);
   2686 		if (error) {
   2687 			/*
   2688 			 * XXX can't happen - open() would
   2689 			 * have errored out (or faked up one)
   2690 			 */
   2691 			if (error != ENOTTY)
   2692 				printf("RAIDframe: can't get label for dev "
   2693 				    "%s (%d)\n", dv->dv_xname, error);
   2694 		}
   2695 
   2696 		/* don't need this any more.  We'll allocate it again
   2697 		   a little later if we really do... */
   2698 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2699 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2700 		vput(vp);
   2701 
   2702 		if (error)
   2703 			continue;
   2704 
   2705 		for (i=0; i < label.d_npartitions; i++) {
   2706 			/* We only support partitions marked as RAID */
   2707 			if (label.d_partitions[i].p_fstype != FS_RAID)
   2708 				continue;
   2709 
   2710 			dev = MAKEDISKDEV(bmajor, dv->dv_unit, i);
   2711 			if (bdevvp(dev, &vp))
   2712 				panic("RAID can't alloc vnode");
   2713 
   2714 			error = VOP_OPEN(vp, FREAD, NOCRED, 0);
   2715 			if (error) {
   2716 				/* Whatever... */
   2717 				vput(vp);
   2718 				continue;
   2719 			}
   2720 
   2721 			good_one = 0;
   2722 
   2723 			clabel = (RF_ComponentLabel_t *)
   2724 				malloc(sizeof(RF_ComponentLabel_t),
   2725 				       M_RAIDFRAME, M_NOWAIT);
   2726 			if (clabel == NULL) {
   2727 				/* XXX CLEANUP HERE */
   2728 				printf("RAID auto config: out of memory!\n");
   2729 				return(NULL); /* XXX probably should panic? */
   2730 			}
   2731 
   2732 			if (!raidread_component_label(dev, vp, clabel)) {
   2733 				/* Got the label.  Does it look reasonable? */
   2734 				if (rf_reasonable_label(clabel) &&
   2735 				    (clabel->partitionSize <=
   2736 				     label.d_partitions[i].p_size)) {
   2737 #if DEBUG
   2738 					printf("Component on: %s%c: %d\n",
   2739 					       dv->dv_xname, 'a'+i,
   2740 					       label.d_partitions[i].p_size);
   2741 					rf_print_component_label(clabel);
   2742 #endif
   2743 					/* if it's reasonable, add it,
   2744 					   else ignore it. */
   2745 					ac = (RF_AutoConfig_t *)
   2746 						malloc(sizeof(RF_AutoConfig_t),
   2747 						       M_RAIDFRAME,
   2748 						       M_NOWAIT);
   2749 					if (ac == NULL) {
   2750 						/* XXX should panic?? */
   2751 						return(NULL);
   2752 					}
   2753 
   2754 					snprintf(ac->devname,
   2755 					    sizeof(ac->devname), "%s%c",
   2756 					    dv->dv_xname, 'a'+i);
   2757 					ac->dev = dev;
   2758 					ac->vp = vp;
   2759 					ac->clabel = clabel;
   2760 					ac->next = ac_list;
   2761 					ac_list = ac;
   2762 					good_one = 1;
   2763 				}
   2764 			}
   2765 			if (!good_one) {
   2766 				/* cleanup */
   2767 				free(clabel, M_RAIDFRAME);
   2768 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2769 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2770 				vput(vp);
   2771 			}
   2772 		}
   2773 	}
   2774 	return(ac_list);
   2775 }
   2776 
   2777 static int
   2778 rf_reasonable_label(RF_ComponentLabel_t *clabel)
   2779 {
   2780 
   2781 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   2782 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   2783 	    ((clabel->clean == RF_RAID_CLEAN) ||
   2784 	     (clabel->clean == RF_RAID_DIRTY)) &&
   2785 	    clabel->row >=0 &&
   2786 	    clabel->column >= 0 &&
   2787 	    clabel->num_rows > 0 &&
   2788 	    clabel->num_columns > 0 &&
   2789 	    clabel->row < clabel->num_rows &&
   2790 	    clabel->column < clabel->num_columns &&
   2791 	    clabel->blockSize > 0 &&
   2792 	    clabel->numBlocks > 0) {
   2793 		/* label looks reasonable enough... */
   2794 		return(1);
   2795 	}
   2796 	return(0);
   2797 }
   2798 
   2799 
   2800 #if DEBUG
   2801 void
   2802 rf_print_component_label(RF_ComponentLabel_t *clabel)
   2803 {
   2804 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
   2805 	       clabel->row, clabel->column,
   2806 	       clabel->num_rows, clabel->num_columns);
   2807 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
   2808 	       clabel->version, clabel->serial_number,
   2809 	       clabel->mod_counter);
   2810 	printf("   Clean: %s Status: %d\n",
   2811 	       clabel->clean ? "Yes" : "No", clabel->status );
   2812 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
   2813 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
   2814 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
   2815 	       (char) clabel->parityConfig, clabel->blockSize,
   2816 	       clabel->numBlocks);
   2817 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
   2818 	printf("   Contains root partition: %s\n",
   2819 	       clabel->root_partition ? "Yes" : "No" );
   2820 	printf("   Last configured as: raid%d\n", clabel->last_unit );
   2821 #if 0
   2822 	   printf("   Config order: %d\n", clabel->config_order);
   2823 #endif
   2824 
   2825 }
   2826 #endif
   2827 
   2828 RF_ConfigSet_t *
   2829 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
   2830 {
   2831 	RF_AutoConfig_t *ac;
   2832 	RF_ConfigSet_t *config_sets;
   2833 	RF_ConfigSet_t *cset;
   2834 	RF_AutoConfig_t *ac_next;
   2835 
   2836 
   2837 	config_sets = NULL;
   2838 
   2839 	/* Go through the AutoConfig list, and figure out which components
   2840 	   belong to what sets.  */
   2841 	ac = ac_list;
   2842 	while(ac!=NULL) {
   2843 		/* we're going to putz with ac->next, so save it here
   2844 		   for use at the end of the loop */
   2845 		ac_next = ac->next;
   2846 
   2847 		if (config_sets == NULL) {
   2848 			/* will need at least this one... */
   2849 			config_sets = (RF_ConfigSet_t *)
   2850 				malloc(sizeof(RF_ConfigSet_t),
   2851 				       M_RAIDFRAME, M_NOWAIT);
   2852 			if (config_sets == NULL) {
   2853 				panic("rf_create_auto_sets: No memory!");
   2854 			}
   2855 			/* this one is easy :) */
   2856 			config_sets->ac = ac;
   2857 			config_sets->next = NULL;
   2858 			config_sets->rootable = 0;
   2859 			ac->next = NULL;
   2860 		} else {
   2861 			/* which set does this component fit into? */
   2862 			cset = config_sets;
   2863 			while(cset!=NULL) {
   2864 				if (rf_does_it_fit(cset, ac)) {
   2865 					/* looks like it matches... */
   2866 					ac->next = cset->ac;
   2867 					cset->ac = ac;
   2868 					break;
   2869 				}
   2870 				cset = cset->next;
   2871 			}
   2872 			if (cset==NULL) {
   2873 				/* didn't find a match above... new set..*/
   2874 				cset = (RF_ConfigSet_t *)
   2875 					malloc(sizeof(RF_ConfigSet_t),
   2876 					       M_RAIDFRAME, M_NOWAIT);
   2877 				if (cset == NULL) {
   2878 					panic("rf_create_auto_sets: No memory!");
   2879 				}
   2880 				cset->ac = ac;
   2881 				ac->next = NULL;
   2882 				cset->next = config_sets;
   2883 				cset->rootable = 0;
   2884 				config_sets = cset;
   2885 			}
   2886 		}
   2887 		ac = ac_next;
   2888 	}
   2889 
   2890 
   2891 	return(config_sets);
   2892 }
   2893 
   2894 static int
   2895 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
   2896 {
   2897 	RF_ComponentLabel_t *clabel1, *clabel2;
   2898 
   2899 	/* If this one matches the *first* one in the set, that's good
   2900 	   enough, since the other members of the set would have been
   2901 	   through here too... */
   2902 	/* note that we are not checking partitionSize here..
   2903 
   2904 	   Note that we are also not checking the mod_counters here.
    2905 	   If everything else matches except the mod_counter, that's
   2906 	   good enough for this test.  We will deal with the mod_counters
   2907 	   a little later in the autoconfiguration process.
   2908 
   2909 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   2910 
   2911 	   The reason we don't check for this is that failed disks
   2912 	   will have lower modification counts.  If those disks are
   2913 	   not added to the set they used to belong to, then they will
   2914 	   form their own set, which may result in 2 different sets,
   2915 	   for example, competing to be configured at raid0, and
   2916 	   perhaps competing to be the root filesystem set.  If the
   2917 	   wrong ones get configured, or both attempt to become /,
    2918 	   weird behaviour and/or serious lossage will occur.  Thus we
   2919 	   need to bring them into the fold here, and kick them out at
   2920 	   a later point.
   2921 
   2922 	*/
   2923 
   2924 	clabel1 = cset->ac->clabel;
   2925 	clabel2 = ac->clabel;
   2926 	if ((clabel1->version == clabel2->version) &&
   2927 	    (clabel1->serial_number == clabel2->serial_number) &&
   2928 	    (clabel1->num_rows == clabel2->num_rows) &&
   2929 	    (clabel1->num_columns == clabel2->num_columns) &&
   2930 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   2931 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   2932 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   2933 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   2934 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   2935 	    (clabel1->blockSize == clabel2->blockSize) &&
   2936 	    (clabel1->numBlocks == clabel2->numBlocks) &&
   2937 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   2938 	    (clabel1->root_partition == clabel2->root_partition) &&
   2939 	    (clabel1->last_unit == clabel2->last_unit) &&
   2940 	    (clabel1->config_order == clabel2->config_order)) {
    2941 		/* if it gets here, it almost *has* to be a match */
   2942 	} else {
   2943 		/* it's not consistent with somebody in the set..
   2944 		   punt */
   2945 		return(0);
   2946 	}
   2947 	/* all was fine.. it must fit... */
   2948 	return(1);
   2949 }
   2950 
   2951 int
   2952 rf_have_enough_components(RF_ConfigSet_t *cset)
   2953 {
   2954 	RF_AutoConfig_t *ac;
   2955 	RF_AutoConfig_t *auto_config;
   2956 	RF_ComponentLabel_t *clabel;
   2957 	int c;
   2958 	int num_cols;
   2959 	int num_missing;
   2960 	int mod_counter;
   2961 	int mod_counter_found;
   2962 	int even_pair_failed;
   2963 	char parity_type;
   2964 
   2965 
   2966 	/* check to see that we have enough 'live' components
   2967 	   of this set.  If so, we can configure it if necessary */
   2968 
   2969 	num_cols = cset->ac->clabel->num_columns;
   2970 	parity_type = cset->ac->clabel->parityConfig;
   2971 
   2972 	/* XXX Check for duplicate components!?!?!? */
   2973 
   2974 	/* Determine what the mod_counter is supposed to be for this set. */
   2975 
   2976 	mod_counter_found = 0;
   2977 	mod_counter = 0;
   2978 	ac = cset->ac;
   2979 	while(ac!=NULL) {
   2980 		if (mod_counter_found==0) {
   2981 			mod_counter = ac->clabel->mod_counter;
   2982 			mod_counter_found = 1;
   2983 		} else {
   2984 			if (ac->clabel->mod_counter > mod_counter) {
   2985 				mod_counter = ac->clabel->mod_counter;
   2986 			}
   2987 		}
   2988 		ac = ac->next;
   2989 	}
   2990 
   2991 	num_missing = 0;
   2992 	auto_config = cset->ac;
   2993 
   2994 	even_pair_failed = 0;
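         	/*
         	 * For parity type '1' (RAID 1) the columns are treated as
         	 * mirror pairs (0,1), (2,3), ...; the set is still usable as
         	 * long as at least one member of every pair is present with
         	 * the expected mod_counter.
         	 */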
   2995 	for(c=0; c<num_cols; c++) {
   2996 		ac = auto_config;
   2997 		while(ac!=NULL) {
   2998 			if ((ac->clabel->column == c) &&
   2999 			    (ac->clabel->mod_counter == mod_counter)) {
   3000 				/* it's this one... */
   3001 #if DEBUG
   3002 				printf("Found: %s at %d\n",
   3003 				       ac->devname,c);
   3004 #endif
   3005 				break;
   3006 			}
   3007 			ac=ac->next;
   3008 		}
   3009 		if (ac==NULL) {
   3010 				/* Didn't find one here! */
   3011 				/* special case for RAID 1, especially
   3012 				   where there are more than 2
   3013 				   components (where RAIDframe treats
   3014 				   things a little differently :( ) */
   3015 			if (parity_type == '1') {
   3016 				if (c%2 == 0) { /* even component */
   3017 					even_pair_failed = 1;
   3018 				} else { /* odd component.  If
   3019 					    we're failed, and
   3020 					    so is the even
   3021 					    component, it's
   3022 					    "Good Night, Charlie" */
   3023 					if (even_pair_failed == 1) {
   3024 						return(0);
   3025 					}
   3026 				}
   3027 			} else {
   3028 				/* normal accounting */
   3029 				num_missing++;
   3030 			}
   3031 		}
   3032 		if ((parity_type == '1') && (c%2 == 1)) {
    3033 				/* Just finished a mirror pair without
    3034 				   bailing; reset the even_pair_failed flag
    3035 				   and go on to the next pair.... */
   3036 			even_pair_failed = 0;
   3037 		}
   3038 	}
   3039 
   3040 	clabel = cset->ac->clabel;
   3041 
   3042 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
   3043 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
   3044 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
   3045 		/* XXX this needs to be made *much* more general */
   3046 		/* Too many failures */
   3047 		return(0);
   3048 	}
   3049 	/* otherwise, all is well, and we've got enough to take a kick
   3050 	   at autoconfiguring this set */
   3051 	return(1);
   3052 }
   3053 
   3054 void
   3055 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
   3056 			RF_Raid_t *raidPtr)
   3057 {
   3058 	RF_ComponentLabel_t *clabel;
   3059 	int i;
   3060 
   3061 	clabel = ac->clabel;
   3062 
   3063 	/* 1. Fill in the common stuff */
   3064 	config->numRow = clabel->num_rows = 1;
   3065 	config->numCol = clabel->num_columns;
   3066 	config->numSpare = 0; /* XXX should this be set here? */
   3067 	config->sectPerSU = clabel->sectPerSU;
   3068 	config->SUsPerPU = clabel->SUsPerPU;
   3069 	config->SUsPerRU = clabel->SUsPerRU;
   3070 	config->parityConfig = clabel->parityConfig;
   3071 	/* XXX... */
   3072 	strcpy(config->diskQueueType,"fifo");
   3073 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3074 	config->layoutSpecificSize = 0; /* XXX ?? */
   3075 
   3076 	while(ac!=NULL) {
   3077 		/* row/col values will be in range due to the checks
   3078 		   in reasonable_label() */
   3079 		strcpy(config->devnames[0][ac->clabel->column],
   3080 		       ac->devname);
   3081 		ac = ac->next;
   3082 	}
   3083 
   3084 	for(i=0;i<RF_MAXDBGV;i++) {
   3085 		config->debugVars[i][0] = 0;
   3086 	}
   3087 }
   3088 
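         /*
          * rf_set_autoconfig: record the new autoconfigure setting in the
          * in-core structure and in the component label of every optimal
          * component and used spare; rf_set_rootpartition below does the
          * same for the root_partition flag.
          */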
   3089 int
   3090 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
   3091 {
   3092 	RF_ComponentLabel_t clabel;
   3093 	struct vnode *vp;
   3094 	dev_t dev;
   3095 	int column;
   3096 	int sparecol;
   3097 
   3098 	raidPtr->autoconfigure = new_value;
   3099 
   3100 	for(column=0; column<raidPtr->numCol; column++) {
   3101 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3102 			dev = raidPtr->Disks[column].dev;
   3103 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3104 			raidread_component_label(dev, vp, &clabel);
   3105 			clabel.autoconfigure = new_value;
   3106 			raidwrite_component_label(dev, vp, &clabel);
   3107 		}
   3108 	}
   3109 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3110 		sparecol = raidPtr->numCol + column;
   3111 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3112 			dev = raidPtr->Disks[sparecol].dev;
   3113 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3114 			raidread_component_label(dev, vp, &clabel);
   3115 			clabel.autoconfigure = new_value;
   3116 			raidwrite_component_label(dev, vp, &clabel);
   3117 		}
   3118 	}
   3119 	return(new_value);
   3120 }
   3121 
   3122 int
   3123 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
   3124 {
   3125 	RF_ComponentLabel_t clabel;
   3126 	struct vnode *vp;
   3127 	dev_t dev;
   3128 	int column;
   3129 	int sparecol;
   3130 
   3131 	raidPtr->root_partition = new_value;
   3132 	for(column=0; column<raidPtr->numCol; column++) {
   3133 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3134 			dev = raidPtr->Disks[column].dev;
   3135 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3136 			raidread_component_label(dev, vp, &clabel);
   3137 			clabel.root_partition = new_value;
   3138 			raidwrite_component_label(dev, vp, &clabel);
   3139 		}
   3140 	}
   3141 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3142 		sparecol = raidPtr->numCol + column;
   3143 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3144 			dev = raidPtr->Disks[sparecol].dev;
   3145 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3146 			raidread_component_label(dev, vp, &clabel);
   3147 			clabel.root_partition = new_value;
   3148 			raidwrite_component_label(dev, vp, &clabel);
   3149 		}
   3150 	}
   3151 	return(new_value);
   3152 }
   3153 
   3154 void
   3155 rf_release_all_vps(RF_ConfigSet_t *cset)
   3156 {
   3157 	RF_AutoConfig_t *ac;
   3158 
   3159 	ac = cset->ac;
   3160 	while(ac!=NULL) {
   3161 		/* Close the vp, and give it back */
   3162 		if (ac->vp) {
   3163 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3164 			VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
   3165 			vput(ac->vp);
   3166 			ac->vp = NULL;
   3167 		}
   3168 		ac = ac->next;
   3169 	}
   3170 }
   3171 
   3172 
   3173 void
   3174 rf_cleanup_config_set(RF_ConfigSet_t *cset)
   3175 {
   3176 	RF_AutoConfig_t *ac;
   3177 	RF_AutoConfig_t *next_ac;
   3178 
   3179 	ac = cset->ac;
   3180 	while(ac!=NULL) {
   3181 		next_ac = ac->next;
   3182 		/* nuke the label */
   3183 		free(ac->clabel, M_RAIDFRAME);
   3184 		/* cleanup the config structure */
   3185 		free(ac, M_RAIDFRAME);
   3186 		/* "next.." */
   3187 		ac = next_ac;
   3188 	}
   3189 	/* and, finally, nuke the config set */
   3190 	free(cset, M_RAIDFRAME);
   3191 }
   3192 
   3193 
   3194 void
   3195 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
   3196 {
   3197 	/* current version number */
   3198 	clabel->version = RF_COMPONENT_LABEL_VERSION;
   3199 	clabel->serial_number = raidPtr->serial_number;
   3200 	clabel->mod_counter = raidPtr->mod_counter;
   3201 	clabel->num_rows = 1;
   3202 	clabel->num_columns = raidPtr->numCol;
   3203 	clabel->clean = RF_RAID_DIRTY; /* not clean */
   3204 	clabel->status = rf_ds_optimal; /* "It's good!" */
   3205 
   3206 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
   3207 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
   3208 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
   3209 
   3210 	clabel->blockSize = raidPtr->bytesPerSector;
   3211 	clabel->numBlocks = raidPtr->sectorsPerDisk;
   3212 
   3213 	/* XXX not portable */
   3214 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
   3215 	clabel->maxOutstanding = raidPtr->maxOutstanding;
   3216 	clabel->autoconfigure = raidPtr->autoconfigure;
   3217 	clabel->root_partition = raidPtr->root_partition;
   3218 	clabel->last_unit = raidPtr->raidid;
   3219 	clabel->config_order = raidPtr->config_order;
   3220 }
   3221 
   3222 int
   3223 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
   3224 {
   3225 	RF_Raid_t *raidPtr;
   3226 	RF_Config_t *config;
   3227 	int raidID;
   3228 	int retcode;
   3229 
   3230 #if DEBUG
   3231 	printf("RAID autoconfigure\n");
   3232 #endif
   3233 
   3234 	retcode = 0;
   3235 	*unit = -1;
   3236 
   3237 	/* 1. Create a config structure */
   3238 
   3239 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
   3240 				       M_RAIDFRAME,
   3241 				       M_NOWAIT);
   3242 	if (config==NULL) {
   3243 		printf("Out of mem!?!?\n");
   3244 				/* XXX do something more intelligent here. */
   3245 		return(1);
   3246 	}
   3247 
   3248 	memset(config, 0, sizeof(RF_Config_t));
   3249 
   3250 	/*
   3251 	   2. Figure out what RAID ID this one is supposed to live at
   3252 	   See if we can get the same RAID dev that it was configured
   3253 	   on last time..
   3254 	*/
   3255 
   3256 	raidID = cset->ac->clabel->last_unit;
   3257 	if ((raidID < 0) || (raidID >= numraid)) {
   3258 		/* let's not wander off into lala land. */
   3259 		raidID = numraid - 1;
   3260 	}
   3261 	if (raidPtrs[raidID]->valid != 0) {
   3262 
   3263 		/*
   3264 		   Nope... Go looking for an alternative...
   3265 		   Start high so we don't immediately use raid0 if that's
   3266 		   not taken.
   3267 		*/
   3268 
   3269 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
   3270 			if (raidPtrs[raidID]->valid == 0) {
   3271 				/* can use this one! */
   3272 				break;
   3273 			}
   3274 		}
   3275 	}
   3276 
   3277 	if (raidID < 0) {
   3278 		/* punt... */
   3279 		printf("Unable to auto configure this set!\n");
   3280 		printf("(Out of RAID devs!)\n");
   3281 		return(1);
   3282 	}
   3283 
   3284 #if DEBUG
   3285 	printf("Configuring raid%d:\n",raidID);
   3286 #endif
   3287 
   3288 	raidPtr = raidPtrs[raidID];
   3289 
   3290 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
   3291 	raidPtr->raidid = raidID;
   3292 	raidPtr->openings = RAIDOUTSTANDING;
   3293 
   3294 	/* 3. Build the configuration structure */
   3295 	rf_create_configuration(cset->ac, config, raidPtr);
   3296 
   3297 	/* 4. Do the configuration */
   3298 	retcode = rf_Configure(raidPtr, config, cset->ac);
   3299 
   3300 	if (retcode == 0) {
   3301 
   3302 		raidinit(raidPtrs[raidID]);
   3303 
   3304 		rf_markalldirty(raidPtrs[raidID]);
   3305 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
   3306 		if (cset->ac->clabel->root_partition==1) {
   3307 			/* everything configured just fine.  Make a note
   3308 			   that this set is eligible to be root. */
   3309 			cset->rootable = 1;
   3310 			/* XXX do this here? */
   3311 			raidPtrs[raidID]->root_partition = 1;
   3312 		}
   3313 	}
   3314 
   3315 	/* 5. Cleanup */
   3316 	free(config, M_RAIDFRAME);
   3317 
   3318 	*unit = raidID;
   3319 	return(retcode);
   3320 }
   3321 
   3322 void
   3323 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
   3324 {
   3325 	struct buf *bp;
   3326 
   3327 	bp = (struct buf *)desc->bp;
   3328 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
   3329 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
   3330 }
   3331 
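         /*
          * rf_pool_init: initialize a pool of "size"-byte items, prime it
          * with xmin pre-allocated items, and set the low and high
          * watermarks to xmin and xmax respectively.
          */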
   3332 void
   3333 rf_pool_init(struct pool *p, size_t size, const char *w_chan,
   3334 	     size_t xmin, size_t xmax)
   3335 {
   3336 	pool_init(p, size, 0, 0, 0, w_chan, NULL);
   3337 	pool_sethiwat(p, xmax);
   3338 	pool_prime(p, xmin);
   3339 	pool_setlowat(p, xmin);
   3340 }
   3341 
   3342 /*
   3343  * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
   3344  * if there is IO pending and if that IO could possibly be done for a
   3345  * given RAID set.  Returns 0 if IO is waiting and can be done, 1
   3346  * otherwise.
   3347  *
   3348  */
   3349 
   3350 int
   3351 rf_buf_queue_check(int raidid)
   3352 {
   3353 	if ((BUFQ_PEEK(raid_softc[raidid].buf_queue) != NULL) &&
   3354 	    raidPtrs[raidid]->openings > 0) {
   3355 		/* there is work to do */
   3356 		return 0;
   3357 	}
   3358 	/* default is nothing to do */
   3359 	return 1;
   3360 }
   3361