      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.189 2005/09/24 22:51:55 oster Exp $	*/
      2 /*-
      3  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster; Jason R. Thorpe.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *        This product includes software developed by the NetBSD
     20  *        Foundation, Inc. and its contributors.
     21  * 4. Neither the name of The NetBSD Foundation nor the names of its
     22  *    contributors may be used to endorse or promote products derived
     23  *    from this software without specific prior written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright (c) 1990, 1993
     40  *      The Regents of the University of California.  All rights reserved.
     41  *
     42  * This code is derived from software contributed to Berkeley by
     43  * the Systems Programming Group of the University of Utah Computer
     44  * Science Department.
     45  *
     46  * Redistribution and use in source and binary forms, with or without
     47  * modification, are permitted provided that the following conditions
     48  * are met:
     49  * 1. Redistributions of source code must retain the above copyright
     50  *    notice, this list of conditions and the following disclaimer.
     51  * 2. Redistributions in binary form must reproduce the above copyright
     52  *    notice, this list of conditions and the following disclaimer in the
     53  *    documentation and/or other materials provided with the distribution.
     54  * 3. Neither the name of the University nor the names of its contributors
     55  *    may be used to endorse or promote products derived from this software
     56  *    without specific prior written permission.
     57  *
     58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     68  * SUCH DAMAGE.
     69  *
     70  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     71  *
     72  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     73  */
     74 
     75 /*
     76  * Copyright (c) 1988 University of Utah.
     77  *
     78  * This code is derived from software contributed to Berkeley by
     79  * the Systems Programming Group of the University of Utah Computer
     80  * Science Department.
     81  *
     82  * Redistribution and use in source and binary forms, with or without
     83  * modification, are permitted provided that the following conditions
     84  * are met:
     85  * 1. Redistributions of source code must retain the above copyright
     86  *    notice, this list of conditions and the following disclaimer.
     87  * 2. Redistributions in binary form must reproduce the above copyright
     88  *    notice, this list of conditions and the following disclaimer in the
     89  *    documentation and/or other materials provided with the distribution.
     90  * 3. All advertising materials mentioning features or use of this software
     91  *    must display the following acknowledgement:
     92  *      This product includes software developed by the University of
     93  *      California, Berkeley and its contributors.
     94  * 4. Neither the name of the University nor the names of its contributors
     95  *    may be used to endorse or promote products derived from this software
     96  *    without specific prior written permission.
     97  *
     98  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     99  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    100  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    101  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
    102  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    103  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
    104  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    105  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    106  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    107  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    108  * SUCH DAMAGE.
    109  *
    110  * from: Utah $Hdr: cd.c 1.6 90/11/28$
    111  *
    112  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
    113  */
    114 
    115 /*
    116  * Copyright (c) 1995 Carnegie-Mellon University.
    117  * All rights reserved.
    118  *
    119  * Authors: Mark Holland, Jim Zelenka
    120  *
    121  * Permission to use, copy, modify and distribute this software and
    122  * its documentation is hereby granted, provided that both the copyright
    123  * notice and this permission notice appear in all copies of the
    124  * software, derivative works or modified versions, and any portions
    125  * thereof, and that both notices appear in supporting documentation.
    126  *
    127  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
    128  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
    129  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
    130  *
    131  * Carnegie Mellon requests users of this software to return to
    132  *
    133  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
    134  *  School of Computer Science
    135  *  Carnegie Mellon University
    136  *  Pittsburgh PA 15213-3890
    137  *
    138  * any improvements or extensions that they make and grant Carnegie the
    139  * rights to redistribute these changes.
    140  */
    141 
    142 /***********************************************************
    143  *
     144  * rf_netbsdkintf.c -- the kernel interface routines for RAIDframe
    145  *
    146  ***********************************************************/
    147 
    148 #include <sys/cdefs.h>
    149 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.189 2005/09/24 22:51:55 oster Exp $");
    150 
    151 #include <sys/param.h>
    152 #include <sys/errno.h>
    153 #include <sys/pool.h>
    154 #include <sys/proc.h>
    155 #include <sys/queue.h>
    156 #include <sys/disk.h>
    157 #include <sys/device.h>
    158 #include <sys/stat.h>
    159 #include <sys/ioctl.h>
    160 #include <sys/fcntl.h>
    161 #include <sys/systm.h>
    162 #include <sys/namei.h>
    163 #include <sys/vnode.h>
    164 #include <sys/disklabel.h>
    165 #include <sys/conf.h>
    166 #include <sys/lock.h>
    167 #include <sys/buf.h>
    168 #include <sys/bufq.h>
    169 #include <sys/user.h>
    170 #include <sys/reboot.h>
    171 
    172 #include <dev/raidframe/raidframevar.h>
    173 #include <dev/raidframe/raidframeio.h>
    174 #include "raid.h"
    175 #include "opt_raid_autoconfig.h"
    176 #include "rf_raid.h"
    177 #include "rf_copyback.h"
    178 #include "rf_dag.h"
    179 #include "rf_dagflags.h"
    180 #include "rf_desc.h"
    181 #include "rf_diskqueue.h"
    182 #include "rf_etimer.h"
    183 #include "rf_general.h"
    184 #include "rf_kintf.h"
    185 #include "rf_options.h"
    186 #include "rf_driver.h"
    187 #include "rf_parityscan.h"
    188 #include "rf_threadstuff.h"
    189 
    190 #ifdef DEBUG
    191 int     rf_kdebug_level = 0;
    192 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    193 #else				/* DEBUG */
    194 #define db1_printf(a) { }
    195 #endif				/* DEBUG */
    196 
    197 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
    198 
    199 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
    200 
    201 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    202 						 * spare table */
    203 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    204 						 * installation process */
    205 
    206 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    207 
    208 /* prototypes */
    209 static void KernelWakeupFunc(struct buf *);
    210 static void InitBP(struct buf *, struct vnode *, unsigned,
    211     dev_t, RF_SectorNum_t, RF_SectorCount_t, caddr_t, void (*) (struct buf *),
    212     void *, int, struct proc *);
    213 static void raidinit(RF_Raid_t *);
    214 
    215 void raidattach(int);
    216 
    217 dev_type_open(raidopen);
    218 dev_type_close(raidclose);
    219 dev_type_read(raidread);
    220 dev_type_write(raidwrite);
    221 dev_type_ioctl(raidioctl);
    222 dev_type_strategy(raidstrategy);
    223 dev_type_dump(raiddump);
    224 dev_type_size(raidsize);
    225 
    226 const struct bdevsw raid_bdevsw = {
    227 	raidopen, raidclose, raidstrategy, raidioctl,
    228 	raiddump, raidsize, D_DISK
    229 };
    230 
    231 const struct cdevsw raid_cdevsw = {
    232 	raidopen, raidclose, raidread, raidwrite, raidioctl,
    233 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    234 };
    235 
    236 /*
    237  * Pilfered from ccd.c
    238  */
    239 
    240 struct raidbuf {
    241 	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! */
    242 	struct buf *rf_obp;	/* ptr. to original I/O buf */
    243 	RF_DiskQueueData_t *req;/* the request that this was part of.. */
    244 };
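         /*
          * rf_buf must stay the first member: keeping it first is what lets
          * the completion path turn the struct buf handed back by the
          * underlying disk driver into the containing raidbuf with a plain
          * pointer cast, the same trick ccd(4) uses.
          */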
    245 
    246 /* XXX Not sure if the following should be replacing the raidPtrs above,
    247    or if it should be used in conjunction with that...
    248 */
    249 
    250 struct raid_softc {
    251 	int     sc_flags;	/* flags */
    252 	int     sc_cflags;	/* configuration flags */
    253 	size_t  sc_size;        /* size of the raid device */
    254 	char    sc_xname[20];	/* XXX external name */
    255 	struct disk sc_dkdev;	/* generic disk device info */
    256 	struct bufq_state buf_queue;	/* used for the device queue */
    257 };
    258 /* sc_flags */
    259 #define RAIDF_INITED	0x01	/* unit has been initialized */
    260 #define RAIDF_WLABEL	0x02	/* label area is writable */
    261 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    262 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    263 #define RAIDF_LOCKED	0x80	/* unit is locked */
    264 
    265 #define	raidunit(x)	DISKUNIT(x)
    266 int numraid = 0;
    267 
    268 /*
    269  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    270  * Be aware that large numbers can allow the driver to consume a lot of
    271  * kernel memory, especially on writes, and in degraded mode reads.
    272  *
    273  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    274  * a single 64K write will typically require 64K for the old data,
    275  * 64K for the old parity, and 64K for the new parity, for a total
    276  * of 192K (if the parity buffer is not re-used immediately).
     277  * Even if it is used immediately, that's still 128K, which when multiplied
    278  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    279  *
    280  * Now in degraded mode, for example, a 64K read on the above setup may
    281  * require data reconstruction, which will require *all* of the 4 remaining
    282  * disks to participate -- 4 * 32K/disk == 128K again.
    283  */
    284 
    285 #ifndef RAIDOUTSTANDING
    286 #define RAIDOUTSTANDING   6
    287 #endif
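         /*
          * Note that RAIDOUTSTANDING also drives the sizing of the raidbuf
          * pool in raidattach(): num * RAIDOUTSTANDING and
          * 2 * num * RAIDOUTSTANDING are the values handed to rf_pool_init().
          */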
    288 
    289 #define RAIDLABELDEV(dev)	\
    290 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
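         /*
          * Disklabel I/O always goes through the raw partition of the unit;
          * RAIDLABELDEV() maps an arbitrary dev_t for a unit to that raw
          * partition.
          */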
    291 
    292 /* declared here, and made public, for the benefit of KVM stuff.. */
    293 struct raid_softc *raid_softc;
    294 
    295 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
    296 				     struct disklabel *);
    297 static void raidgetdisklabel(dev_t);
    298 static void raidmakedisklabel(struct raid_softc *);
    299 
    300 static int raidlock(struct raid_softc *);
    301 static void raidunlock(struct raid_softc *);
    302 
    303 static void rf_markalldirty(RF_Raid_t *);
    304 
    305 struct device *raidrootdev;
    306 
    307 void rf_ReconThread(struct rf_recon_req *);
    308 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    309 void rf_CopybackThread(RF_Raid_t *raidPtr);
    310 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    311 int rf_autoconfig(struct device *self);
    312 void rf_buildroothack(RF_ConfigSet_t *);
    313 
    314 RF_AutoConfig_t *rf_find_raid_components(void);
    315 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    316 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    317 static int rf_reasonable_label(RF_ComponentLabel_t *);
    318 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    319 int rf_set_autoconfig(RF_Raid_t *, int);
    320 int rf_set_rootpartition(RF_Raid_t *, int);
    321 void rf_release_all_vps(RF_ConfigSet_t *);
    322 void rf_cleanup_config_set(RF_ConfigSet_t *);
    323 int rf_have_enough_components(RF_ConfigSet_t *);
    324 int rf_auto_config_set(RF_ConfigSet_t *, int *);
    325 
    326 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
    327 				  allow autoconfig to take place.
    328 			          Note that this is overridden by having
    329 			          RAID_AUTOCONFIG as an option in the
    330 			          kernel config file.  */
    331 
    332 struct RF_Pools_s rf_pools;
    333 
    334 void
    335 raidattach(int num)
    336 {
    337 	int raidID;
    338 	int i, rc;
    339 
    340 #ifdef DEBUG
    341 	printf("raidattach: Asked for %d units\n", num);
    342 #endif
    343 
    344 	if (num <= 0) {
    345 #ifdef DIAGNOSTIC
    346 		panic("raidattach: count <= 0");
    347 #endif
    348 		return;
    349 	}
    350 	/* This is where all the initialization stuff gets done. */
    351 
    352 	numraid = num;
    353 
    354 	/* Make some space for requested number of units... */
    355 
    356 	RF_Malloc(raidPtrs, num * sizeof(RF_Raid_t *), (RF_Raid_t **));
    357 	if (raidPtrs == NULL) {
    358 		panic("raidPtrs is NULL!!");
    359 	}
    360 
    361 	/* Initialize the component buffer pool. */
    362 	rf_pool_init(&rf_pools.cbuf, sizeof(struct raidbuf),
    363 		     "raidpl", num * RAIDOUTSTANDING,
    364 		     2 * num * RAIDOUTSTANDING);
    365 
    366 	rf_mutex_init(&rf_sparet_wait_mutex);
    367 
    368 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
    369 
    370 	for (i = 0; i < num; i++)
    371 		raidPtrs[i] = NULL;
    372 	rc = rf_BootRaidframe();
    373 	if (rc == 0)
    374 		printf("Kernelized RAIDframe activated\n");
    375 	else
    376 		panic("Serious error booting RAID!!");
    377 
     378 	/* put together some data structures like the CCD device does.. This
     379 	 * lets us lock the device and what-not when it gets opened. */
    380 
    381 	raid_softc = (struct raid_softc *)
    382 		malloc(num * sizeof(struct raid_softc),
    383 		       M_RAIDFRAME, M_NOWAIT);
    384 	if (raid_softc == NULL) {
    385 		printf("WARNING: no memory for RAIDframe driver\n");
    386 		return;
    387 	}
    388 
    389 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
    390 
    391 	raidrootdev = (struct device *)malloc(num * sizeof(struct device),
    392 					      M_RAIDFRAME, M_NOWAIT);
    393 	if (raidrootdev == NULL) {
    394 		panic("No memory for RAIDframe driver!!?!?!");
    395 	}
    396 
    397 	for (raidID = 0; raidID < num; raidID++) {
    398 		bufq_alloc(&raid_softc[raidID].buf_queue, BUFQ_FCFS);
    399 		pseudo_disk_init(&raid_softc[raidID].sc_dkdev);
    400 
    401 		raidrootdev[raidID].dv_class  = DV_DISK;
    402 		raidrootdev[raidID].dv_cfdata = NULL;
    403 		raidrootdev[raidID].dv_unit   = raidID;
    404 		raidrootdev[raidID].dv_parent = NULL;
    405 		raidrootdev[raidID].dv_flags  = 0;
    406 		snprintf(raidrootdev[raidID].dv_xname,
    407 		    sizeof(raidrootdev[raidID].dv_xname), "raid%d", raidID);
    408 
    409 		RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
    410 			  (RF_Raid_t *));
    411 		if (raidPtrs[raidID] == NULL) {
    412 			printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
    413 			numraid = raidID;
    414 			return;
    415 		}
    416 	}
    417 
    418 #ifdef RAID_AUTOCONFIG
    419 	raidautoconfig = 1;
    420 #endif
    421 
    422 	/*
    423 	 * Register a finalizer which will be used to auto-config RAID
    424 	 * sets once all real hardware devices have been found.
    425 	 */
    426 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
    427 		printf("WARNING: unable to register RAIDframe finalizer\n");
    428 }
    429 
    430 int
    431 rf_autoconfig(struct device *self)
    432 {
    433 	RF_AutoConfig_t *ac_list;
    434 	RF_ConfigSet_t *config_sets;
    435 
    436 	if (raidautoconfig == 0)
    437 		return (0);
    438 
    439 	/* XXX This code can only be run once. */
    440 	raidautoconfig = 0;
    441 
    442 	/* 1. locate all RAID components on the system */
    443 #ifdef DEBUG
    444 	printf("Searching for RAID components...\n");
    445 #endif
    446 	ac_list = rf_find_raid_components();
    447 
    448 	/* 2. Sort them into their respective sets. */
    449 	config_sets = rf_create_auto_sets(ac_list);
    450 
    451 	/*
     452 	 * 3. Evaluate each set and configure the valid ones.
    453 	 * This gets done in rf_buildroothack().
    454 	 */
    455 	rf_buildroothack(config_sets);
    456 
    457 	return (1);
    458 }
    459 
    460 void
    461 rf_buildroothack(RF_ConfigSet_t *config_sets)
    462 {
    463 	RF_ConfigSet_t *cset;
    464 	RF_ConfigSet_t *next_cset;
    465 	int retcode;
    466 	int raidID;
    467 	int rootID;
    468 	int num_root;
    469 
    470 	rootID = 0;
    471 	num_root = 0;
    472 	cset = config_sets;
    473 	while(cset != NULL ) {
    474 		next_cset = cset->next;
    475 		if (rf_have_enough_components(cset) &&
    476 		    cset->ac->clabel->autoconfigure==1) {
    477 			retcode = rf_auto_config_set(cset,&raidID);
    478 			if (!retcode) {
    479 				if (cset->rootable) {
    480 					rootID = raidID;
    481 					num_root++;
    482 				}
    483 			} else {
    484 				/* The autoconfig didn't work :( */
    485 #if DEBUG
    486 				printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
    487 #endif
    488 				rf_release_all_vps(cset);
    489 			}
    490 		} else {
    491 			/* we're not autoconfiguring this set...
    492 			   release the associated resources */
    493 			rf_release_all_vps(cset);
    494 		}
    495 		/* cleanup */
    496 		rf_cleanup_config_set(cset);
    497 		cset = next_cset;
    498 	}
    499 
     500 	/* if we found something bootable... */
    501 
    502 	if (num_root == 1) {
    503 		booted_device = &raidrootdev[rootID];
    504 	} else if (num_root > 1) {
    505 		/* we can't guess.. require the user to answer... */
    506 		boothowto |= RB_ASKNAME;
    507 	}
    508 }
    509 
    510 
    511 int
    512 raidsize(dev_t dev)
    513 {
    514 	struct raid_softc *rs;
    515 	struct disklabel *lp;
    516 	int     part, unit, omask, size;
    517 
    518 	unit = raidunit(dev);
    519 	if (unit >= numraid)
    520 		return (-1);
    521 	rs = &raid_softc[unit];
    522 
    523 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    524 		return (-1);
    525 
    526 	part = DISKPART(dev);
    527 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
    528 	lp = rs->sc_dkdev.dk_label;
    529 
    530 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
    531 		return (-1);
    532 
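         	/*
         	 * The size entry point is used for swap/dump sizing, so only
         	 * swap partitions get a real answer; the result is expressed
         	 * in DEV_BSIZE units.
         	 */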
    533 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    534 		size = -1;
    535 	else
    536 		size = lp->d_partitions[part].p_size *
    537 		    (lp->d_secsize / DEV_BSIZE);
    538 
    539 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
    540 		return (-1);
    541 
    542 	return (size);
    543 
    544 }
    545 
    546 int
    547 raiddump(dev_t dev, daddr_t blkno, caddr_t va, size_t  size)
    548 {
    549 	/* Not implemented. */
    550 	return ENXIO;
    551 }
    552 /* ARGSUSED */
    553 int
    554 raidopen(dev_t dev, int flags, int fmt, struct proc *p)
    555 {
    556 	int     unit = raidunit(dev);
    557 	struct raid_softc *rs;
    558 	struct disklabel *lp;
    559 	int     part, pmask;
    560 	int     error = 0;
    561 
    562 	if (unit >= numraid)
    563 		return (ENXIO);
    564 	rs = &raid_softc[unit];
    565 
    566 	if ((error = raidlock(rs)) != 0)
    567 		return (error);
    568 	lp = rs->sc_dkdev.dk_label;
    569 
    570 	part = DISKPART(dev);
    571 	pmask = (1 << part);
    572 
    573 	if ((rs->sc_flags & RAIDF_INITED) &&
    574 	    (rs->sc_dkdev.dk_openmask == 0))
    575 		raidgetdisklabel(dev);
    576 
    577 	/* make sure that this partition exists */
    578 
    579 	if (part != RAW_PART) {
    580 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
    581 		    ((part >= lp->d_npartitions) ||
    582 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    583 			error = ENXIO;
    584 			raidunlock(rs);
    585 			return (error);
    586 		}
    587 	}
    588 	/* Prevent this unit from being unconfigured while open. */
    589 	switch (fmt) {
    590 	case S_IFCHR:
    591 		rs->sc_dkdev.dk_copenmask |= pmask;
    592 		break;
    593 
    594 	case S_IFBLK:
    595 		rs->sc_dkdev.dk_bopenmask |= pmask;
    596 		break;
    597 	}
    598 
    599 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    600 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    601 		/* First one... mark things as dirty... Note that we *MUST*
    602 		 have done a configure before this.  I DO NOT WANT TO BE
    603 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    604 		 THAT THEY BELONG TOGETHER!!!!! */
    605 		/* XXX should check to see if we're only open for reading
    606 		   here... If so, we needn't do this, but then need some
    607 		   other way of keeping track of what's happened.. */
    608 
    609 		rf_markalldirty( raidPtrs[unit] );
    610 	}
    611 
    612 
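         	/*
         	 * Recompute the overall open mask: a bit set in either the
         	 * character or the block mask keeps that partition open.
         	 */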
    613 	rs->sc_dkdev.dk_openmask =
    614 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    615 
    616 	raidunlock(rs);
    617 
    618 	return (error);
    619 
    620 
    621 }
    622 /* ARGSUSED */
    623 int
    624 raidclose(dev_t dev, int flags, int fmt, struct proc *p)
    625 {
    626 	int     unit = raidunit(dev);
    627 	struct raid_softc *rs;
    628 	int     error = 0;
    629 	int     part;
    630 
    631 	if (unit >= numraid)
    632 		return (ENXIO);
    633 	rs = &raid_softc[unit];
    634 
    635 	if ((error = raidlock(rs)) != 0)
    636 		return (error);
    637 
    638 	part = DISKPART(dev);
    639 
    640 	/* ...that much closer to allowing unconfiguration... */
    641 	switch (fmt) {
    642 	case S_IFCHR:
    643 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
    644 		break;
    645 
    646 	case S_IFBLK:
    647 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
    648 		break;
    649 	}
    650 	rs->sc_dkdev.dk_openmask =
    651 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    652 
    653 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    654 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
     655 		/* Last one... the device is not unconfigured yet.
     656 		   Mark things as clean.  (Device shutdown has already
     657 		   taken care of setting the clean bits if RAIDF_INITED
     658 		   is not set.) */
    659 
    660 		rf_update_component_labels(raidPtrs[unit],
    661 						 RF_FINAL_COMPONENT_UPDATE);
    662 		if (doing_shutdown) {
    663 			/* last one, and we're going down, so
    664 			   lights out for this RAID set too. */
    665 			error = rf_Shutdown(raidPtrs[unit]);
    666 
    667 			/* It's no longer initialized... */
    668 			rs->sc_flags &= ~RAIDF_INITED;
    669 
    670 			/* Detach the disk. */
    671 			pseudo_disk_detach(&rs->sc_dkdev);
    672 		}
    673 	}
    674 
    675 	raidunlock(rs);
    676 	return (0);
    677 
    678 }
    679 
    680 void
    681 raidstrategy(struct buf *bp)
    682 {
    683 	int s;
    684 
    685 	unsigned int raidID = raidunit(bp->b_dev);
    686 	RF_Raid_t *raidPtr;
    687 	struct raid_softc *rs = &raid_softc[raidID];
    688 	int     wlabel;
    689 
    690 	if ((rs->sc_flags & RAIDF_INITED) ==0) {
    691 		bp->b_error = ENXIO;
    692 		bp->b_flags |= B_ERROR;
    693 		bp->b_resid = bp->b_bcount;
    694 		biodone(bp);
    695 		return;
    696 	}
    697 	if (raidID >= numraid || !raidPtrs[raidID]) {
    698 		bp->b_error = ENODEV;
    699 		bp->b_flags |= B_ERROR;
    700 		bp->b_resid = bp->b_bcount;
    701 		biodone(bp);
    702 		return;
    703 	}
    704 	raidPtr = raidPtrs[raidID];
    705 	if (!raidPtr->valid) {
    706 		bp->b_error = ENODEV;
    707 		bp->b_flags |= B_ERROR;
    708 		bp->b_resid = bp->b_bcount;
    709 		biodone(bp);
    710 		return;
    711 	}
    712 	if (bp->b_bcount == 0) {
    713 		db1_printf(("b_bcount is zero..\n"));
    714 		biodone(bp);
    715 		return;
    716 	}
    717 
    718 	/*
    719 	 * Do bounds checking and adjust transfer.  If there's an
    720 	 * error, the bounds check will flag that for us.
    721 	 */
    722 
    723 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
    724 	if (DISKPART(bp->b_dev) != RAW_PART)
    725 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
    726 			db1_printf(("Bounds check failed!!:%d %d\n",
    727 				(int) bp->b_blkno, (int) wlabel));
    728 			biodone(bp);
    729 			return;
    730 		}
    731 	s = splbio();
    732 
    733 	bp->b_resid = 0;
    734 
    735 	/* stuff it onto our queue */
    736 	BUFQ_PUT(&rs->buf_queue, bp);
    737 
    738 	raidstart(raidPtrs[raidID]);
    739 
    740 	splx(s);
    741 }
    742 /* ARGSUSED */
    743 int
    744 raidread(dev_t dev, struct uio *uio, int flags)
    745 {
    746 	int     unit = raidunit(dev);
    747 	struct raid_softc *rs;
    748 
    749 	if (unit >= numraid)
    750 		return (ENXIO);
    751 	rs = &raid_softc[unit];
    752 
    753 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    754 		return (ENXIO);
    755 
    756 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    757 
    758 }
    759 /* ARGSUSED */
    760 int
    761 raidwrite(dev_t dev, struct uio *uio, int flags)
    762 {
    763 	int     unit = raidunit(dev);
    764 	struct raid_softc *rs;
    765 
    766 	if (unit >= numraid)
    767 		return (ENXIO);
    768 	rs = &raid_softc[unit];
    769 
    770 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    771 		return (ENXIO);
    772 
    773 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    774 
    775 }
    776 
    777 int
    778 raidioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
    779 {
    780 	int     unit = raidunit(dev);
    781 	int     error = 0;
    782 	int     part, pmask;
    783 	struct raid_softc *rs;
    784 	RF_Config_t *k_cfg, *u_cfg;
    785 	RF_Raid_t *raidPtr;
    786 	RF_RaidDisk_t *diskPtr;
    787 	RF_AccTotals_t *totals;
    788 	RF_DeviceConfig_t *d_cfg, **ucfgp;
    789 	u_char *specific_buf;
    790 	int retcode = 0;
    791 	int column;
    792 	int raidid;
    793 	struct rf_recon_req *rrcopy, *rr;
    794 	RF_ComponentLabel_t *clabel;
    795 	RF_ComponentLabel_t ci_label;
    796 	RF_ComponentLabel_t **clabel_ptr;
    797 	RF_SingleComponent_t *sparePtr,*componentPtr;
    798 	RF_SingleComponent_t hot_spare;
    799 	RF_SingleComponent_t component;
    800 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
    801 	int i, j, d;
    802 #ifdef __HAVE_OLD_DISKLABEL
    803 	struct disklabel newlabel;
    804 #endif
    805 
    806 	if (unit >= numraid)
    807 		return (ENXIO);
    808 	rs = &raid_softc[unit];
    809 	raidPtr = raidPtrs[unit];
    810 
    811 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
    812 		(int) DISKPART(dev), (int) unit, (int) cmd));
    813 
    814 	/* Must be open for writes for these commands... */
    815 	switch (cmd) {
    816 	case DIOCSDINFO:
    817 	case DIOCWDINFO:
    818 #ifdef __HAVE_OLD_DISKLABEL
    819 	case ODIOCWDINFO:
    820 	case ODIOCSDINFO:
    821 #endif
    822 	case DIOCWLABEL:
    823 		if ((flag & FWRITE) == 0)
    824 			return (EBADF);
    825 	}
    826 
    827 	/* Must be initialized for these... */
    828 	switch (cmd) {
    829 	case DIOCGDINFO:
    830 	case DIOCSDINFO:
    831 	case DIOCWDINFO:
    832 #ifdef __HAVE_OLD_DISKLABEL
    833 	case ODIOCGDINFO:
    834 	case ODIOCWDINFO:
    835 	case ODIOCSDINFO:
    836 	case ODIOCGDEFLABEL:
    837 #endif
    838 	case DIOCGPART:
    839 	case DIOCWLABEL:
    840 	case DIOCGDEFLABEL:
    841 	case RAIDFRAME_SHUTDOWN:
    842 	case RAIDFRAME_REWRITEPARITY:
    843 	case RAIDFRAME_GET_INFO:
    844 	case RAIDFRAME_RESET_ACCTOTALS:
    845 	case RAIDFRAME_GET_ACCTOTALS:
    846 	case RAIDFRAME_KEEP_ACCTOTALS:
    847 	case RAIDFRAME_GET_SIZE:
    848 	case RAIDFRAME_FAIL_DISK:
    849 	case RAIDFRAME_COPYBACK:
    850 	case RAIDFRAME_CHECK_RECON_STATUS:
    851 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
    852 	case RAIDFRAME_GET_COMPONENT_LABEL:
    853 	case RAIDFRAME_SET_COMPONENT_LABEL:
    854 	case RAIDFRAME_ADD_HOT_SPARE:
    855 	case RAIDFRAME_REMOVE_HOT_SPARE:
    856 	case RAIDFRAME_INIT_LABELS:
    857 	case RAIDFRAME_REBUILD_IN_PLACE:
    858 	case RAIDFRAME_CHECK_PARITY:
    859 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
    860 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
    861 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
    862 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
    863 	case RAIDFRAME_SET_AUTOCONFIG:
    864 	case RAIDFRAME_SET_ROOT:
    865 	case RAIDFRAME_DELETE_COMPONENT:
    866 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
    867 		if ((rs->sc_flags & RAIDF_INITED) == 0)
    868 			return (ENXIO);
    869 	}
    870 
    871 	switch (cmd) {
    872 
    873 		/* configure the system */
    874 	case RAIDFRAME_CONFIGURE:
    875 
    876 		if (raidPtr->valid) {
    877 			/* There is a valid RAID set running on this unit! */
    878 			printf("raid%d: Device already configured!\n",unit);
    879 			return(EINVAL);
    880 		}
    881 
    882 		/* copy-in the configuration information */
    883 		/* data points to a pointer to the configuration structure */
    884 
    885 		u_cfg = *((RF_Config_t **) data);
    886 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
    887 		if (k_cfg == NULL) {
    888 			return (ENOMEM);
    889 		}
    890 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
    891 		if (retcode) {
    892 			RF_Free(k_cfg, sizeof(RF_Config_t));
    893 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
    894 				retcode));
    895 			return (retcode);
    896 		}
    897 		/* allocate a buffer for the layout-specific data, and copy it
    898 		 * in */
    899 		if (k_cfg->layoutSpecificSize) {
    900 			if (k_cfg->layoutSpecificSize > 10000) {
    901 				/* sanity check */
    902 				RF_Free(k_cfg, sizeof(RF_Config_t));
    903 				return (EINVAL);
    904 			}
    905 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
    906 			    (u_char *));
    907 			if (specific_buf == NULL) {
    908 				RF_Free(k_cfg, sizeof(RF_Config_t));
    909 				return (ENOMEM);
    910 			}
    911 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
    912 			    k_cfg->layoutSpecificSize);
    913 			if (retcode) {
    914 				RF_Free(k_cfg, sizeof(RF_Config_t));
    915 				RF_Free(specific_buf,
    916 					k_cfg->layoutSpecificSize);
    917 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
    918 					retcode));
    919 				return (retcode);
    920 			}
    921 		} else
    922 			specific_buf = NULL;
    923 		k_cfg->layoutSpecific = specific_buf;
    924 
    925 		/* should do some kind of sanity check on the configuration.
    926 		 * Store the sum of all the bytes in the last byte? */
    927 
    928 		/* configure the system */
    929 
    930 		/*
    931 		 * Clear the entire RAID descriptor, just to make sure
    932 		 *  there is no stale data left in the case of a
    933 		 *  reconfiguration
    934 		 */
    935 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
    936 		raidPtr->raidid = unit;
    937 
    938 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
    939 
    940 		if (retcode == 0) {
    941 
    942 			/* allow this many simultaneous IO's to
    943 			   this RAID device */
    944 			raidPtr->openings = RAIDOUTSTANDING;
    945 
    946 			raidinit(raidPtr);
    947 			rf_markalldirty(raidPtr);
    948 		}
    949 		/* free the buffers.  No return code here. */
    950 		if (k_cfg->layoutSpecificSize) {
    951 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
    952 		}
    953 		RF_Free(k_cfg, sizeof(RF_Config_t));
    954 
    955 		return (retcode);
    956 
    957 		/* shutdown the system */
    958 	case RAIDFRAME_SHUTDOWN:
    959 
    960 		if ((error = raidlock(rs)) != 0)
    961 			return (error);
    962 
    963 		/*
    964 		 * If somebody has a partition mounted, we shouldn't
    965 		 * shutdown.
    966 		 */
    967 
    968 		part = DISKPART(dev);
    969 		pmask = (1 << part);
    970 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
    971 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
    972 			(rs->sc_dkdev.dk_copenmask & pmask))) {
    973 			raidunlock(rs);
    974 			return (EBUSY);
    975 		}
    976 
    977 		retcode = rf_Shutdown(raidPtr);
    978 
    979 		/* It's no longer initialized... */
    980 		rs->sc_flags &= ~RAIDF_INITED;
    981 
    982 		/* Detach the disk. */
    983 		pseudo_disk_detach(&rs->sc_dkdev);
    984 
    985 		raidunlock(rs);
    986 
    987 		return (retcode);
    988 	case RAIDFRAME_GET_COMPONENT_LABEL:
    989 		clabel_ptr = (RF_ComponentLabel_t **) data;
    990 		/* need to read the component label for the disk indicated
    991 		   by row,column in clabel */
    992 
     993 		/* For practice, let's get it directly from disk, rather
    994 		   than from the in-core copy */
    995 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
    996 			   (RF_ComponentLabel_t *));
    997 		if (clabel == NULL)
    998 			return (ENOMEM);
    999 
   1000 		memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
   1001 
   1002 		retcode = copyin( *clabel_ptr, clabel,
   1003 				  sizeof(RF_ComponentLabel_t));
   1004 
   1005 		if (retcode) {
   1006 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1007 			return(retcode);
   1008 		}
   1009 
   1010 		clabel->row = 0; /* Don't allow looking at anything else.*/
   1011 
   1012 		column = clabel->column;
   1013 
   1014 		if ((column < 0) || (column >= raidPtr->numCol +
   1015 				     raidPtr->numSpare)) {
   1016 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1017 			return(EINVAL);
   1018 		}
   1019 
   1020 		raidread_component_label(raidPtr->Disks[column].dev,
   1021 				raidPtr->raid_cinfo[column].ci_vp,
   1022 				clabel );
   1023 
   1024 		retcode = copyout(clabel, *clabel_ptr,
   1025 				  sizeof(RF_ComponentLabel_t));
   1026 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
   1027 		return (retcode);
   1028 
   1029 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1030 		clabel = (RF_ComponentLabel_t *) data;
   1031 
   1032 		/* XXX check the label for valid stuff... */
   1033 		/* Note that some things *should not* get modified --
   1034 		   the user should be re-initing the labels instead of
   1035 		   trying to patch things.
   1036 		   */
   1037 
   1038 		raidid = raidPtr->raidid;
   1039 #if DEBUG
   1040 		printf("raid%d: Got component label:\n", raidid);
   1041 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1042 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1043 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1044 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1045 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1046 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1047 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1048 #endif
   1049 		clabel->row = 0;
   1050 		column = clabel->column;
   1051 
   1052 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1053 			return(EINVAL);
   1054 		}
   1055 
   1056 		/* XXX this isn't allowed to do anything for now :-) */
   1057 
   1058 		/* XXX and before it is, we need to fill in the rest
   1059 		   of the fields!?!?!?! */
   1060 #if 0
   1061 		raidwrite_component_label(
   1062                             raidPtr->Disks[column].dev,
   1063 			    raidPtr->raid_cinfo[column].ci_vp,
   1064 			    clabel );
   1065 #endif
   1066 		return (0);
   1067 
   1068 	case RAIDFRAME_INIT_LABELS:
   1069 		clabel = (RF_ComponentLabel_t *) data;
   1070 		/*
   1071 		   we only want the serial number from
   1072 		   the above.  We get all the rest of the information
   1073 		   from the config that was used to create this RAID
   1074 		   set.
   1075 		   */
   1076 
   1077 		raidPtr->serial_number = clabel->serial_number;
   1078 
   1079 		raid_init_component_label(raidPtr, &ci_label);
   1080 		ci_label.serial_number = clabel->serial_number;
    1081 		ci_label.row = 0; /* we don't pretend to support more */
   1082 
   1083 		for(column=0;column<raidPtr->numCol;column++) {
   1084 			diskPtr = &raidPtr->Disks[column];
   1085 			if (!RF_DEAD_DISK(diskPtr->status)) {
   1086 				ci_label.partitionSize = diskPtr->partitionSize;
   1087 				ci_label.column = column;
   1088 				raidwrite_component_label(
   1089 							  raidPtr->Disks[column].dev,
   1090 							  raidPtr->raid_cinfo[column].ci_vp,
   1091 							  &ci_label );
   1092 			}
   1093 		}
   1094 
   1095 		return (retcode);
   1096 	case RAIDFRAME_SET_AUTOCONFIG:
   1097 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1098 		printf("raid%d: New autoconfig value is: %d\n",
   1099 		       raidPtr->raidid, d);
   1100 		*(int *) data = d;
   1101 		return (retcode);
   1102 
   1103 	case RAIDFRAME_SET_ROOT:
   1104 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1105 		printf("raid%d: New rootpartition value is: %d\n",
   1106 		       raidPtr->raidid, d);
   1107 		*(int *) data = d;
   1108 		return (retcode);
   1109 
   1110 		/* initialize all parity */
   1111 	case RAIDFRAME_REWRITEPARITY:
   1112 
   1113 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1114 			/* Parity for RAID 0 is trivially correct */
   1115 			raidPtr->parity_good = RF_RAID_CLEAN;
   1116 			return(0);
   1117 		}
   1118 
   1119 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1120 			/* Re-write is already in progress! */
   1121 			return(EINVAL);
   1122 		}
   1123 
   1124 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1125 					   rf_RewriteParityThread,
   1126 					   raidPtr,"raid_parity");
   1127 		return (retcode);
   1128 
   1129 
   1130 	case RAIDFRAME_ADD_HOT_SPARE:
   1131 		sparePtr = (RF_SingleComponent_t *) data;
   1132 		memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
   1133 		retcode = rf_add_hot_spare(raidPtr, &hot_spare);
   1134 		return(retcode);
   1135 
   1136 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1137 		return(retcode);
   1138 
   1139 	case RAIDFRAME_DELETE_COMPONENT:
   1140 		componentPtr = (RF_SingleComponent_t *)data;
   1141 		memcpy( &component, componentPtr,
   1142 			sizeof(RF_SingleComponent_t));
   1143 		retcode = rf_delete_component(raidPtr, &component);
   1144 		return(retcode);
   1145 
   1146 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1147 		componentPtr = (RF_SingleComponent_t *)data;
   1148 		memcpy( &component, componentPtr,
   1149 			sizeof(RF_SingleComponent_t));
   1150 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1151 		return(retcode);
   1152 
   1153 	case RAIDFRAME_REBUILD_IN_PLACE:
   1154 
   1155 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1156 			/* Can't do this on a RAID 0!! */
   1157 			return(EINVAL);
   1158 		}
   1159 
   1160 		if (raidPtr->recon_in_progress == 1) {
   1161 			/* a reconstruct is already in progress! */
   1162 			return(EINVAL);
   1163 		}
   1164 
   1165 		componentPtr = (RF_SingleComponent_t *) data;
   1166 		memcpy( &component, componentPtr,
   1167 			sizeof(RF_SingleComponent_t));
   1168 		component.row = 0; /* we don't support any more */
   1169 		column = component.column;
   1170 
   1171 		if ((column < 0) || (column >= raidPtr->numCol)) {
   1172 			return(EINVAL);
   1173 		}
   1174 
   1175 		RF_LOCK_MUTEX(raidPtr->mutex);
   1176 		if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
   1177 		    (raidPtr->numFailures > 0)) {
   1178 			/* XXX 0 above shouldn't be constant!!! */
   1179 			/* some component other than this has failed.
   1180 			   Let's not make things worse than they already
   1181 			   are... */
   1182 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1183 			       raidPtr->raidid);
   1184 			printf("raid%d:     Col: %d   Too many failures.\n",
   1185 			       raidPtr->raidid, column);
   1186 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1187 			return (EINVAL);
   1188 		}
   1189 		if (raidPtr->Disks[column].status ==
   1190 		    rf_ds_reconstructing) {
   1191 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1192 			       raidPtr->raidid);
    1193 			printf("raid%d:    Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, column);
   1194 
   1195 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1196 			return (EINVAL);
   1197 		}
   1198 		if (raidPtr->Disks[column].status == rf_ds_spared) {
   1199 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1200 			return (EINVAL);
   1201 		}
   1202 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1203 
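         		/* Heap-allocate the request handed to the reconstruction
         		   thread; it must outlive this ioctl. */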
   1204 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1205 		if (rrcopy == NULL)
   1206 			return(ENOMEM);
   1207 
   1208 		rrcopy->raidPtr = (void *) raidPtr;
   1209 		rrcopy->col = column;
   1210 
   1211 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1212 					   rf_ReconstructInPlaceThread,
   1213 					   rrcopy,"raid_reconip");
   1214 		return(retcode);
   1215 
   1216 	case RAIDFRAME_GET_INFO:
   1217 		if (!raidPtr->valid)
   1218 			return (ENODEV);
   1219 		ucfgp = (RF_DeviceConfig_t **) data;
   1220 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1221 			  (RF_DeviceConfig_t *));
   1222 		if (d_cfg == NULL)
   1223 			return (ENOMEM);
   1224 		memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
   1225 		d_cfg->rows = 1; /* there is only 1 row now */
   1226 		d_cfg->cols = raidPtr->numCol;
   1227 		d_cfg->ndevs = raidPtr->numCol;
   1228 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1229 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1230 			return (ENOMEM);
   1231 		}
   1232 		d_cfg->nspares = raidPtr->numSpare;
   1233 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1234 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1235 			return (ENOMEM);
   1236 		}
   1237 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1238 		d = 0;
   1239 		for (j = 0; j < d_cfg->cols; j++) {
   1240 			d_cfg->devs[d] = raidPtr->Disks[j];
   1241 			d++;
   1242 		}
   1243 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1244 			d_cfg->spares[i] = raidPtr->Disks[j];
   1245 		}
   1246 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1247 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1248 
   1249 		return (retcode);
   1250 
   1251 	case RAIDFRAME_CHECK_PARITY:
   1252 		*(int *) data = raidPtr->parity_good;
   1253 		return (0);
   1254 
   1255 	case RAIDFRAME_RESET_ACCTOTALS:
   1256 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1257 		return (0);
   1258 
   1259 	case RAIDFRAME_GET_ACCTOTALS:
   1260 		totals = (RF_AccTotals_t *) data;
   1261 		*totals = raidPtr->acc_totals;
   1262 		return (0);
   1263 
   1264 	case RAIDFRAME_KEEP_ACCTOTALS:
   1265 		raidPtr->keep_acc_totals = *(int *)data;
   1266 		return (0);
   1267 
   1268 	case RAIDFRAME_GET_SIZE:
   1269 		*(int *) data = raidPtr->totalSectors;
   1270 		return (0);
   1271 
   1272 		/* fail a disk & optionally start reconstruction */
   1273 	case RAIDFRAME_FAIL_DISK:
   1274 
   1275 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1276 			/* Can't do this on a RAID 0!! */
   1277 			return(EINVAL);
   1278 		}
   1279 
   1280 		rr = (struct rf_recon_req *) data;
   1281 		rr->row = 0;
   1282 		if (rr->col < 0 || rr->col >= raidPtr->numCol)
   1283 			return (EINVAL);
   1284 
   1285 
   1286 		RF_LOCK_MUTEX(raidPtr->mutex);
   1287 		if (raidPtr->status == rf_rs_reconstructing) {
   1288 			/* you can't fail a disk while we're reconstructing! */
   1289 			/* XXX wrong for RAID6 */
   1290 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1291 			return (EINVAL);
   1292 		}
   1293 		if ((raidPtr->Disks[rr->col].status ==
   1294 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1295 			/* some other component has failed.  Let's not make
   1296 			   things worse. XXX wrong for RAID6 */
   1297 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1298 			return (EINVAL);
   1299 		}
   1300 		if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
   1301 			/* Can't fail a spared disk! */
   1302 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1303 			return (EINVAL);
   1304 		}
   1305 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1306 
   1307 		/* make a copy of the recon request so that we don't rely on
   1308 		 * the user's buffer */
   1309 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1310 		if (rrcopy == NULL)
   1311 			return(ENOMEM);
   1312 		memcpy(rrcopy, rr, sizeof(*rr));
   1313 		rrcopy->raidPtr = (void *) raidPtr;
   1314 
   1315 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1316 					   rf_ReconThread,
   1317 					   rrcopy,"raid_recon");
   1318 		return (0);
   1319 
   1320 		/* invoke a copyback operation after recon on whatever disk
   1321 		 * needs it, if any */
   1322 	case RAIDFRAME_COPYBACK:
   1323 
   1324 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1325 			/* This makes no sense on a RAID 0!! */
   1326 			return(EINVAL);
   1327 		}
   1328 
   1329 		if (raidPtr->copyback_in_progress == 1) {
   1330 			/* Copyback is already in progress! */
   1331 			return(EINVAL);
   1332 		}
   1333 
   1334 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1335 					   rf_CopybackThread,
   1336 					   raidPtr,"raid_copyback");
   1337 		return (retcode);
   1338 
   1339 		/* return the percentage completion of reconstruction */
   1340 	case RAIDFRAME_CHECK_RECON_STATUS:
   1341 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1342 			/* This makes no sense on a RAID 0, so tell the
   1343 			   user it's done. */
   1344 			*(int *) data = 100;
   1345 			return(0);
   1346 		}
   1347 		if (raidPtr->status != rf_rs_reconstructing)
   1348 			*(int *) data = 100;
   1349 		else {
   1350 			if (raidPtr->reconControl->numRUsTotal > 0) {
   1351 				*(int *) data = (raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
   1352 			} else {
   1353 				*(int *) data = 0;
   1354 			}
   1355 		}
   1356 		return (0);
   1357 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1358 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1359 		if (raidPtr->status != rf_rs_reconstructing) {
   1360 			progressInfo.remaining = 0;
   1361 			progressInfo.completed = 100;
   1362 			progressInfo.total = 100;
   1363 		} else {
   1364 			progressInfo.total =
   1365 				raidPtr->reconControl->numRUsTotal;
   1366 			progressInfo.completed =
   1367 				raidPtr->reconControl->numRUsComplete;
   1368 			progressInfo.remaining = progressInfo.total -
   1369 				progressInfo.completed;
   1370 		}
   1371 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1372 				  sizeof(RF_ProgressInfo_t));
   1373 		return (retcode);
   1374 
   1375 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1376 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1377 			/* This makes no sense on a RAID 0, so tell the
   1378 			   user it's done. */
   1379 			*(int *) data = 100;
   1380 			return(0);
   1381 		}
   1382 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1383 			*(int *) data = 100 *
   1384 				raidPtr->parity_rewrite_stripes_done /
   1385 				raidPtr->Layout.numStripe;
   1386 		} else {
   1387 			*(int *) data = 100;
   1388 		}
   1389 		return (0);
   1390 
   1391 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1392 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1393 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1394 			progressInfo.total = raidPtr->Layout.numStripe;
   1395 			progressInfo.completed =
   1396 				raidPtr->parity_rewrite_stripes_done;
   1397 			progressInfo.remaining = progressInfo.total -
   1398 				progressInfo.completed;
   1399 		} else {
   1400 			progressInfo.remaining = 0;
   1401 			progressInfo.completed = 100;
   1402 			progressInfo.total = 100;
   1403 		}
   1404 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1405 				  sizeof(RF_ProgressInfo_t));
   1406 		return (retcode);
   1407 
   1408 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1409 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1410 			/* This makes no sense on a RAID 0 */
   1411 			*(int *) data = 100;
   1412 			return(0);
   1413 		}
   1414 		if (raidPtr->copyback_in_progress == 1) {
   1415 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1416 				raidPtr->Layout.numStripe;
   1417 		} else {
   1418 			*(int *) data = 100;
   1419 		}
   1420 		return (0);
   1421 
   1422 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1423 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1424 		if (raidPtr->copyback_in_progress == 1) {
   1425 			progressInfo.total = raidPtr->Layout.numStripe;
   1426 			progressInfo.completed =
   1427 				raidPtr->copyback_stripes_done;
   1428 			progressInfo.remaining = progressInfo.total -
   1429 				progressInfo.completed;
   1430 		} else {
   1431 			progressInfo.remaining = 0;
   1432 			progressInfo.completed = 100;
   1433 			progressInfo.total = 100;
   1434 		}
   1435 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1436 				  sizeof(RF_ProgressInfo_t));
   1437 		return (retcode);
   1438 
   1439 		/* the sparetable daemon calls this to wait for the kernel to
   1440 		 * need a spare table. this ioctl does not return until a
   1441 		 * spare table is needed. XXX -- calling mpsleep here in the
   1442 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1443 		 * -- I should either compute the spare table in the kernel,
   1444 		 * or have a different -- XXX XXX -- interface (a different
   1445 		 * character device) for delivering the table     -- XXX */
   1446 #if 0
   1447 	case RAIDFRAME_SPARET_WAIT:
   1448 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1449 		while (!rf_sparet_wait_queue)
   1450 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
   1451 		waitreq = rf_sparet_wait_queue;
   1452 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1453 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1454 
   1455 		/* structure assignment */
   1456 		*((RF_SparetWait_t *) data) = *waitreq;
   1457 
   1458 		RF_Free(waitreq, sizeof(*waitreq));
   1459 		return (0);
   1460 
   1461 		/* wakes up a process waiting on SPARET_WAIT and puts an error
    1462 		 * code in it that will cause the daemon to exit */
   1463 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1464 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1465 		waitreq->fcol = -1;
   1466 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1467 		waitreq->next = rf_sparet_wait_queue;
   1468 		rf_sparet_wait_queue = waitreq;
   1469 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1470 		wakeup(&rf_sparet_wait_queue);
   1471 		return (0);
   1472 
   1473 		/* used by the spare table daemon to deliver a spare table
   1474 		 * into the kernel */
   1475 	case RAIDFRAME_SEND_SPARET:
   1476 
   1477 		/* install the spare table */
   1478 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1479 
   1480 		/* respond to the requestor.  the return status of the spare
   1481 		 * table installation is passed in the "fcol" field */
   1482 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1483 		waitreq->fcol = retcode;
   1484 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1485 		waitreq->next = rf_sparet_resp_queue;
   1486 		rf_sparet_resp_queue = waitreq;
   1487 		wakeup(&rf_sparet_resp_queue);
   1488 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1489 
   1490 		return (retcode);
   1491 #endif
   1492 
   1493 	default:
   1494 		break; /* fall through to the os-specific code below */
   1495 
   1496 	}
   1497 
   1498 	if (!raidPtr->valid)
   1499 		return (EINVAL);
   1500 
   1501 	/*
   1502 	 * Add support for "regular" device ioctls here.
   1503 	 */
   1504 
   1505 	switch (cmd) {
   1506 	case DIOCGDINFO:
   1507 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
   1508 		break;
   1509 #ifdef __HAVE_OLD_DISKLABEL
   1510 	case ODIOCGDINFO:
   1511 		newlabel = *(rs->sc_dkdev.dk_label);
   1512 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1513 			return ENOTTY;
   1514 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1515 		break;
   1516 #endif
   1517 
   1518 	case DIOCGPART:
   1519 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
   1520 		((struct partinfo *) data)->part =
   1521 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1522 		break;
   1523 
   1524 	case DIOCWDINFO:
   1525 	case DIOCSDINFO:
   1526 #ifdef __HAVE_OLD_DISKLABEL
   1527 	case ODIOCWDINFO:
   1528 	case ODIOCSDINFO:
   1529 #endif
   1530 	{
   1531 		struct disklabel *lp;
   1532 #ifdef __HAVE_OLD_DISKLABEL
   1533 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1534 			memset(&newlabel, 0, sizeof newlabel);
   1535 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1536 			lp = &newlabel;
   1537 		} else
   1538 #endif
   1539 		lp = (struct disklabel *)data;
   1540 
   1541 		if ((error = raidlock(rs)) != 0)
   1542 			return (error);
   1543 
   1544 		rs->sc_flags |= RAIDF_LABELLING;
   1545 
   1546 		error = setdisklabel(rs->sc_dkdev.dk_label,
   1547 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
   1548 		if (error == 0) {
   1549 			if (cmd == DIOCWDINFO
   1550 #ifdef __HAVE_OLD_DISKLABEL
   1551 			    || cmd == ODIOCWDINFO
   1552 #endif
   1553 			   )
   1554 				error = writedisklabel(RAIDLABELDEV(dev),
   1555 				    raidstrategy, rs->sc_dkdev.dk_label,
   1556 				    rs->sc_dkdev.dk_cpulabel);
   1557 		}
   1558 		rs->sc_flags &= ~RAIDF_LABELLING;
   1559 
   1560 		raidunlock(rs);
   1561 
   1562 		if (error)
   1563 			return (error);
   1564 		break;
   1565 	}
   1566 
   1567 	case DIOCWLABEL:
   1568 		if (*(int *) data != 0)
   1569 			rs->sc_flags |= RAIDF_WLABEL;
   1570 		else
   1571 			rs->sc_flags &= ~RAIDF_WLABEL;
   1572 		break;
   1573 
   1574 	case DIOCGDEFLABEL:
   1575 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
   1576 		break;
   1577 
   1578 #ifdef __HAVE_OLD_DISKLABEL
   1579 	case ODIOCGDEFLABEL:
   1580 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
   1581 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1582 			return ENOTTY;
   1583 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1584 		break;
   1585 #endif
   1586 
   1587 	default:
   1588 		retcode = ENOTTY;
   1589 	}
   1590 	return (retcode);
   1591 
   1592 }
   1593 
   1594 
   1595 /* raidinit -- complete the rest of the initialization for the
   1596    RAIDframe device.  */
   1597 
   1598 
   1599 static void
   1600 raidinit(RF_Raid_t *raidPtr)
   1601 {
   1602 	struct raid_softc *rs;
   1603 	int     unit;
   1604 
   1605 	unit = raidPtr->raidid;
   1606 
   1607 	rs = &raid_softc[unit];
   1608 
   1609 	/* XXX should check return code first... */
   1610 	rs->sc_flags |= RAIDF_INITED;
   1611 
   1612 	/* XXX doesn't check bounds. */
   1613 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
   1614 
   1615 	rs->sc_dkdev.dk_name = rs->sc_xname;
   1616 
   1617 	/* disk_attach actually creates space for the CPU disklabel, among
   1618 	 * other things, so it's critical to call this *BEFORE* we try putzing
   1619 	 * with disklabels. */
   1620 
   1621 	pseudo_disk_attach(&rs->sc_dkdev);
   1622 
   1623 	/* XXX There may be a weird interaction here between this, and
   1624 	 * protectedSectors, as used in RAIDframe.  */
   1625 
   1626 	rs->sc_size = raidPtr->totalSectors;
   1627 }
   1628 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1629 /* wake up the daemon & tell it to get us a spare table
   1630  * XXX
   1631  * the entries in the queues should be tagged with the raidPtr
   1632  * so that in the extremely rare case that two recons happen at once,
    1633  * we know for which device we're requesting a spare table
   1634  * XXX
   1635  *
   1636  * XXX This code is not currently used. GO
   1637  */
   1638 int
   1639 rf_GetSpareTableFromDaemon(RF_SparetWait_t *req)
   1640 {
   1641 	int     retcode;
   1642 
   1643 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1644 	req->next = rf_sparet_wait_queue;
   1645 	rf_sparet_wait_queue = req;
   1646 	wakeup(&rf_sparet_wait_queue);
   1647 
    1648 	/* XXX stale: the original mpsleep() released the mutex; tsleep() below does not */
   1649 	while (!rf_sparet_resp_queue) {
   1650 		tsleep(&rf_sparet_resp_queue, PRIBIO,
   1651 		    "raidframe getsparetable", 0);
   1652 	}
   1653 	req = rf_sparet_resp_queue;
   1654 	rf_sparet_resp_queue = req->next;
   1655 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1656 
   1657 	retcode = req->fcol;
   1658 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
   1659 					 * alloc'd */
   1660 	return (retcode);
   1661 }
   1662 #endif
   1663 
   1664 /* a wrapper around rf_DoAccess that extracts appropriate info from the
   1665  * bp & passes it down.
    1666  * Any calls originating in the kernel must use non-blocking I/O.  We
    1667  * also do some extra sanity checking to return "appropriate" error
    1668  * values for certain conditions (to make some standard utilities work).
   1669  *
   1670  * Formerly known as: rf_DoAccessKernel
   1671  */
   1672 void
   1673 raidstart(RF_Raid_t *raidPtr)
   1674 {
   1675 	RF_SectorCount_t num_blocks, pb, sum;
   1676 	RF_RaidAddr_t raid_addr;
   1677 	struct partition *pp;
   1678 	daddr_t blocknum;
   1679 	int     unit;
   1680 	struct raid_softc *rs;
   1681 	int     do_async;
   1682 	struct buf *bp;
   1683 	int rc;
   1684 
   1685 	unit = raidPtr->raidid;
   1686 	rs = &raid_softc[unit];
   1687 
   1688 	/* quick check to see if anything has died recently */
   1689 	RF_LOCK_MUTEX(raidPtr->mutex);
   1690 	if (raidPtr->numNewFailures > 0) {
   1691 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1692 		rf_update_component_labels(raidPtr,
   1693 					   RF_NORMAL_COMPONENT_UPDATE);
   1694 		RF_LOCK_MUTEX(raidPtr->mutex);
   1695 		raidPtr->numNewFailures--;
   1696 	}
   1697 
   1698 	/* Check to see if we're at the limit... */
   1699 	while (raidPtr->openings > 0) {
   1700 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1701 
   1702 		/* get the next item, if any, from the queue */
   1703 		if ((bp = BUFQ_GET(&rs->buf_queue)) == NULL) {
   1704 			/* nothing more to do */
   1705 			return;
   1706 		}
   1707 
   1708 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
   1709 		 * partition.. Need to make it absolute to the underlying
   1710 		 * device.. */
   1711 
   1712 		blocknum = bp->b_blkno;
   1713 		if (DISKPART(bp->b_dev) != RAW_PART) {
   1714 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
   1715 			blocknum += pp->p_offset;
   1716 		}
   1717 
   1718 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
   1719 			    (int) blocknum));
   1720 
   1721 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
   1722 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
   1723 
   1724 		/* *THIS* is where we adjust what block we're going to...
   1725 		 * but DO NOT TOUCH bp->b_blkno!!! */
   1726 		raid_addr = blocknum;
   1727 
   1728 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
   1729 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
   1730 		sum = raid_addr + num_blocks + pb;
   1731 		if (1 || rf_debugKernelAccess) {
   1732 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
   1733 				    (int) raid_addr, (int) sum, (int) num_blocks,
   1734 				    (int) pb, (int) bp->b_resid));
   1735 		}
   1736 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
   1737 		    || (sum < num_blocks) || (sum < pb)) {
   1738 			bp->b_error = ENOSPC;
   1739 			bp->b_flags |= B_ERROR;
   1740 			bp->b_resid = bp->b_bcount;
   1741 			biodone(bp);
   1742 			RF_LOCK_MUTEX(raidPtr->mutex);
   1743 			continue;
   1744 		}
   1745 		/*
   1746 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
   1747 		 */
   1748 
   1749 		if (bp->b_bcount & raidPtr->sectorMask) {
   1750 			bp->b_error = EINVAL;
   1751 			bp->b_flags |= B_ERROR;
   1752 			bp->b_resid = bp->b_bcount;
   1753 			biodone(bp);
   1754 			RF_LOCK_MUTEX(raidPtr->mutex);
   1755 			continue;
   1756 
   1757 		}
   1758 		db1_printf(("Calling DoAccess..\n"));
   1759 
   1760 
   1761 		RF_LOCK_MUTEX(raidPtr->mutex);
   1762 		raidPtr->openings--;
   1763 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1764 
   1765 		/*
   1766 		 * Everything is async.
   1767 		 */
   1768 		do_async = 1;
   1769 
   1770 		disk_busy(&rs->sc_dkdev);
   1771 
   1772 		/* XXX we're still at splbio() here... do we *really*
   1773 		   need to be? */
   1774 
   1775 		/* don't ever condition on bp->b_flags & B_WRITE.
   1776 		 * always condition on B_READ instead */
   1777 
   1778 		rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
   1779 				 RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
   1780 				 do_async, raid_addr, num_blocks,
   1781 				 bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
   1782 
   1783 		if (rc) {
   1784 			bp->b_error = rc;
   1785 			bp->b_flags |= B_ERROR;
   1786 			bp->b_resid = bp->b_bcount;
   1787 			biodone(bp);
   1788 			/* continue loop */
   1789 		}
   1790 
   1791 		RF_LOCK_MUTEX(raidPtr->mutex);
   1792 	}
   1793 	RF_UNLOCK_MUTEX(raidPtr->mutex);
   1794 }
   1795 
   1796 
   1797 
   1798 
   1799 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   1800 
   1801 int
   1802 rf_DispatchKernelIO(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req)
   1803 {
   1804 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   1805 	struct buf *bp;
   1806 	struct raidbuf *raidbp = NULL;
   1807 
   1808 	req->queue = queue;
   1809 
   1810 #if DIAGNOSTIC
   1811 	if (queue->raidPtr->raidid >= numraid) {
   1812 		printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
   1813 		    numraid);
   1814 		panic("Invalid Unit number in rf_DispatchKernelIO");
   1815 	}
   1816 #endif
   1817 
   1818 	bp = req->bp;
   1819 #if 1
   1820 	/* XXX when there is a physical disk failure, someone is passing us a
   1821 	 * buffer that contains old stuff!!  Attempt to deal with this problem
   1822 	 * without taking a performance hit... (not sure where the real bug
   1823 	 * is.  It's buried in RAIDframe somewhere) :-(  GO ) */
   1824 
   1825 	if (bp->b_flags & B_ERROR) {
   1826 		bp->b_flags &= ~B_ERROR;
   1827 	}
   1828 	if (bp->b_error != 0) {
   1829 		bp->b_error = 0;
   1830 	}
   1831 #endif
   1832 	raidbp = pool_get(&rf_pools.cbuf, PR_NOWAIT);
   1833 	if (raidbp == NULL) {
   1834 		bp->b_flags |= B_ERROR;
   1835 		bp->b_error = ENOMEM;
   1836 		return (ENOMEM);
   1837 	}
   1838 	BUF_INIT(&raidbp->rf_buf);
   1839 
   1840 	/*
   1841 	 * context for raidiodone
   1842 	 */
   1843 	raidbp->rf_obp = bp;
   1844 	raidbp->req = req;
   1845 
   1846 	BIO_COPYPRIO(&raidbp->rf_buf, bp);
   1847 
   1848 	switch (req->type) {
   1849 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   1850 		/* XXX need to do something extra here.. */
   1851 		/* I'm leaving this in, as I've never actually seen it used,
   1852 		 * and I'd like folks to report it... GO */
    1853 		printf("WAKEUP CALLED\n");
   1854 		queue->numOutstanding++;
   1855 
   1856 		/* XXX need to glue the original buffer into this??  */
   1857 
   1858 		KernelWakeupFunc(&raidbp->rf_buf);
   1859 		break;
   1860 
   1861 	case RF_IO_TYPE_READ:
   1862 	case RF_IO_TYPE_WRITE:
   1863 #if RF_ACC_TRACE > 0
   1864 		if (req->tracerec) {
   1865 			RF_ETIMER_START(req->tracerec->timer);
   1866 		}
   1867 #endif
   1868 		InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
   1869 		    op | bp->b_flags, queue->rf_cinfo->ci_dev,
   1870 		    req->sectorOffset, req->numSector,
   1871 		    req->buf, KernelWakeupFunc, (void *) req,
   1872 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   1873 
   1874 		if (rf_debugKernelAccess) {
   1875 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   1876 				(long) bp->b_blkno));
   1877 		}
   1878 		queue->numOutstanding++;
   1879 		queue->last_deq_sector = req->sectorOffset;
   1880 		/* acc wouldn't have been let in if there were any pending
   1881 		 * reqs at any other priority */
   1882 		queue->curPriority = req->priority;
   1883 
   1884 		db1_printf(("Going for %c to unit %d col %d\n",
   1885 			    req->type, queue->raidPtr->raidid,
   1886 			    queue->col));
   1887 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   1888 			(int) req->sectorOffset, (int) req->numSector,
   1889 			(int) (req->numSector <<
   1890 			    queue->raidPtr->logBytesPerSector),
   1891 			(int) queue->raidPtr->logBytesPerSector));
   1892 		if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
   1893 			raidbp->rf_buf.b_vp->v_numoutput++;
   1894 		}
   1895 		VOP_STRATEGY(raidbp->rf_buf.b_vp, &raidbp->rf_buf);
   1896 
   1897 		break;
   1898 
   1899 	default:
   1900 		panic("bad req->type in rf_DispatchKernelIO");
   1901 	}
   1902 	db1_printf(("Exiting from DispatchKernelIO\n"));
   1903 
   1904 	return (0);
   1905 }
    1906 /* this is the callback function associated with an I/O invoked from
   1907    kernel code.
   1908  */
   1909 static void
   1910 KernelWakeupFunc(struct buf *vbp)
   1911 {
   1912 	RF_DiskQueueData_t *req = NULL;
   1913 	RF_DiskQueue_t *queue;
   1914 	struct raidbuf *raidbp = (struct raidbuf *) vbp;
   1915 	struct buf *bp;
   1916 	int s;
   1917 
   1918 	s = splbio();
   1919 	db1_printf(("recovering the request queue:\n"));
   1920 	req = raidbp->req;
   1921 
   1922 	bp = raidbp->rf_obp;
   1923 
   1924 	queue = (RF_DiskQueue_t *) req->queue;
   1925 
   1926 	if (raidbp->rf_buf.b_flags & B_ERROR) {
   1927 		bp->b_flags |= B_ERROR;
   1928 		bp->b_error = raidbp->rf_buf.b_error ?
   1929 		    raidbp->rf_buf.b_error : EIO;
   1930 	}
   1931 
   1932 	/* XXX methinks this could be wrong... */
   1933 #if 1
   1934 	bp->b_resid = raidbp->rf_buf.b_resid;
   1935 #endif
   1936 #if RF_ACC_TRACE > 0
   1937 	if (req->tracerec) {
   1938 		RF_ETIMER_STOP(req->tracerec->timer);
   1939 		RF_ETIMER_EVAL(req->tracerec->timer);
   1940 		RF_LOCK_MUTEX(rf_tracing_mutex);
   1941 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   1942 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   1943 		req->tracerec->num_phys_ios++;
   1944 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
   1945 	}
   1946 #endif
   1947 	bp->b_bcount = raidbp->rf_buf.b_bcount;	/* XXXX ?? */
   1948 
   1949 	/* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
   1950 	 * ballistic, and mark the component as hosed... */
   1951 
   1952 	if (bp->b_flags & B_ERROR) {
   1953 		/* Mark the disk as dead */
   1954 		/* but only mark it once... */
   1955 		/* and only if it wouldn't leave this RAID set
   1956 		   completely broken */
   1957 		if ((queue->raidPtr->Disks[queue->col].status ==
   1958 		    rf_ds_optimal) && (queue->raidPtr->numFailures <
   1959 				       queue->raidPtr->Layout.map->faultsTolerated)) {
   1960 			printf("raid%d: IO Error.  Marking %s as failed.\n",
   1961 			       queue->raidPtr->raidid,
   1962 			       queue->raidPtr->Disks[queue->col].devname);
   1963 			queue->raidPtr->Disks[queue->col].status =
   1964 			    rf_ds_failed;
   1965 			queue->raidPtr->status = rf_rs_degraded;
   1966 			queue->raidPtr->numFailures++;
   1967 			queue->raidPtr->numNewFailures++;
   1968 		} else {	/* Disk is already dead... */
   1969 			/* printf("Disk already marked as dead!\n"); */
   1970 		}
   1971 
   1972 	}
   1973 
   1974 	pool_put(&rf_pools.cbuf, raidbp);
   1975 
   1976 	/* Fill in the error value */
   1977 
   1978 	req->error = (bp->b_flags & B_ERROR) ? bp->b_error : 0;
   1979 
   1980 	simple_lock(&queue->raidPtr->iodone_lock);
   1981 
   1982 	/* Drop this one on the "finished" queue... */
   1983 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
   1984 
   1985 	/* Let the raidio thread know there is work to be done. */
   1986 	wakeup(&(queue->raidPtr->iodone));
   1987 
   1988 	simple_unlock(&queue->raidPtr->iodone_lock);
   1989 
   1990 	splx(s);
   1991 }
   1992 
   1993 
   1994 
   1995 /*
   1996  * initialize a buf structure for doing an I/O in the kernel.
   1997  */
   1998 static void
   1999 InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
   2000        RF_SectorNum_t startSect, RF_SectorCount_t numSect, caddr_t bf,
   2001        void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
   2002        struct proc *b_proc)
   2003 {
   2004 	/* bp->b_flags       = B_PHYS | rw_flag; */
   2005 	bp->b_flags = B_CALL | rw_flag;	/* XXX need B_PHYS here too??? */
   2006 	bp->b_bcount = numSect << logBytesPerSector;
   2007 	bp->b_bufsize = bp->b_bcount;
   2008 	bp->b_error = 0;
   2009 	bp->b_dev = dev;
   2010 	bp->b_data = bf;
   2011 	bp->b_blkno = startSect;
   2012 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
   2013 	if (bp->b_bcount == 0) {
   2014 		panic("bp->b_bcount is zero in InitBP!!");
   2015 	}
   2016 	bp->b_proc = b_proc;
   2017 	bp->b_iodone = cbFunc;
   2018 	bp->b_vp = b_vp;
   2019 
   2020 }
   2021 
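         /* Fabricate a default disklabel for the RAID device based on the
            array's size and geometry; the raw partition covers the whole
            device. */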
   2022 static void
   2023 raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
   2024 		    struct disklabel *lp)
   2025 {
   2026 	memset(lp, 0, sizeof(*lp));
   2027 
   2028 	/* fabricate a label... */
   2029 	lp->d_secperunit = raidPtr->totalSectors;
   2030 	lp->d_secsize = raidPtr->bytesPerSector;
   2031 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
   2032 	lp->d_ntracks = 4 * raidPtr->numCol;
   2033 	lp->d_ncylinders = raidPtr->totalSectors /
   2034 		(lp->d_nsectors * lp->d_ntracks);
   2035 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
   2036 
   2037 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
   2038 	lp->d_type = DTYPE_RAID;
   2039 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   2040 	lp->d_rpm = 3600;
   2041 	lp->d_interleave = 1;
   2042 	lp->d_flags = 0;
   2043 
   2044 	lp->d_partitions[RAW_PART].p_offset = 0;
   2045 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
   2046 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   2047 	lp->d_npartitions = RAW_PART + 1;
   2048 
   2049 	lp->d_magic = DISKMAGIC;
   2050 	lp->d_magic2 = DISKMAGIC;
   2051 	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
   2052 
   2053 }
   2054 /*
   2055  * Read the disklabel from the raid device.  If one is not present, fake one
   2056  * up.
   2057  */
   2058 static void
   2059 raidgetdisklabel(dev_t dev)
   2060 {
   2061 	int     unit = raidunit(dev);
   2062 	struct raid_softc *rs = &raid_softc[unit];
   2063 	const char   *errstring;
   2064 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2065 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
   2066 	RF_Raid_t *raidPtr;
   2067 
   2068 	db1_printf(("Getting the disklabel...\n"));
   2069 
   2070 	memset(clp, 0, sizeof(*clp));
   2071 
   2072 	raidPtr = raidPtrs[unit];
   2073 
   2074 	raidgetdefaultlabel(raidPtr, rs, lp);
   2075 
   2076 	/*
   2077 	 * Call the generic disklabel extraction routine.
   2078 	 */
   2079 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
   2080 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
   2081 	if (errstring)
   2082 		raidmakedisklabel(rs);
   2083 	else {
   2084 		int     i;
   2085 		struct partition *pp;
   2086 
   2087 		/*
   2088 		 * Sanity check whether the found disklabel is valid.
   2089 		 *
    2090 		 * This is necessary since the total size of the raid device
    2091 		 * may vary when the interleave is changed even though exactly
    2092 		 * the same components are used, and an old disklabel may be
    2093 		 * used if one is found.
   2094 		 */
   2095 		if (lp->d_secperunit != rs->sc_size)
   2096 			printf("raid%d: WARNING: %s: "
   2097 			    "total sector size in disklabel (%d) != "
   2098 			    "the size of raid (%ld)\n", unit, rs->sc_xname,
   2099 			    lp->d_secperunit, (long) rs->sc_size);
   2100 		for (i = 0; i < lp->d_npartitions; i++) {
   2101 			pp = &lp->d_partitions[i];
   2102 			if (pp->p_offset + pp->p_size > rs->sc_size)
   2103 				printf("raid%d: WARNING: %s: end of partition `%c' "
   2104 				       "exceeds the size of raid (%ld)\n",
   2105 				       unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
   2106 		}
   2107 	}
   2108 
   2109 }
   2110 /*
   2111  * Take care of things one might want to take care of in the event
   2112  * that a disklabel isn't present.
   2113  */
   2114 static void
   2115 raidmakedisklabel(struct raid_softc *rs)
   2116 {
   2117 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2118 	db1_printf(("Making a label..\n"));
   2119 
   2120 	/*
   2121 	 * For historical reasons, if there's no disklabel present
   2122 	 * the raw partition must be marked FS_BSDFFS.
   2123 	 */
   2124 
   2125 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
   2126 
   2127 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
   2128 
   2129 	lp->d_checksum = dkcksum(lp);
   2130 }
   2131 /*
   2132  * Lookup the provided name in the filesystem.  If the file exists,
   2133  * is a valid block device, and isn't being used by anyone else,
   2134  * set *vpp to the file's vnode.
   2135  * You'll find the original of this in ccd.c
   2136  */
   2137 int
   2138 raidlookup(char *path, struct proc *p, struct vnode **vpp)
   2139 {
   2140 	struct nameidata nd;
   2141 	struct vnode *vp;
   2142 	struct vattr va;
   2143 	int     error;
   2144 
   2145 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
   2146 	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
   2147 		return (error);
   2148 	}
   2149 	vp = nd.ni_vp;
   2150 	if (vp->v_usecount > 1) {
   2151 		VOP_UNLOCK(vp, 0);
   2152 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
   2153 		return (EBUSY);
   2154 	}
   2155 	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
   2156 		VOP_UNLOCK(vp, 0);
   2157 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
   2158 		return (error);
   2159 	}
   2160 	/* XXX: eventually we should handle VREG, too. */
   2161 	if (va.va_type != VBLK) {
   2162 		VOP_UNLOCK(vp, 0);
   2163 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
   2164 		return (ENOTBLK);
   2165 	}
   2166 	VOP_UNLOCK(vp, 0);
   2167 	*vpp = vp;
   2168 	return (0);
   2169 }
   2170 /*
   2171  * Wait interruptibly for an exclusive lock.
   2172  *
   2173  * XXX
   2174  * Several drivers do this; it should be abstracted and made MP-safe.
   2175  * (Hmm... where have we seen this warning before :->  GO )
   2176  */
   2177 static int
   2178 raidlock(struct raid_softc *rs)
   2179 {
   2180 	int     error;
   2181 
   2182 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2183 		rs->sc_flags |= RAIDF_WANTED;
   2184 		if ((error =
   2185 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
   2186 			return (error);
   2187 	}
   2188 	rs->sc_flags |= RAIDF_LOCKED;
   2189 	return (0);
   2190 }
   2191 /*
   2192  * Unlock and wake up any waiters.
   2193  */
   2194 static void
   2195 raidunlock(struct raid_softc *rs)
   2196 {
   2197 
   2198 	rs->sc_flags &= ~RAIDF_LOCKED;
   2199 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2200 		rs->sc_flags &= ~RAIDF_WANTED;
   2201 		wakeup(rs);
   2202 	}
   2203 }
   2204 
   2205 
   2206 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2207 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
   2208 
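         /* raidmarkclean/raidmarkdirty -- rewrite a component label with the
            given mod_counter and with the clean bit set (clean) or cleared
            (dirty). */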
   2209 int
   2210 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
   2211 {
   2212 	RF_ComponentLabel_t clabel;
   2213 	raidread_component_label(dev, b_vp, &clabel);
   2214 	clabel.mod_counter = mod_counter;
   2215 	clabel.clean = RF_RAID_CLEAN;
   2216 	raidwrite_component_label(dev, b_vp, &clabel);
   2217 	return(0);
   2218 }
   2219 
   2220 
   2221 int
   2222 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
   2223 {
   2224 	RF_ComponentLabel_t clabel;
   2225 	raidread_component_label(dev, b_vp, &clabel);
   2226 	clabel.mod_counter = mod_counter;
   2227 	clabel.clean = RF_RAID_DIRTY;
   2228 	raidwrite_component_label(dev, b_vp, &clabel);
   2229 	return(0);
   2230 }
   2231 
   2232 /* ARGSUSED */
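         /* Read or write the component label kept RF_COMPONENT_INFO_OFFSET
            bytes into each component. */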
   2233 int
   2234 raidread_component_label(dev_t dev, struct vnode *b_vp,
   2235 			 RF_ComponentLabel_t *clabel)
   2236 {
   2237 	struct buf *bp;
   2238 	const struct bdevsw *bdev;
   2239 	int error;
   2240 
   2241 	/* XXX should probably ensure that we don't try to do this if
   2242 	   someone has changed rf_protected_sectors. */
   2243 
   2244 	if (b_vp == NULL) {
   2245 		/* For whatever reason, this component is not valid.
   2246 		   Don't try to read a component label from it. */
   2247 		return(EINVAL);
   2248 	}
   2249 
   2250 	/* get a block of the appropriate size... */
   2251 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2252 	bp->b_dev = dev;
   2253 
   2254 	/* get our ducks in a row for the read */
   2255 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2256 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2257 	bp->b_flags |= B_READ;
   2258  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2259 
   2260 	bdev = bdevsw_lookup(bp->b_dev);
   2261 	if (bdev == NULL)
   2262 		return (ENXIO);
   2263 	(*bdev->d_strategy)(bp);
   2264 
   2265 	error = biowait(bp);
   2266 
   2267 	if (!error) {
   2268 		memcpy(clabel, bp->b_data,
   2269 		       sizeof(RF_ComponentLabel_t));
   2270         }
   2271 
   2272 	brelse(bp);
   2273 	return(error);
   2274 }
   2275 /* ARGSUSED */
   2276 int
   2277 raidwrite_component_label(dev_t dev, struct vnode *b_vp,
   2278 			  RF_ComponentLabel_t *clabel)
   2279 {
   2280 	struct buf *bp;
   2281 	const struct bdevsw *bdev;
   2282 	int error;
   2283 
   2284 	/* get a block of the appropriate size... */
   2285 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2286 	bp->b_dev = dev;
   2287 
   2288 	/* get our ducks in a row for the write */
   2289 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2290 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2291 	bp->b_flags |= B_WRITE;
   2292  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2293 
   2294 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
   2295 
   2296 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
   2297 
   2298 	bdev = bdevsw_lookup(bp->b_dev);
   2299 	if (bdev == NULL)
   2300 		return (ENXIO);
   2301 	(*bdev->d_strategy)(bp);
   2302 	error = biowait(bp);
   2303 	brelse(bp);
   2304 	if (error) {
   2305 #if 1
   2306 		printf("Failed to write RAID component info!\n");
   2307 #endif
   2308 	}
   2309 
   2310 	return(error);
   2311 }
   2312 
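         /* Bump the mod counter and mark the component labels of all live
            components and in-use spares as dirty.  Failed components are not
            touched, and components marked as spared are skipped. */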
   2313 void
   2314 rf_markalldirty(RF_Raid_t *raidPtr)
   2315 {
   2316 	RF_ComponentLabel_t clabel;
   2317 	int sparecol;
   2318 	int c;
   2319 	int j;
   2320 	int scol = -1;
   2321 
   2322 	raidPtr->mod_counter++;
   2323 	for (c = 0; c < raidPtr->numCol; c++) {
   2324 		/* we don't want to touch (at all) a disk that has
   2325 		   failed */
   2326 		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
   2327 			raidread_component_label(
   2328 						 raidPtr->Disks[c].dev,
   2329 						 raidPtr->raid_cinfo[c].ci_vp,
   2330 						 &clabel);
   2331 			if (clabel.status == rf_ds_spared) {
   2332 				/* XXX do something special...
   2333 				   but whatever you do, don't
   2334 				   try to access it!! */
   2335 			} else {
   2336 				raidmarkdirty(
   2337 					      raidPtr->Disks[c].dev,
   2338 					      raidPtr->raid_cinfo[c].ci_vp,
   2339 					      raidPtr->mod_counter);
   2340 			}
   2341 		}
   2342 	}
   2343 
   2344 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2345 		sparecol = raidPtr->numCol + c;
   2346 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2347 			/*
   2348 
   2349 			   we claim this disk is "optimal" if it's
   2350 			   rf_ds_used_spare, as that means it should be
   2351 			   directly substitutable for the disk it replaced.
   2352 			   We note that too...
   2353 
   2354 			 */
   2355 
   2356 			for(j=0;j<raidPtr->numCol;j++) {
   2357 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2358 					scol = j;
   2359 					break;
   2360 				}
   2361 			}
   2362 
   2363 			raidread_component_label(
   2364 				 raidPtr->Disks[sparecol].dev,
   2365 				 raidPtr->raid_cinfo[sparecol].ci_vp,
   2366 				 &clabel);
   2367 			/* make sure status is noted */
   2368 
   2369 			raid_init_component_label(raidPtr, &clabel);
   2370 
   2371 			clabel.row = 0;
   2372 			clabel.column = scol;
   2373 			/* Note: we *don't* change status from rf_ds_used_spare
   2374 			   to rf_ds_optimal */
   2375 			/* clabel.status = rf_ds_optimal; */
   2376 
   2377 			raidmarkdirty(raidPtr->Disks[sparecol].dev,
   2378 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2379 				      raidPtr->mod_counter);
   2380 		}
   2381 	}
   2382 }
   2383 
   2384 
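         /* Rewrite the component labels of all optimal components and in-use
            spares with the current mod counter.  On a final update, the clean
            bit is also set if the parity is known to be good. */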
   2385 void
   2386 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
   2387 {
   2388 	RF_ComponentLabel_t clabel;
   2389 	int sparecol;
   2390 	int c;
   2391 	int j;
   2392 	int scol;
   2393 
   2394 	scol = -1;
   2395 
   2396 	/* XXX should do extra checks to make sure things really are clean,
   2397 	   rather than blindly setting the clean bit... */
   2398 
   2399 	raidPtr->mod_counter++;
   2400 
   2401 	for (c = 0; c < raidPtr->numCol; c++) {
   2402 		if (raidPtr->Disks[c].status == rf_ds_optimal) {
   2403 			raidread_component_label(
   2404 						 raidPtr->Disks[c].dev,
   2405 						 raidPtr->raid_cinfo[c].ci_vp,
   2406 						 &clabel);
   2407 				/* make sure status is noted */
   2408 			clabel.status = rf_ds_optimal;
   2409 				/* bump the counter */
   2410 			clabel.mod_counter = raidPtr->mod_counter;
   2411 
   2412 			raidwrite_component_label(
   2413 						  raidPtr->Disks[c].dev,
   2414 						  raidPtr->raid_cinfo[c].ci_vp,
   2415 						  &clabel);
   2416 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2417 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2418 					raidmarkclean(
   2419 						      raidPtr->Disks[c].dev,
   2420 						      raidPtr->raid_cinfo[c].ci_vp,
   2421 						      raidPtr->mod_counter);
   2422 				}
   2423 			}
   2424 		}
   2425 		/* else we don't touch it.. */
   2426 	}
   2427 
   2428 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2429 		sparecol = raidPtr->numCol + c;
   2430 		/* Need to ensure that the reconstruct actually completed! */
   2431 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   2432 			/*
   2433 
   2434 			   we claim this disk is "optimal" if it's
   2435 			   rf_ds_used_spare, as that means it should be
   2436 			   directly substitutable for the disk it replaced.
   2437 			   We note that too...
   2438 
   2439 			 */
   2440 
   2441 			for(j=0;j<raidPtr->numCol;j++) {
   2442 				if (raidPtr->Disks[j].spareCol == sparecol) {
   2443 					scol = j;
   2444 					break;
   2445 				}
   2446 			}
   2447 
   2448 			/* XXX shouldn't *really* need this... */
   2449 			raidread_component_label(
   2450 				      raidPtr->Disks[sparecol].dev,
   2451 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2452 				      &clabel);
   2453 			/* make sure status is noted */
   2454 
   2455 			raid_init_component_label(raidPtr, &clabel);
   2456 
   2457 			clabel.mod_counter = raidPtr->mod_counter;
   2458 			clabel.column = scol;
   2459 			clabel.status = rf_ds_optimal;
   2460 
   2461 			raidwrite_component_label(
   2462 				      raidPtr->Disks[sparecol].dev,
   2463 				      raidPtr->raid_cinfo[sparecol].ci_vp,
   2464 				      &clabel);
   2465 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2466 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2467 					raidmarkclean( raidPtr->Disks[sparecol].dev,
   2468 						       raidPtr->raid_cinfo[sparecol].ci_vp,
   2469 						       raidPtr->mod_counter);
   2470 				}
   2471 			}
   2472 		}
   2473 	}
   2474 }
   2475 
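         /* Close the vnode for a single component.  Autoconfigured components
            were opened without credentials, so they are closed the same way;
            others are closed with the engine thread's credentials. */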
   2476 void
   2477 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
   2478 {
   2479 	struct proc *p;
   2480 
   2481 	p = raidPtr->engine_thread;
   2482 
   2483 	if (vp != NULL) {
   2484 		if (auto_configured == 1) {
   2485 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2486 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2487 			vput(vp);
   2488 
   2489 		} else {
   2490 			(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
   2491 		}
   2492 	}
   2493 }
   2494 
   2495 
   2496 void
   2497 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
   2498 {
   2499 	int r,c;
   2500 	struct vnode *vp;
   2501 	int acd;
   2502 
   2503 
   2504 	/* We take this opportunity to close the vnodes like we should.. */
   2505 
   2506 	for (c = 0; c < raidPtr->numCol; c++) {
   2507 		vp = raidPtr->raid_cinfo[c].ci_vp;
   2508 		acd = raidPtr->Disks[c].auto_configured;
   2509 		rf_close_component(raidPtr, vp, acd);
   2510 		raidPtr->raid_cinfo[c].ci_vp = NULL;
   2511 		raidPtr->Disks[c].auto_configured = 0;
   2512 	}
   2513 
   2514 	for (r = 0; r < raidPtr->numSpare; r++) {
   2515 		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
   2516 		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
   2517 		rf_close_component(raidPtr, vp, acd);
   2518 		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
   2519 		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
   2520 	}
   2521 }
   2522 
   2523 
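         /* Kernel thread bodies for the long-running operations: failing a
            disk (and optionally reconstructing to a spare), rewriting parity,
            copying reconstructed data back, and reconstructing in place.
            Each runs at splbio, clears its in-progress flag when done, and
            exits via kthread_exit(). */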
   2524 void
   2525 rf_ReconThread(struct rf_recon_req *req)
   2526 {
   2527 	int     s;
   2528 	RF_Raid_t *raidPtr;
   2529 
   2530 	s = splbio();
   2531 	raidPtr = (RF_Raid_t *) req->raidPtr;
   2532 	raidPtr->recon_in_progress = 1;
   2533 
   2534 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
   2535 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
   2536 
   2537 	RF_Free(req, sizeof(*req));
   2538 
   2539 	raidPtr->recon_in_progress = 0;
   2540 	splx(s);
   2541 
   2542 	/* That's all... */
   2543 	kthread_exit(0);        /* does not return */
   2544 }
   2545 
   2546 void
   2547 rf_RewriteParityThread(RF_Raid_t *raidPtr)
   2548 {
   2549 	int retcode;
   2550 	int s;
   2551 
   2552 	raidPtr->parity_rewrite_stripes_done = 0;
   2553 	raidPtr->parity_rewrite_in_progress = 1;
   2554 	s = splbio();
   2555 	retcode = rf_RewriteParity(raidPtr);
   2556 	splx(s);
   2557 	if (retcode) {
   2558 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
   2559 	} else {
   2560 		/* set the clean bit!  If we shutdown correctly,
   2561 		   the clean bit on each component label will get
   2562 		   set */
   2563 		raidPtr->parity_good = RF_RAID_CLEAN;
   2564 	}
   2565 	raidPtr->parity_rewrite_in_progress = 0;
   2566 
   2567 	/* Anyone waiting for us to stop?  If so, inform them... */
   2568 	if (raidPtr->waitShutdown) {
   2569 		wakeup(&raidPtr->parity_rewrite_in_progress);
   2570 	}
   2571 
   2572 	/* That's all... */
   2573 	kthread_exit(0);        /* does not return */
   2574 }
   2575 
   2576 
   2577 void
   2578 rf_CopybackThread(RF_Raid_t *raidPtr)
   2579 {
   2580 	int s;
   2581 
   2582 	raidPtr->copyback_in_progress = 1;
   2583 	s = splbio();
   2584 	rf_CopybackReconstructedData(raidPtr);
   2585 	splx(s);
   2586 	raidPtr->copyback_in_progress = 0;
   2587 
   2588 	/* That's all... */
   2589 	kthread_exit(0);        /* does not return */
   2590 }
   2591 
   2592 
   2593 void
   2594 rf_ReconstructInPlaceThread(struct rf_recon_req *req)
   2595 {
   2596 	int s;
   2597 	RF_Raid_t *raidPtr;
   2598 
   2599 	s = splbio();
   2600 	raidPtr = req->raidPtr;
   2601 	raidPtr->recon_in_progress = 1;
   2602 	rf_ReconstructInPlace(raidPtr, req->col);
   2603 	RF_Free(req, sizeof(*req));
   2604 	raidPtr->recon_in_progress = 0;
   2605 	splx(s);
   2606 
   2607 	/* That's all... */
   2608 	kthread_exit(0);        /* does not return */
   2609 }
   2610 
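         /* Scan every disk device in the system, read its disklabel, and
            collect an RF_AutoConfig_t entry for each FS_RAID partition whose
            component label looks reasonable. */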
   2611 RF_AutoConfig_t *
   2612 rf_find_raid_components()
   2613 {
   2614 	struct vnode *vp;
   2615 	struct disklabel label;
   2616 	struct device *dv;
   2617 	dev_t dev;
   2618 	int bmajor;
   2619 	int error;
   2620 	int i;
   2621 	int good_one;
   2622 	RF_ComponentLabel_t *clabel;
   2623 	RF_AutoConfig_t *ac_list;
   2624 	RF_AutoConfig_t *ac;
   2625 
   2626 
   2627 	/* initialize the AutoConfig list */
   2628 	ac_list = NULL;
   2629 
   2630 	/* we begin by trolling through *all* the devices on the system */
   2631 
   2632 	for (dv = alldevs.tqh_first; dv != NULL;
   2633 	     dv = dv->dv_list.tqe_next) {
   2634 
   2635 		/* we are only interested in disks... */
   2636 		if (dv->dv_class != DV_DISK)
   2637 			continue;
   2638 
   2639 		/* we don't care about floppies... */
   2640 		if (!strcmp(dv->dv_cfdata->cf_name,"fd")) {
   2641 			continue;
   2642 		}
   2643 
   2644 		/* we don't care about CD's... */
   2645 		if (!strcmp(dv->dv_cfdata->cf_name,"cd")) {
   2646 			continue;
   2647 		}
   2648 
   2649 		/* hdfd is the Atari/Hades floppy driver */
   2650 		if (!strcmp(dv->dv_cfdata->cf_name,"hdfd")) {
   2651 			continue;
   2652 		}
   2653 		/* fdisa is the Atari/Milan floppy driver */
   2654 		if (!strcmp(dv->dv_cfdata->cf_name,"fdisa")) {
   2655 			continue;
   2656 		}
   2657 
   2658 		/* need to find the device_name_to_block_device_major stuff */
   2659 		bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
   2660 
   2661 		/* get a vnode for the raw partition of this disk */
   2662 
   2663 		dev = MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART);
   2664 		if (bdevvp(dev, &vp))
   2665 			panic("RAID can't alloc vnode");
   2666 
   2667 		error = VOP_OPEN(vp, FREAD, NOCRED, 0);
   2668 
   2669 		if (error) {
   2670 			/* "Who cares."  Continue looking
    2671 			   for something that exists */
   2672 			vput(vp);
   2673 			continue;
   2674 		}
   2675 
   2676 		/* Ok, the disk exists.  Go get the disklabel. */
   2677 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED, 0);
   2678 		if (error) {
   2679 			/*
   2680 			 * XXX can't happen - open() would
   2681 			 * have errored out (or faked up one)
   2682 			 */
   2683 			if (error != ENOTTY)
   2684 				printf("RAIDframe: can't get label for dev "
   2685 				    "%s (%d)\n", dv->dv_xname, error);
   2686 		}
   2687 
   2688 		/* don't need this any more.  We'll allocate it again
   2689 		   a little later if we really do... */
   2690 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2691 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2692 		vput(vp);
   2693 
   2694 		if (error)
   2695 			continue;
   2696 
   2697 		for (i=0; i < label.d_npartitions; i++) {
   2698 			/* We only support partitions marked as RAID */
   2699 			if (label.d_partitions[i].p_fstype != FS_RAID)
   2700 				continue;
   2701 
   2702 			dev = MAKEDISKDEV(bmajor, dv->dv_unit, i);
   2703 			if (bdevvp(dev, &vp))
   2704 				panic("RAID can't alloc vnode");
   2705 
   2706 			error = VOP_OPEN(vp, FREAD, NOCRED, 0);
   2707 			if (error) {
   2708 				/* Whatever... */
   2709 				vput(vp);
   2710 				continue;
   2711 			}
   2712 
   2713 			good_one = 0;
   2714 
   2715 			clabel = (RF_ComponentLabel_t *)
   2716 				malloc(sizeof(RF_ComponentLabel_t),
   2717 				       M_RAIDFRAME, M_NOWAIT);
   2718 			if (clabel == NULL) {
   2719 				/* XXX CLEANUP HERE */
   2720 				printf("RAID auto config: out of memory!\n");
   2721 				return(NULL); /* XXX probably should panic? */
   2722 			}
   2723 
   2724 			if (!raidread_component_label(dev, vp, clabel)) {
   2725 				/* Got the label.  Does it look reasonable? */
   2726 				if (rf_reasonable_label(clabel) &&
   2727 				    (clabel->partitionSize <=
   2728 				     label.d_partitions[i].p_size)) {
   2729 #if DEBUG
   2730 					printf("Component on: %s%c: %d\n",
   2731 					       dv->dv_xname, 'a'+i,
   2732 					       label.d_partitions[i].p_size);
   2733 					rf_print_component_label(clabel);
   2734 #endif
   2735 					/* if it's reasonable, add it,
   2736 					   else ignore it. */
   2737 					ac = (RF_AutoConfig_t *)
   2738 						malloc(sizeof(RF_AutoConfig_t),
   2739 						       M_RAIDFRAME,
   2740 						       M_NOWAIT);
   2741 					if (ac == NULL) {
   2742 						/* XXX should panic?? */
   2743 						return(NULL);
   2744 					}
   2745 
   2746 					snprintf(ac->devname,
   2747 					    sizeof(ac->devname), "%s%c",
   2748 					    dv->dv_xname, 'a'+i);
   2749 					ac->dev = dev;
   2750 					ac->vp = vp;
   2751 					ac->clabel = clabel;
   2752 					ac->next = ac_list;
   2753 					ac_list = ac;
   2754 					good_one = 1;
   2755 				}
   2756 			}
   2757 			if (!good_one) {
   2758 				/* cleanup */
   2759 				free(clabel, M_RAIDFRAME);
   2760 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2761 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2762 				vput(vp);
   2763 			}
   2764 		}
   2765 	}
   2766 	return(ac_list);
   2767 }
   2768 
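         /* Basic sanity checks on a component label found during
            autoconfiguration; returns 1 if it looks plausible, 0 otherwise. */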
   2769 static int
   2770 rf_reasonable_label(RF_ComponentLabel_t *clabel)
   2771 {
   2772 
   2773 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   2774 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   2775 	    ((clabel->clean == RF_RAID_CLEAN) ||
   2776 	     (clabel->clean == RF_RAID_DIRTY)) &&
   2777 	    clabel->row >=0 &&
   2778 	    clabel->column >= 0 &&
   2779 	    clabel->num_rows > 0 &&
   2780 	    clabel->num_columns > 0 &&
   2781 	    clabel->row < clabel->num_rows &&
   2782 	    clabel->column < clabel->num_columns &&
   2783 	    clabel->blockSize > 0 &&
   2784 	    clabel->numBlocks > 0) {
   2785 		/* label looks reasonable enough... */
   2786 		return(1);
   2787 	}
   2788 	return(0);
   2789 }
   2790 
   2791 
   2792 #if DEBUG
   2793 void
   2794 rf_print_component_label(RF_ComponentLabel_t *clabel)
   2795 {
   2796 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
   2797 	       clabel->row, clabel->column,
   2798 	       clabel->num_rows, clabel->num_columns);
   2799 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
   2800 	       clabel->version, clabel->serial_number,
   2801 	       clabel->mod_counter);
   2802 	printf("   Clean: %s Status: %d\n",
   2803 	       clabel->clean ? "Yes" : "No", clabel->status );
   2804 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
   2805 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
   2806 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
   2807 	       (char) clabel->parityConfig, clabel->blockSize,
   2808 	       clabel->numBlocks);
   2809 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
   2810 	printf("   Contains root partition: %s\n",
   2811 	       clabel->root_partition ? "Yes" : "No" );
   2812 	printf("   Last configured as: raid%d\n", clabel->last_unit );
   2813 #if 0
   2814 	   printf("   Config order: %d\n", clabel->config_order);
   2815 #endif
   2816 
   2817 }
   2818 #endif
   2819 
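         /* Sort the autoconfig list into config sets, one per RAID set, using
            rf_does_it_fit() to decide which set a component belongs to. */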
   2820 RF_ConfigSet_t *
   2821 rf_create_auto_sets(RF_AutoConfig_t *ac_list)
   2822 {
   2823 	RF_AutoConfig_t *ac;
   2824 	RF_ConfigSet_t *config_sets;
   2825 	RF_ConfigSet_t *cset;
   2826 	RF_AutoConfig_t *ac_next;
   2827 
   2828 
   2829 	config_sets = NULL;
   2830 
   2831 	/* Go through the AutoConfig list, and figure out which components
   2832 	   belong to what sets.  */
   2833 	ac = ac_list;
   2834 	while(ac!=NULL) {
   2835 		/* we're going to putz with ac->next, so save it here
   2836 		   for use at the end of the loop */
   2837 		ac_next = ac->next;
   2838 
   2839 		if (config_sets == NULL) {
   2840 			/* will need at least this one... */
   2841 			config_sets = (RF_ConfigSet_t *)
   2842 				malloc(sizeof(RF_ConfigSet_t),
   2843 				       M_RAIDFRAME, M_NOWAIT);
   2844 			if (config_sets == NULL) {
   2845 				panic("rf_create_auto_sets: No memory!");
   2846 			}
   2847 			/* this one is easy :) */
   2848 			config_sets->ac = ac;
   2849 			config_sets->next = NULL;
   2850 			config_sets->rootable = 0;
   2851 			ac->next = NULL;
   2852 		} else {
   2853 			/* which set does this component fit into? */
   2854 			cset = config_sets;
   2855 			while(cset!=NULL) {
   2856 				if (rf_does_it_fit(cset, ac)) {
   2857 					/* looks like it matches... */
   2858 					ac->next = cset->ac;
   2859 					cset->ac = ac;
   2860 					break;
   2861 				}
   2862 				cset = cset->next;
   2863 			}
   2864 			if (cset==NULL) {
   2865 				/* didn't find a match above... new set..*/
   2866 				cset = (RF_ConfigSet_t *)
   2867 					malloc(sizeof(RF_ConfigSet_t),
   2868 					       M_RAIDFRAME, M_NOWAIT);
   2869 				if (cset == NULL) {
   2870 					panic("rf_create_auto_sets: No memory!");
   2871 				}
   2872 				cset->ac = ac;
   2873 				ac->next = NULL;
   2874 				cset->next = config_sets;
   2875 				cset->rootable = 0;
   2876 				config_sets = cset;
   2877 			}
   2878 		}
   2879 		ac = ac_next;
   2880 	}
   2881 
   2882 
   2883 	return(config_sets);
   2884 }
   2885 
   2886 static int
   2887 rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
   2888 {
   2889 	RF_ComponentLabel_t *clabel1, *clabel2;
   2890 
   2891 	/* If this one matches the *first* one in the set, that's good
   2892 	   enough, since the other members of the set would have been
   2893 	   through here too... */
   2894 	/* note that we are not checking partitionSize here..
   2895 
   2896 	   Note that we are also not checking the mod_counters here.
    2897 	   If everything else matches except the mod_counter, that's
   2898 	   good enough for this test.  We will deal with the mod_counters
   2899 	   a little later in the autoconfiguration process.
   2900 
   2901 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   2902 
   2903 	   The reason we don't check for this is that failed disks
   2904 	   will have lower modification counts.  If those disks are
   2905 	   not added to the set they used to belong to, then they will
   2906 	   form their own set, which may result in 2 different sets,
   2907 	   for example, competing to be configured at raid0, and
   2908 	   perhaps competing to be the root filesystem set.  If the
   2909 	   wrong ones get configured, or both attempt to become /,
    2910 	   weird behaviour and/or serious lossage will occur.  Thus we
   2911 	   need to bring them into the fold here, and kick them out at
   2912 	   a later point.
   2913 
   2914 	*/
   2915 
   2916 	clabel1 = cset->ac->clabel;
   2917 	clabel2 = ac->clabel;
   2918 	if ((clabel1->version == clabel2->version) &&
   2919 	    (clabel1->serial_number == clabel2->serial_number) &&
   2920 	    (clabel1->num_rows == clabel2->num_rows) &&
   2921 	    (clabel1->num_columns == clabel2->num_columns) &&
   2922 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   2923 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   2924 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   2925 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   2926 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   2927 	    (clabel1->blockSize == clabel2->blockSize) &&
   2928 	    (clabel1->numBlocks == clabel2->numBlocks) &&
   2929 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   2930 	    (clabel1->root_partition == clabel2->root_partition) &&
   2931 	    (clabel1->last_unit == clabel2->last_unit) &&
   2932 	    (clabel1->config_order == clabel2->config_order)) {
    2933 		/* if it gets here, it almost *has* to be a match */
   2934 	} else {
   2935 		/* it's not consistent with somebody in the set..
   2936 		   punt */
   2937 		return(0);
   2938 	}
   2939 	/* all was fine.. it must fit... */
   2940 	return(1);
   2941 }
   2942 
   2943 int
   2944 rf_have_enough_components(RF_ConfigSet_t *cset)
   2945 {
   2946 	RF_AutoConfig_t *ac;
   2947 	RF_AutoConfig_t *auto_config;
   2948 	RF_ComponentLabel_t *clabel;
   2949 	int c;
   2950 	int num_cols;
   2951 	int num_missing;
   2952 	int mod_counter;
   2953 	int mod_counter_found;
   2954 	int even_pair_failed;
   2955 	char parity_type;
   2956 
   2957 
   2958 	/* check to see that we have enough 'live' components
   2959 	   of this set.  If so, we can configure it if necessary */
   2960 
   2961 	num_cols = cset->ac->clabel->num_columns;
   2962 	parity_type = cset->ac->clabel->parityConfig;
   2963 
   2964 	/* XXX Check for duplicate components!?!?!? */
   2965 
   2966 	/* Determine what the mod_counter is supposed to be for this set. */
   2967 
   2968 	mod_counter_found = 0;
   2969 	mod_counter = 0;
   2970 	ac = cset->ac;
   2971 	while(ac!=NULL) {
   2972 		if (mod_counter_found==0) {
   2973 			mod_counter = ac->clabel->mod_counter;
   2974 			mod_counter_found = 1;
   2975 		} else {
   2976 			if (ac->clabel->mod_counter > mod_counter) {
   2977 				mod_counter = ac->clabel->mod_counter;
   2978 			}
   2979 		}
   2980 		ac = ac->next;
   2981 	}
   2982 
   2983 	num_missing = 0;
   2984 	auto_config = cset->ac;
   2985 
   2986 	even_pair_failed = 0;
   2987 	for(c=0; c<num_cols; c++) {
   2988 		ac = auto_config;
   2989 		while(ac!=NULL) {
   2990 			if ((ac->clabel->column == c) &&
   2991 			    (ac->clabel->mod_counter == mod_counter)) {
   2992 				/* it's this one... */
   2993 #if DEBUG
   2994 				printf("Found: %s at %d\n",
   2995 				       ac->devname,c);
   2996 #endif
   2997 				break;
   2998 			}
   2999 			ac=ac->next;
   3000 		}
   3001 		if (ac==NULL) {
   3002 				/* Didn't find one here! */
   3003 				/* special case for RAID 1, especially
   3004 				   where there are more than 2
   3005 				   components (where RAIDframe treats
   3006 				   things a little differently :( ) */
   3007 			if (parity_type == '1') {
   3008 				if (c%2 == 0) { /* even component */
   3009 					even_pair_failed = 1;
   3010 				} else { /* odd component.  If
   3011 					    we're failed, and
   3012 					    so is the even
   3013 					    component, it's
   3014 					    "Good Night, Charlie" */
   3015 					if (even_pair_failed == 1) {
   3016 						return(0);
   3017 					}
   3018 				}
   3019 			} else {
   3020 				/* normal accounting */
   3021 				num_missing++;
   3022 			}
   3023 		}
   3024 		if ((parity_type == '1') && (c%2 == 1)) {
   3025 				/* Just did an even component, and we didn't
   3026 				   bail.. reset the even_pair_failed flag,
   3027 				   and go on to the next component.... */
   3028 			even_pair_failed = 0;
   3029 		}
   3030 	}
   3031 
   3032 	clabel = cset->ac->clabel;
   3033 
   3034 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
   3035 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
   3036 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
   3037 		/* XXX this needs to be made *much* more general */
   3038 		/* Too many failures */
   3039 		return(0);
   3040 	}
   3041 	/* otherwise, all is well, and we've got enough to take a kick
   3042 	   at autoconfiguring this set */
   3043 	return(1);
   3044 }
   3045 
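         /* Build an RF_Config_t for rf_Configure() from the component labels
            of an autoconfig set. */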
   3046 void
   3047 rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
   3048 			RF_Raid_t *raidPtr)
   3049 {
   3050 	RF_ComponentLabel_t *clabel;
   3051 	int i;
   3052 
   3053 	clabel = ac->clabel;
   3054 
   3055 	/* 1. Fill in the common stuff */
   3056 	config->numRow = clabel->num_rows = 1;
   3057 	config->numCol = clabel->num_columns;
   3058 	config->numSpare = 0; /* XXX should this be set here? */
   3059 	config->sectPerSU = clabel->sectPerSU;
   3060 	config->SUsPerPU = clabel->SUsPerPU;
   3061 	config->SUsPerRU = clabel->SUsPerRU;
   3062 	config->parityConfig = clabel->parityConfig;
   3063 	/* XXX... */
   3064 	strcpy(config->diskQueueType,"fifo");
   3065 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3066 	config->layoutSpecificSize = 0; /* XXX ?? */
   3067 
   3068 	while(ac!=NULL) {
   3069 		/* row/col values will be in range due to the checks
   3070 		   in reasonable_label() */
   3071 		strcpy(config->devnames[0][ac->clabel->column],
   3072 		       ac->devname);
   3073 		ac = ac->next;
   3074 	}
   3075 
   3076 	for(i=0;i<RF_MAXDBGV;i++) {
   3077 		config->debugVars[i][0] = 0;
   3078 	}
   3079 }
   3080 
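         /* Set the autoconfigure flag for this RAID set and propagate the new
            value into the component labels of all optimal components and
            in-use spares. */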
   3081 int
   3082 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
   3083 {
   3084 	RF_ComponentLabel_t clabel;
   3085 	struct vnode *vp;
   3086 	dev_t dev;
   3087 	int column;
   3088 	int sparecol;
   3089 
   3090 	raidPtr->autoconfigure = new_value;
   3091 
   3092 	for(column=0; column<raidPtr->numCol; column++) {
   3093 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3094 			dev = raidPtr->Disks[column].dev;
   3095 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3096 			raidread_component_label(dev, vp, &clabel);
   3097 			clabel.autoconfigure = new_value;
   3098 			raidwrite_component_label(dev, vp, &clabel);
   3099 		}
   3100 	}
   3101 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3102 		sparecol = raidPtr->numCol + column;
   3103 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3104 			dev = raidPtr->Disks[sparecol].dev;
   3105 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3106 			raidread_component_label(dev, vp, &clabel);
   3107 			clabel.autoconfigure = new_value;
   3108 			raidwrite_component_label(dev, vp, &clabel);
   3109 		}
   3110 	}
   3111 	return(new_value);
   3112 }
   3113 
   3114 int
   3115 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
   3116 {
   3117 	RF_ComponentLabel_t clabel;
   3118 	struct vnode *vp;
   3119 	dev_t dev;
   3120 	int column;
   3121 	int sparecol;
   3122 
   3123 	raidPtr->root_partition = new_value;
   3124 	for(column=0; column<raidPtr->numCol; column++) {
   3125 		if (raidPtr->Disks[column].status == rf_ds_optimal) {
   3126 			dev = raidPtr->Disks[column].dev;
   3127 			vp = raidPtr->raid_cinfo[column].ci_vp;
   3128 			raidread_component_label(dev, vp, &clabel);
   3129 			clabel.root_partition = new_value;
   3130 			raidwrite_component_label(dev, vp, &clabel);
   3131 		}
   3132 	}
   3133 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3134 		sparecol = raidPtr->numCol + column;
   3135 		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
   3136 			dev = raidPtr->Disks[sparecol].dev;
   3137 			vp = raidPtr->raid_cinfo[sparecol].ci_vp;
   3138 			raidread_component_label(dev, vp, &clabel);
   3139 			clabel.root_partition = new_value;
   3140 			raidwrite_component_label(dev, vp, &clabel);
   3141 		}
   3142 	}
   3143 	return(new_value);
   3144 }
   3145 
   3146 void
   3147 rf_release_all_vps(RF_ConfigSet_t *cset)
   3148 {
   3149 	RF_AutoConfig_t *ac;
   3150 
   3151 	ac = cset->ac;
   3152 	while(ac!=NULL) {
   3153 		/* Close the vp, and give it back */
   3154 		if (ac->vp) {
   3155 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3156 			VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
   3157 			vput(ac->vp);
   3158 			ac->vp = NULL;
   3159 		}
   3160 		ac = ac->next;
   3161 	}
   3162 }
   3163 
   3164 
   3165 void
   3166 rf_cleanup_config_set(RF_ConfigSet_t *cset)
   3167 {
   3168 	RF_AutoConfig_t *ac;
   3169 	RF_AutoConfig_t *next_ac;
   3170 
   3171 	ac = cset->ac;
   3172 	while(ac!=NULL) {
   3173 		next_ac = ac->next;
   3174 		/* nuke the label */
   3175 		free(ac->clabel, M_RAIDFRAME);
   3176 		/* cleanup the config structure */
   3177 		free(ac, M_RAIDFRAME);
   3178 		/* "next.." */
   3179 		ac = next_ac;
   3180 	}
   3181 	/* and, finally, nuke the config set */
   3182 	free(cset, M_RAIDFRAME);
   3183 }
   3184 
   3185 
   3186 void
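         /* Fill in a component label from the current state of the RAID set
            (geometry, serial number, mod counter, and so on). */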
   3187 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
   3188 {
   3189 	/* current version number */
   3190 	clabel->version = RF_COMPONENT_LABEL_VERSION;
   3191 	clabel->serial_number = raidPtr->serial_number;
   3192 	clabel->mod_counter = raidPtr->mod_counter;
   3193 	clabel->num_rows = 1;
   3194 	clabel->num_columns = raidPtr->numCol;
   3195 	clabel->clean = RF_RAID_DIRTY; /* not clean */
   3196 	clabel->status = rf_ds_optimal; /* "It's good!" */
   3197 
   3198 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
   3199 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
   3200 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
   3201 
   3202 	clabel->blockSize = raidPtr->bytesPerSector;
   3203 	clabel->numBlocks = raidPtr->sectorsPerDisk;
   3204 
   3205 	/* XXX not portable */
   3206 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
   3207 	clabel->maxOutstanding = raidPtr->maxOutstanding;
   3208 	clabel->autoconfigure = raidPtr->autoconfigure;
   3209 	clabel->root_partition = raidPtr->root_partition;
   3210 	clabel->last_unit = raidPtr->raidid;
   3211 	clabel->config_order = raidPtr->config_order;
   3212 }
   3213 
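         /* Configure a RAID set from an autoconfig set: pick a unit number
            (preferring the one recorded in the component labels), build a
            configuration, and call rf_Configure().  Returns 0 on success,
            with *unit set to the configured unit. */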
   3214 int
   3215 rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
   3216 {
   3217 	RF_Raid_t *raidPtr;
   3218 	RF_Config_t *config;
   3219 	int raidID;
   3220 	int retcode;
   3221 
   3222 #if DEBUG
   3223 	printf("RAID autoconfigure\n");
   3224 #endif
   3225 
   3226 	retcode = 0;
   3227 	*unit = -1;
   3228 
   3229 	/* 1. Create a config structure */
   3230 
   3231 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
   3232 				       M_RAIDFRAME,
   3233 				       M_NOWAIT);
   3234 	if (config==NULL) {
   3235 		printf("Out of mem!?!?\n");
   3236 				/* XXX do something more intelligent here. */
   3237 		return(1);
   3238 	}
   3239 
   3240 	memset(config, 0, sizeof(RF_Config_t));
   3241 
   3242 	/*
   3243 	   2. Figure out what RAID ID this one is supposed to live at
   3244 	   See if we can get the same RAID dev that it was configured
   3245 	   on last time..
   3246 	*/
   3247 
   3248 	raidID = cset->ac->clabel->last_unit;
   3249 	if ((raidID < 0) || (raidID >= numraid)) {
   3250 		/* let's not wander off into lala land. */
   3251 		raidID = numraid - 1;
   3252 	}
   3253 	if (raidPtrs[raidID]->valid != 0) {
   3254 
   3255 		/*
   3256 		   Nope... Go looking for an alternative...
   3257 		   Start high so we don't immediately use raid0 if that's
   3258 		   not taken.
   3259 		*/
   3260 
   3261 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
   3262 			if (raidPtrs[raidID]->valid == 0) {
   3263 				/* can use this one! */
   3264 				break;
   3265 			}
   3266 		}
   3267 	}
   3268 
   3269 	if (raidID < 0) {
   3270 		/* punt... */
   3271 		printf("Unable to auto configure this set!\n");
   3272 		printf("(Out of RAID devs!)\n");
         		free(config, M_RAIDFRAME);	/* don't leak the config allocated above */
    3273 		return(1);
   3274 	}
   3275 
   3276 #if DEBUG
   3277 	printf("Configuring raid%d:\n",raidID);
   3278 #endif
   3279 
   3280 	raidPtr = raidPtrs[raidID];
   3281 
   3282 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
   3283 	raidPtr->raidid = raidID;
   3284 	raidPtr->openings = RAIDOUTSTANDING;
   3285 
   3286 	/* 3. Build the configuration structure */
   3287 	rf_create_configuration(cset->ac, config, raidPtr);
   3288 
   3289 	/* 4. Do the configuration */
   3290 	retcode = rf_Configure(raidPtr, config, cset->ac);
   3291 
   3292 	if (retcode == 0) {
   3293 
   3294 		raidinit(raidPtrs[raidID]);
   3295 
   3296 		rf_markalldirty(raidPtrs[raidID]);
   3297 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
   3298 		if (cset->ac->clabel->root_partition==1) {
   3299 			/* everything configured just fine.  Make a note
   3300 			   that this set is eligible to be root. */
   3301 			cset->rootable = 1;
   3302 			/* XXX do this here? */
   3303 			raidPtrs[raidID]->root_partition = 1;
   3304 		}
   3305 	}
   3306 
   3307 	/* 5. Cleanup */
   3308 	free(config, M_RAIDFRAME);
   3309 
   3310 	*unit = raidID;
   3311 	return(retcode);
   3312 }
   3313 
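         /* Update the disk statistics for a completed access. */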
   3314 void
   3315 rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
   3316 {
   3317 	struct buf *bp;
   3318 
   3319 	bp = (struct buf *)desc->bp;
   3320 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
   3321 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
   3322 }
   3323 
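         /* Initialize one of the RAIDframe resource pools with the given low
            and high watermarks, priming it with the minimum number of items. */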
   3324 void
   3325 rf_pool_init(struct pool *p, size_t size, const char *w_chan,
   3326 	     size_t xmin, size_t xmax)
   3327 {
   3328 	pool_init(p, size, 0, 0, 0, w_chan, NULL);
   3329 	pool_sethiwat(p, xmax);
   3330 	pool_prime(p, xmin);
   3331 	pool_setlowat(p, xmin);
   3332 }
   3333