      1 /*	$NetBSD: rf_netbsdkintf.c,v 1.162 2003/08/07 16:31:19 agc Exp $	*/
      2 /*-
      3  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster; Jason R. Thorpe.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *        This product includes software developed by the NetBSD
     20  *        Foundation, Inc. and its contributors.
     21  * 4. Neither the name of The NetBSD Foundation nor the names of its
     22  *    contributors may be used to endorse or promote products derived
     23  *    from this software without specific prior written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright (c) 1990, 1993
     40  *      The Regents of the University of California.  All rights reserved.
     41  *
     42  * This code is derived from software contributed to Berkeley by
     43  * the Systems Programming Group of the University of Utah Computer
     44  * Science Department.
     45  *
     46  * Redistribution and use in source and binary forms, with or without
     47  * modification, are permitted provided that the following conditions
     48  * are met:
     49  * 1. Redistributions of source code must retain the above copyright
     50  *    notice, this list of conditions and the following disclaimer.
     51  * 2. Redistributions in binary form must reproduce the above copyright
     52  *    notice, this list of conditions and the following disclaimer in the
     53  *    documentation and/or other materials provided with the distribution.
     54  * 3. Neither the name of the University nor the names of its contributors
     55  *    may be used to endorse or promote products derived from this software
     56  *    without specific prior written permission.
     57  *
     58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     68  * SUCH DAMAGE.
     69  *
     70  * from: Utah $Hdr: cd.c 1.6 90/11/28$
     71  *
     72  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
     73  */
     74 
     75 /*
     76  * Copyright (c) 1988 University of Utah.
     77  *
     78  * This code is derived from software contributed to Berkeley by
     79  * the Systems Programming Group of the University of Utah Computer
     80  * Science Department.
     81  *
     82  * Redistribution and use in source and binary forms, with or without
     83  * modification, are permitted provided that the following conditions
     84  * are met:
     85  * 1. Redistributions of source code must retain the above copyright
     86  *    notice, this list of conditions and the following disclaimer.
     87  * 2. Redistributions in binary form must reproduce the above copyright
     88  *    notice, this list of conditions and the following disclaimer in the
     89  *    documentation and/or other materials provided with the distribution.
     90  * 3. All advertising materials mentioning features or use of this software
     91  *    must display the following acknowledgement:
     92  *      This product includes software developed by the University of
     93  *      California, Berkeley and its contributors.
     94  * 4. Neither the name of the University nor the names of its contributors
     95  *    may be used to endorse or promote products derived from this software
     96  *    without specific prior written permission.
     97  *
     98  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     99  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    100  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    101  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
    102  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    103  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
    104  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    105  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    106  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    107  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    108  * SUCH DAMAGE.
    109  *
    110  * from: Utah $Hdr: cd.c 1.6 90/11/28$
    111  *
    112  *      @(#)cd.c        8.2 (Berkeley) 11/16/93
    113  */
    114 
    115 /*
    116  * Copyright (c) 1995 Carnegie-Mellon University.
    117  * All rights reserved.
    118  *
    119  * Authors: Mark Holland, Jim Zelenka
    120  *
    121  * Permission to use, copy, modify and distribute this software and
    122  * its documentation is hereby granted, provided that both the copyright
    123  * notice and this permission notice appear in all copies of the
    124  * software, derivative works or modified versions, and any portions
    125  * thereof, and that both notices appear in supporting documentation.
    126  *
    127  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
    128  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
    129  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
    130  *
    131  * Carnegie Mellon requests users of this software to return to
    132  *
    133  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
    134  *  School of Computer Science
    135  *  Carnegie Mellon University
    136  *  Pittsburgh PA 15213-3890
    137  *
    138  * any improvements or extensions that they make and grant Carnegie the
    139  * rights to redistribute these changes.
    140  */
    141 
    142 /***********************************************************
    143  *
    144  * rf_kintf.c -- the kernel interface routines for RAIDframe
    145  *
    146  ***********************************************************/
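        /*
         * This file provides the NetBSD kernel glue for RAIDframe: the block
         * and character device entry points (open, close, read, write, ioctl,
         * strategy, dump, size), disklabel handling, the pseudo-device attach
         * routine, and boot-time autoconfiguration of RAID sets from their
         * component labels.
         */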
    147 
    148 #include <sys/cdefs.h>
    149 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.162 2003/08/07 16:31:19 agc Exp $");
    150 
    151 #include <sys/param.h>
    152 #include <sys/errno.h>
    153 #include <sys/pool.h>
    154 #include <sys/proc.h>
    155 #include <sys/queue.h>
    156 #include <sys/disk.h>
    157 #include <sys/device.h>
    158 #include <sys/stat.h>
    159 #include <sys/ioctl.h>
    160 #include <sys/fcntl.h>
    161 #include <sys/systm.h>
    162 #include <sys/namei.h>
    163 #include <sys/vnode.h>
    164 #include <sys/disklabel.h>
    165 #include <sys/conf.h>
    166 #include <sys/lock.h>
    167 #include <sys/buf.h>
    168 #include <sys/user.h>
    169 #include <sys/reboot.h>
    170 
    171 #include <dev/raidframe/raidframevar.h>
    172 #include <dev/raidframe/raidframeio.h>
    173 #include "raid.h"
    174 #include "opt_raid_autoconfig.h"
    175 #include "rf_raid.h"
    176 #include "rf_copyback.h"
    177 #include "rf_dag.h"
    178 #include "rf_dagflags.h"
    179 #include "rf_desc.h"
    180 #include "rf_diskqueue.h"
    181 #include "rf_etimer.h"
    182 #include "rf_general.h"
    183 #include "rf_kintf.h"
    184 #include "rf_options.h"
    185 #include "rf_driver.h"
    186 #include "rf_parityscan.h"
    187 #include "rf_threadstuff.h"
    188 
    189 #ifdef DEBUG
    190 int     rf_kdebug_level = 0;
    191 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
    192 #else				/* DEBUG */
    193 #define db1_printf(a) { }
    194 #endif				/* DEBUG */
    195 
    196 static RF_Raid_t **raidPtrs;	/* global raid device descriptors */
    197 
    198 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)
    199 
    200 static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
    201 						 * spare table */
    202 static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
    203 						 * installation process */
    204 
    205 MALLOC_DEFINE(M_RAIDFRAME, "RAIDframe", "RAIDframe structures");
    206 
    207 /* prototypes */
    208 static void KernelWakeupFunc(struct buf * bp);
    209 static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
    210 		   dev_t dev, RF_SectorNum_t startSect,
    211 		   RF_SectorCount_t numSect, caddr_t buf,
    212 		   void (*cbFunc) (struct buf *), void *cbArg,
    213 		   int logBytesPerSector, struct proc * b_proc);
    214 static void raidinit(RF_Raid_t *);
    215 
    216 void raidattach(int);
    217 
    218 dev_type_open(raidopen);
    219 dev_type_close(raidclose);
    220 dev_type_read(raidread);
    221 dev_type_write(raidwrite);
    222 dev_type_ioctl(raidioctl);
    223 dev_type_strategy(raidstrategy);
    224 dev_type_dump(raiddump);
    225 dev_type_size(raidsize);
    226 
    227 const struct bdevsw raid_bdevsw = {
    228 	raidopen, raidclose, raidstrategy, raidioctl,
    229 	raiddump, raidsize, D_DISK
    230 };
    231 
    232 const struct cdevsw raid_cdevsw = {
    233 	raidopen, raidclose, raidread, raidwrite, raidioctl,
    234 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
    235 };
    236 
    237 /*
    238  * Pilfered from ccd.c
    239  */
    240 
    241 struct raidbuf {
    242 	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! */
    243 	struct buf *rf_obp;	/* ptr. to original I/O buf */
    244 	RF_DiskQueueData_t *req;/* the request that this was part of.. */
    245 };
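        /*
         * Each component I/O is issued using the embedded rf_buf.  Because it
         * is the first member, the I/O completion handler can cast the struct
         * buf pointer it is handed back into a struct raidbuf and recover
         * rf_obp and req.  The raidbuf structures themselves are allocated
         * from raidframe_cbufpool, declared below.
         */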
    246 
    247 /* component buffer pool */
    248 struct pool raidframe_cbufpool;
    249 
    250 /* XXX Not sure if the following should be replacing the raidPtrs above,
    251    or if it should be used in conjunction with that...
    252 */
    253 
    254 struct raid_softc {
    255 	int     sc_flags;	/* flags */
    256 	int     sc_cflags;	/* configuration flags */
    257 	size_t  sc_size;        /* size of the raid device */
    258 	char    sc_xname[20];	/* XXX external name */
    259 	struct disk sc_dkdev;	/* generic disk device info */
    260 	struct bufq_state buf_queue;	/* used for the device queue */
    261 };
    262 /* sc_flags */
    263 #define RAIDF_INITED	0x01	/* unit has been initialized */
    264 #define RAIDF_WLABEL	0x02	/* label area is writable */
    265 #define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
    266 #define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
    267 #define RAIDF_LOCKED	0x80	/* unit is locked */
    268 
    269 #define	raidunit(x)	DISKUNIT(x)
    270 int numraid = 0;
    271 
    272 /*
    273  * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
    274  * Be aware that large numbers can allow the driver to consume a lot of
    275  * kernel memory, especially on writes, and in degraded mode reads.
    276  *
    277  * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    278  * a single 64K write will typically require 64K for the old data,
    279  * 64K for the old parity, and 64K for the new parity, for a total
    280  * of 192K (if the parity buffer is not re-used immediately).
     281  * Even if it is used immediately, that's still 128K, which when multiplied
    282  * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
    283  *
    284  * Now in degraded mode, for example, a 64K read on the above setup may
    285  * require data reconstruction, which will require *all* of the 4 remaining
    286  * disks to participate -- 4 * 32K/disk == 128K again.
    287  */
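        /*
         * As a rough rule of thumb from the examples above, the worst-case
         * transient memory use is on the order of
         *
         *	RAIDOUTSTANDING * (incoming data + old data + old parity + new parity)
         *	   ~= RAIDOUTSTANDING * 4 * (write size)
         *
         * so raise RAIDOUTSTANDING with that cost in mind.
         */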
    288 
    289 #ifndef RAIDOUTSTANDING
    290 #define RAIDOUTSTANDING   6
    291 #endif
    292 
    293 #define RAIDLABELDEV(dev)	\
    294 	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
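        /*
         * RAIDLABELDEV maps any partition of a raid unit back to the raw
         * partition of that unit; the disklabel is always read from and
         * written to the raw partition.
         */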
    295 
    296 /* declared here, and made public, for the benefit of KVM stuff.. */
    297 struct raid_softc *raid_softc;
    298 
    299 static void raidgetdefaultlabel(RF_Raid_t *, struct raid_softc *,
    300 				     struct disklabel *);
    301 static void raidgetdisklabel(dev_t);
    302 static void raidmakedisklabel(struct raid_softc *);
    303 
    304 static int raidlock(struct raid_softc *);
    305 static void raidunlock(struct raid_softc *);
    306 
    307 static void rf_markalldirty(RF_Raid_t *);
    308 
    309 struct device *raidrootdev;
    310 
    311 void rf_ReconThread(struct rf_recon_req *);
    312 /* XXX what I want is: */
    313 /*void rf_ReconThread(RF_Raid_t *raidPtr);  */
    314 void rf_RewriteParityThread(RF_Raid_t *raidPtr);
    315 void rf_CopybackThread(RF_Raid_t *raidPtr);
    316 void rf_ReconstructInPlaceThread(struct rf_recon_req *);
    317 int rf_autoconfig(struct device *self);
    318 void rf_buildroothack(RF_ConfigSet_t *);
    319 
    320 RF_AutoConfig_t *rf_find_raid_components(void);
    321 RF_ConfigSet_t *rf_create_auto_sets(RF_AutoConfig_t *);
    322 static int rf_does_it_fit(RF_ConfigSet_t *,RF_AutoConfig_t *);
    323 static int rf_reasonable_label(RF_ComponentLabel_t *);
    324 void rf_create_configuration(RF_AutoConfig_t *,RF_Config_t *, RF_Raid_t *);
    325 int rf_set_autoconfig(RF_Raid_t *, int);
    326 int rf_set_rootpartition(RF_Raid_t *, int);
    327 void rf_release_all_vps(RF_ConfigSet_t *);
    328 void rf_cleanup_config_set(RF_ConfigSet_t *);
    329 int rf_have_enough_components(RF_ConfigSet_t *);
    330 int rf_auto_config_set(RF_ConfigSet_t *, int *);
    331 
    332 static int raidautoconfig = 0; /* Debugging, mostly.  Set to 0 to not
    333 				  allow autoconfig to take place.
    334 			          Note that this is overridden by having
    335 			          RAID_AUTOCONFIG as an option in the
    336 			          kernel config file.  */
    337 
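        /*
         * raidattach() is called once at boot with the number of raid units
         * requested by the "pseudo-device raid N" line in the kernel config.
         * It allocates the per-unit descriptors and softc arrays, sets up the
         * component buffer pool, boots the RAIDframe core, and registers
         * rf_autoconfig() as a finalizer so that autoconfigurable sets are
         * assembled only after all real disk devices have attached.
         */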
    338 void
    339 raidattach(num)
    340 	int     num;
    341 {
    342 	int raidID;
    343 	int i, rc;
    344 
    345 #ifdef DEBUG
    346 	printf("raidattach: Asked for %d units\n", num);
    347 #endif
    348 
    349 	if (num <= 0) {
    350 #ifdef DIAGNOSTIC
    351 		panic("raidattach: count <= 0");
    352 #endif
    353 		return;
    354 	}
    355 	/* This is where all the initialization stuff gets done. */
    356 
    357 	numraid = num;
    358 
    359 	/* Make some space for requested number of units... */
    360 
    361 	RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
    362 	if (raidPtrs == NULL) {
    363 		panic("raidPtrs is NULL!!");
    364 	}
    365 
    366 	/* Initialize the component buffer pool. */
    367 	pool_init(&raidframe_cbufpool, sizeof(struct raidbuf), 0,
    368 	    0, 0, "raidpl", NULL);
    369 
    370 	rc = rf_mutex_init(&rf_sparet_wait_mutex);
    371 	if (rc) {
    372 		RF_PANIC();
    373 	}
    374 
    375 	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
    376 
    377 	for (i = 0; i < num; i++)
    378 		raidPtrs[i] = NULL;
    379 	rc = rf_BootRaidframe();
    380 	if (rc == 0)
    381 		printf("Kernelized RAIDframe activated\n");
    382 	else
    383 		panic("Serious error booting RAID!!");
    384 
     385 	/* put together some data structures like the CCD device does.  This
    386 	 * lets us lock the device and what-not when it gets opened. */
    387 
    388 	raid_softc = (struct raid_softc *)
    389 		malloc(num * sizeof(struct raid_softc),
    390 		       M_RAIDFRAME, M_NOWAIT);
    391 	if (raid_softc == NULL) {
    392 		printf("WARNING: no memory for RAIDframe driver\n");
    393 		return;
    394 	}
    395 
    396 	memset(raid_softc, 0, num * sizeof(struct raid_softc));
    397 
    398 	raidrootdev = (struct device *)malloc(num * sizeof(struct device),
    399 					      M_RAIDFRAME, M_NOWAIT);
    400 	if (raidrootdev == NULL) {
    401 		panic("No memory for RAIDframe driver!!?!?!");
    402 	}
    403 
    404 	for (raidID = 0; raidID < num; raidID++) {
    405 		bufq_alloc(&raid_softc[raidID].buf_queue, BUFQ_FCFS);
    406 
    407 		raidrootdev[raidID].dv_class  = DV_DISK;
    408 		raidrootdev[raidID].dv_cfdata = NULL;
    409 		raidrootdev[raidID].dv_unit   = raidID;
    410 		raidrootdev[raidID].dv_parent = NULL;
    411 		raidrootdev[raidID].dv_flags  = 0;
    412 		sprintf(raidrootdev[raidID].dv_xname,"raid%d",raidID);
    413 
    414 		RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
    415 			  (RF_Raid_t *));
    416 		if (raidPtrs[raidID] == NULL) {
    417 			printf("WARNING: raidPtrs[%d] is NULL\n", raidID);
    418 			numraid = raidID;
    419 			return;
    420 		}
    421 	}
    422 
    423 #ifdef RAID_AUTOCONFIG
    424 	raidautoconfig = 1;
    425 #endif
    426 
    427 	/*
    428 	 * Register a finalizer which will be used to auto-config RAID
    429 	 * sets once all real hardware devices have been found.
    430 	 */
    431 	if (config_finalize_register(NULL, rf_autoconfig) != 0)
    432 		printf("WARNING: unable to register RAIDframe finalizer\n");
    433 }
    434 
    435 int
    436 rf_autoconfig(struct device *self)
    437 {
    438 	RF_AutoConfig_t *ac_list;
    439 	RF_ConfigSet_t *config_sets;
    440 
    441 	if (raidautoconfig == 0)
    442 		return (0);
    443 
    444 	/* XXX This code can only be run once. */
    445 	raidautoconfig = 0;
    446 
    447 	/* 1. locate all RAID components on the system */
    448 #ifdef DEBUG
    449 	printf("Searching for RAID components...\n");
    450 #endif
    451 	ac_list = rf_find_raid_components();
    452 
    453 	/* 2. Sort them into their respective sets. */
    454 	config_sets = rf_create_auto_sets(ac_list);
    455 
    456 	/*
     457 	 * 3. Evaluate each set and configure the valid ones.
    458 	 * This gets done in rf_buildroothack().
    459 	 */
    460 	rf_buildroothack(config_sets);
    461 
    462 	return (1);
    463 }
    464 
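        /*
         * rf_buildroothack() walks the configuration sets produced by
         * rf_create_auto_sets(), autoconfigures each set that has enough
         * components and is marked for autoconfiguration, and releases the
         * resources of the rest.  If exactly one configured set is marked
         * rootable it becomes booted_device; if more than one is, the user
         * is asked to pick the root device (RB_ASKNAME).
         */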
    465 void
    466 rf_buildroothack(RF_ConfigSet_t *config_sets)
    467 {
    468 	RF_ConfigSet_t *cset;
    469 	RF_ConfigSet_t *next_cset;
    470 	int retcode;
    471 	int raidID;
    472 	int rootID;
    473 	int num_root;
    474 
    475 	rootID = 0;
    476 	num_root = 0;
    477 	cset = config_sets;
    478 	while(cset != NULL ) {
    479 		next_cset = cset->next;
    480 		if (rf_have_enough_components(cset) &&
    481 		    cset->ac->clabel->autoconfigure==1) {
    482 			retcode = rf_auto_config_set(cset,&raidID);
    483 			if (!retcode) {
    484 				if (cset->rootable) {
    485 					rootID = raidID;
    486 					num_root++;
    487 				}
    488 			} else {
    489 				/* The autoconfig didn't work :( */
    490 #if DEBUG
    491 				printf("Autoconfig failed with code %d for raid%d\n", retcode, raidID);
    492 #endif
    493 				rf_release_all_vps(cset);
    494 			}
    495 		} else {
    496 			/* we're not autoconfiguring this set...
    497 			   release the associated resources */
    498 			rf_release_all_vps(cset);
    499 		}
    500 		/* cleanup */
    501 		rf_cleanup_config_set(cset);
    502 		cset = next_cset;
    503 	}
    504 
    505 	/* we found something bootable... */
    506 
    507 	if (num_root == 1) {
    508 		booted_device = &raidrootdev[rootID];
    509 	} else if (num_root > 1) {
    510 		/* we can't guess.. require the user to answer... */
    511 		boothowto |= RB_ASKNAME;
    512 	}
    513 }
    514 
    515 
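        /*
         * raidsize() reports the size (in DEV_BSIZE blocks) of a swap
         * partition on the raid unit, temporarily opening and closing the
         * device if it is not already open; non-swap partitions report -1.
         * This is the bdevsw size entry, used when sizing dumps.
         */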
    516 int
    517 raidsize(dev)
    518 	dev_t   dev;
    519 {
    520 	struct raid_softc *rs;
    521 	struct disklabel *lp;
    522 	int     part, unit, omask, size;
    523 
    524 	unit = raidunit(dev);
    525 	if (unit >= numraid)
    526 		return (-1);
    527 	rs = &raid_softc[unit];
    528 
    529 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    530 		return (-1);
    531 
    532 	part = DISKPART(dev);
    533 	omask = rs->sc_dkdev.dk_openmask & (1 << part);
    534 	lp = rs->sc_dkdev.dk_label;
    535 
    536 	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
    537 		return (-1);
    538 
    539 	if (lp->d_partitions[part].p_fstype != FS_SWAP)
    540 		size = -1;
    541 	else
    542 		size = lp->d_partitions[part].p_size *
    543 		    (lp->d_secsize / DEV_BSIZE);
    544 
    545 	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
    546 		return (-1);
    547 
    548 	return (size);
    549 
    550 }
    551 
    552 int
    553 raiddump(dev, blkno, va, size)
    554 	dev_t   dev;
    555 	daddr_t blkno;
    556 	caddr_t va;
    557 	size_t  size;
    558 {
    559 	/* Not implemented. */
    560 	return ENXIO;
    561 }
    562 /* ARGSUSED */
    563 int
    564 raidopen(dev, flags, fmt, p)
    565 	dev_t   dev;
    566 	int     flags, fmt;
    567 	struct proc *p;
    568 {
    569 	int     unit = raidunit(dev);
    570 	struct raid_softc *rs;
    571 	struct disklabel *lp;
    572 	int     part, pmask;
    573 	int     error = 0;
    574 
    575 	if (unit >= numraid)
    576 		return (ENXIO);
    577 	rs = &raid_softc[unit];
    578 
    579 	if ((error = raidlock(rs)) != 0)
    580 		return (error);
    581 	lp = rs->sc_dkdev.dk_label;
    582 
    583 	part = DISKPART(dev);
    584 	pmask = (1 << part);
    585 
    586 	if ((rs->sc_flags & RAIDF_INITED) &&
    587 	    (rs->sc_dkdev.dk_openmask == 0))
    588 		raidgetdisklabel(dev);
    589 
    590 	/* make sure that this partition exists */
    591 
    592 	if (part != RAW_PART) {
    593 		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
    594 		    ((part >= lp->d_npartitions) ||
    595 			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
    596 			error = ENXIO;
    597 			raidunlock(rs);
    598 			return (error);
    599 		}
    600 	}
    601 	/* Prevent this unit from being unconfigured while open. */
    602 	switch (fmt) {
    603 	case S_IFCHR:
    604 		rs->sc_dkdev.dk_copenmask |= pmask;
    605 		break;
    606 
    607 	case S_IFBLK:
    608 		rs->sc_dkdev.dk_bopenmask |= pmask;
    609 		break;
    610 	}
    611 
    612 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    613 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
    614 		/* First one... mark things as dirty... Note that we *MUST*
    615 		 have done a configure before this.  I DO NOT WANT TO BE
    616 		 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
    617 		 THAT THEY BELONG TOGETHER!!!!! */
    618 		/* XXX should check to see if we're only open for reading
    619 		   here... If so, we needn't do this, but then need some
    620 		   other way of keeping track of what's happened.. */
    621 
    622 		rf_markalldirty( raidPtrs[unit] );
    623 	}
    624 
    625 
    626 	rs->sc_dkdev.dk_openmask =
    627 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    628 
    629 	raidunlock(rs);
    630 
    631 	return (error);
    632 
    633 
    634 }
    635 /* ARGSUSED */
    636 int
    637 raidclose(dev, flags, fmt, p)
    638 	dev_t   dev;
    639 	int     flags, fmt;
    640 	struct proc *p;
    641 {
    642 	int     unit = raidunit(dev);
    643 	struct raid_softc *rs;
    644 	int     error = 0;
    645 	int     part;
    646 
    647 	if (unit >= numraid)
    648 		return (ENXIO);
    649 	rs = &raid_softc[unit];
    650 
    651 	if ((error = raidlock(rs)) != 0)
    652 		return (error);
    653 
    654 	part = DISKPART(dev);
    655 
    656 	/* ...that much closer to allowing unconfiguration... */
    657 	switch (fmt) {
    658 	case S_IFCHR:
    659 		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
    660 		break;
    661 
    662 	case S_IFBLK:
    663 		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
    664 		break;
    665 	}
    666 	rs->sc_dkdev.dk_openmask =
    667 	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;
    668 
    669 	if ((rs->sc_dkdev.dk_openmask == 0) &&
    670 	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
     671 		/* Last one... the device is not unconfigured yet.
     672 		   (If RAIDF_INITED were not set, device shutdown
     673 		   would already have taken care of setting the clean
     674 		   bits.)  Mark things as clean... */
    675 
    676 		rf_update_component_labels(raidPtrs[unit],
    677 						 RF_FINAL_COMPONENT_UPDATE);
    678 		if (doing_shutdown) {
    679 			/* last one, and we're going down, so
    680 			   lights out for this RAID set too. */
    681 			error = rf_Shutdown(raidPtrs[unit]);
    682 
    683 			/* It's no longer initialized... */
    684 			rs->sc_flags &= ~RAIDF_INITED;
    685 
    686 			/* Detach the disk. */
    687 			disk_detach(&rs->sc_dkdev);
    688 		}
    689 	}
    690 
    691 	raidunlock(rs);
    692 	return (0);
    693 
    694 }
    695 
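        /*
         * raidstrategy() is the top-level I/O entry point: it validates the
         * unit and the transfer, bounds-checks the request against the
         * disklabel (except on the raw partition), queues the buf on the
         * per-unit queue and kicks raidstart() at splbio to push the request
         * into RAIDframe.
         */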
    696 void
    697 raidstrategy(bp)
    698 	struct buf *bp;
    699 {
    700 	int s;
    701 
    702 	unsigned int raidID = raidunit(bp->b_dev);
    703 	RF_Raid_t *raidPtr;
    704 	struct raid_softc *rs = &raid_softc[raidID];
    705 	struct disklabel *lp;
    706 	int     wlabel;
    707 
    708 	if ((rs->sc_flags & RAIDF_INITED) ==0) {
    709 		bp->b_error = ENXIO;
    710 		bp->b_flags |= B_ERROR;
    711 		bp->b_resid = bp->b_bcount;
    712 		biodone(bp);
    713 		return;
    714 	}
    715 	if (raidID >= numraid || !raidPtrs[raidID]) {
    716 		bp->b_error = ENODEV;
    717 		bp->b_flags |= B_ERROR;
    718 		bp->b_resid = bp->b_bcount;
    719 		biodone(bp);
    720 		return;
    721 	}
    722 	raidPtr = raidPtrs[raidID];
    723 	if (!raidPtr->valid) {
    724 		bp->b_error = ENODEV;
    725 		bp->b_flags |= B_ERROR;
    726 		bp->b_resid = bp->b_bcount;
    727 		biodone(bp);
    728 		return;
    729 	}
    730 	if (bp->b_bcount == 0) {
    731 		db1_printf(("b_bcount is zero..\n"));
    732 		biodone(bp);
    733 		return;
    734 	}
    735 	lp = rs->sc_dkdev.dk_label;
    736 
    737 	/*
    738 	 * Do bounds checking and adjust transfer.  If there's an
    739 	 * error, the bounds check will flag that for us.
    740 	 */
    741 
    742 	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
    743 	if (DISKPART(bp->b_dev) != RAW_PART)
    744 		if (bounds_check_with_label(&rs->sc_dkdev, bp, wlabel) <= 0) {
    745 			db1_printf(("Bounds check failed!!:%d %d\n",
    746 				(int) bp->b_blkno, (int) wlabel));
    747 			biodone(bp);
    748 			return;
    749 		}
    750 	s = splbio();
    751 
    752 	bp->b_resid = 0;
    753 
    754 	/* stuff it onto our queue */
    755 	BUFQ_PUT(&rs->buf_queue, bp);
    756 
    757 	raidstart(raidPtrs[raidID]);
    758 
    759 	splx(s);
    760 }
    761 /* ARGSUSED */
    762 int
    763 raidread(dev, uio, flags)
    764 	dev_t   dev;
    765 	struct uio *uio;
    766 	int     flags;
    767 {
    768 	int     unit = raidunit(dev);
    769 	struct raid_softc *rs;
    770 	int     part;
    771 
    772 	if (unit >= numraid)
    773 		return (ENXIO);
    774 	rs = &raid_softc[unit];
    775 
    776 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    777 		return (ENXIO);
    778 	part = DISKPART(dev);
    779 
    780 	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));
    781 
    782 }
    783 /* ARGSUSED */
    784 int
    785 raidwrite(dev, uio, flags)
    786 	dev_t   dev;
    787 	struct uio *uio;
    788 	int     flags;
    789 {
    790 	int     unit = raidunit(dev);
    791 	struct raid_softc *rs;
    792 
    793 	if (unit >= numraid)
    794 		return (ENXIO);
    795 	rs = &raid_softc[unit];
    796 
    797 	if ((rs->sc_flags & RAIDF_INITED) == 0)
    798 		return (ENXIO);
    799 
    800 	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));
    801 
    802 }
    803 
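        /*
         * raidioctl() handles both the RAIDframe-specific commands (the
         * RAIDFRAME_* ioctls issued by raidctl(8): configure, shutdown, fail
         * a component, rebuild, query status, and so on) and the standard
         * disk ioctls (DIOC*) for disklabel access.
         */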
    804 int
    805 raidioctl(dev, cmd, data, flag, p)
    806 	dev_t   dev;
    807 	u_long  cmd;
    808 	caddr_t data;
    809 	int     flag;
    810 	struct proc *p;
    811 {
    812 	int     unit = raidunit(dev);
    813 	int     error = 0;
    814 	int     part, pmask;
    815 	struct raid_softc *rs;
    816 	RF_Config_t *k_cfg, *u_cfg;
    817 	RF_Raid_t *raidPtr;
    818 	RF_RaidDisk_t *diskPtr;
    819 	RF_AccTotals_t *totals;
    820 	RF_DeviceConfig_t *d_cfg, **ucfgp;
    821 	u_char *specific_buf;
    822 	int retcode = 0;
    823 	int row;
    824 	int column;
    825 	int raidid;
    826 	struct rf_recon_req *rrcopy, *rr;
    827 	RF_ComponentLabel_t *clabel;
    828 	RF_ComponentLabel_t ci_label;
    829 	RF_ComponentLabel_t **clabel_ptr;
    830 	RF_SingleComponent_t *sparePtr,*componentPtr;
    831 	RF_SingleComponent_t hot_spare;
    832 	RF_SingleComponent_t component;
    833 	RF_ProgressInfo_t progressInfo, **progressInfoPtr;
    834 	int i, j, d;
    835 #ifdef __HAVE_OLD_DISKLABEL
    836 	struct disklabel newlabel;
    837 #endif
    838 
    839 	if (unit >= numraid)
    840 		return (ENXIO);
    841 	rs = &raid_softc[unit];
    842 	raidPtr = raidPtrs[unit];
    843 
    844 	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
    845 		(int) DISKPART(dev), (int) unit, (int) cmd));
    846 
    847 	/* Must be open for writes for these commands... */
    848 	switch (cmd) {
    849 	case DIOCSDINFO:
    850 	case DIOCWDINFO:
    851 #ifdef __HAVE_OLD_DISKLABEL
    852 	case ODIOCWDINFO:
    853 	case ODIOCSDINFO:
    854 #endif
    855 	case DIOCWLABEL:
    856 		if ((flag & FWRITE) == 0)
    857 			return (EBADF);
    858 	}
    859 
    860 	/* Must be initialized for these... */
    861 	switch (cmd) {
    862 	case DIOCGDINFO:
    863 	case DIOCSDINFO:
    864 	case DIOCWDINFO:
    865 #ifdef __HAVE_OLD_DISKLABEL
    866 	case ODIOCGDINFO:
    867 	case ODIOCWDINFO:
    868 	case ODIOCSDINFO:
    869 	case ODIOCGDEFLABEL:
    870 #endif
    871 	case DIOCGPART:
    872 	case DIOCWLABEL:
    873 	case DIOCGDEFLABEL:
    874 	case RAIDFRAME_SHUTDOWN:
    875 	case RAIDFRAME_REWRITEPARITY:
    876 	case RAIDFRAME_GET_INFO:
    877 	case RAIDFRAME_RESET_ACCTOTALS:
    878 	case RAIDFRAME_GET_ACCTOTALS:
    879 	case RAIDFRAME_KEEP_ACCTOTALS:
    880 	case RAIDFRAME_GET_SIZE:
    881 	case RAIDFRAME_FAIL_DISK:
    882 	case RAIDFRAME_COPYBACK:
    883 	case RAIDFRAME_CHECK_RECON_STATUS:
    884 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
    885 	case RAIDFRAME_GET_COMPONENT_LABEL:
    886 	case RAIDFRAME_SET_COMPONENT_LABEL:
    887 	case RAIDFRAME_ADD_HOT_SPARE:
    888 	case RAIDFRAME_REMOVE_HOT_SPARE:
    889 	case RAIDFRAME_INIT_LABELS:
    890 	case RAIDFRAME_REBUILD_IN_PLACE:
    891 	case RAIDFRAME_CHECK_PARITY:
    892 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
    893 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
    894 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
    895 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
    896 	case RAIDFRAME_SET_AUTOCONFIG:
    897 	case RAIDFRAME_SET_ROOT:
    898 	case RAIDFRAME_DELETE_COMPONENT:
    899 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
    900 		if ((rs->sc_flags & RAIDF_INITED) == 0)
    901 			return (ENXIO);
    902 	}
    903 
    904 	switch (cmd) {
    905 
    906 		/* configure the system */
    907 	case RAIDFRAME_CONFIGURE:
    908 
    909 		if (raidPtr->valid) {
    910 			/* There is a valid RAID set running on this unit! */
    911 			printf("raid%d: Device already configured!\n",unit);
    912 			return(EINVAL);
    913 		}
    914 
    915 		/* copy-in the configuration information */
    916 		/* data points to a pointer to the configuration structure */
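        		/*
        		 * A minimal sketch (assumed here, not part of this
        		 * driver) of the userland side: a tool such as
        		 * raidctl(8) passes the address of a pointer to its
        		 * RF_Config_t, with fd open on the raw raid device:
        		 *
        		 *	RF_Config_t cfg;
        		 *	void *cfgp = &cfg;
        		 *
        		 *	memset(&cfg, 0, sizeof(cfg));
        		 *	... fill in cfg from the config file ...
        		 *	if (ioctl(fd, RAIDFRAME_CONFIGURE, &cfgp) == -1)
        		 *		err(1, "RAIDFRAME_CONFIGURE");
        		 *
        		 * This is why the code below first dereferences "data"
        		 * to get the user-space RF_Config_t pointer, and then
        		 * copyin()s the structure itself.
        		 */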
    917 
    918 		u_cfg = *((RF_Config_t **) data);
    919 		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
    920 		if (k_cfg == NULL) {
    921 			return (ENOMEM);
    922 		}
    923 		retcode = copyin(u_cfg, k_cfg, sizeof(RF_Config_t));
    924 		if (retcode) {
    925 			RF_Free(k_cfg, sizeof(RF_Config_t));
    926 			db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
    927 				retcode));
    928 			return (retcode);
    929 		}
    930 		/* allocate a buffer for the layout-specific data, and copy it
    931 		 * in */
    932 		if (k_cfg->layoutSpecificSize) {
    933 			if (k_cfg->layoutSpecificSize > 10000) {
    934 				/* sanity check */
    935 				RF_Free(k_cfg, sizeof(RF_Config_t));
    936 				return (EINVAL);
    937 			}
    938 			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
    939 			    (u_char *));
    940 			if (specific_buf == NULL) {
    941 				RF_Free(k_cfg, sizeof(RF_Config_t));
    942 				return (ENOMEM);
    943 			}
    944 			retcode = copyin(k_cfg->layoutSpecific, specific_buf,
    945 			    k_cfg->layoutSpecificSize);
    946 			if (retcode) {
    947 				RF_Free(k_cfg, sizeof(RF_Config_t));
    948 				RF_Free(specific_buf,
    949 					k_cfg->layoutSpecificSize);
    950 				db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
    951 					retcode));
    952 				return (retcode);
    953 			}
    954 		} else
    955 			specific_buf = NULL;
    956 		k_cfg->layoutSpecific = specific_buf;
    957 
    958 		/* should do some kind of sanity check on the configuration.
    959 		 * Store the sum of all the bytes in the last byte? */
    960 
    961 		/* configure the system */
    962 
    963 		/*
    964 		 * Clear the entire RAID descriptor, just to make sure
    965 		 *  there is no stale data left in the case of a
    966 		 *  reconfiguration
    967 		 */
    968 		memset((char *) raidPtr, 0, sizeof(RF_Raid_t));
    969 		raidPtr->raidid = unit;
    970 
    971 		retcode = rf_Configure(raidPtr, k_cfg, NULL);
    972 
    973 		if (retcode == 0) {
    974 
    975 			/* allow this many simultaneous IO's to
    976 			   this RAID device */
    977 			raidPtr->openings = RAIDOUTSTANDING;
    978 
    979 			raidinit(raidPtr);
    980 			rf_markalldirty(raidPtr);
    981 		}
    982 		/* free the buffers.  No return code here. */
    983 		if (k_cfg->layoutSpecificSize) {
    984 			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
    985 		}
    986 		RF_Free(k_cfg, sizeof(RF_Config_t));
    987 
    988 		return (retcode);
    989 
    990 		/* shutdown the system */
    991 	case RAIDFRAME_SHUTDOWN:
    992 
    993 		if ((error = raidlock(rs)) != 0)
    994 			return (error);
    995 
    996 		/*
    997 		 * If somebody has a partition mounted, we shouldn't
    998 		 * shutdown.
    999 		 */
   1000 
   1001 		part = DISKPART(dev);
   1002 		pmask = (1 << part);
   1003 		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
   1004 		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
   1005 			(rs->sc_dkdev.dk_copenmask & pmask))) {
   1006 			raidunlock(rs);
   1007 			return (EBUSY);
   1008 		}
   1009 
   1010 		retcode = rf_Shutdown(raidPtr);
   1011 
   1012 		/* It's no longer initialized... */
   1013 		rs->sc_flags &= ~RAIDF_INITED;
   1014 
   1015 		/* Detach the disk. */
   1016 		disk_detach(&rs->sc_dkdev);
   1017 
   1018 		raidunlock(rs);
   1019 
   1020 		return (retcode);
   1021 	case RAIDFRAME_GET_COMPONENT_LABEL:
   1022 		clabel_ptr = (RF_ComponentLabel_t **) data;
   1023 		/* need to read the component label for the disk indicated
   1024 		   by row,column in clabel */
   1025 
    1026 		/* For practice, let's get it directly from disk, rather
   1027 		   than from the in-core copy */
   1028 		RF_Malloc( clabel, sizeof( RF_ComponentLabel_t ),
   1029 			   (RF_ComponentLabel_t *));
   1030 		if (clabel == NULL)
   1031 			return (ENOMEM);
   1032 
   1033 		memset((char *) clabel, 0, sizeof(RF_ComponentLabel_t));
   1034 
   1035 		retcode = copyin( *clabel_ptr, clabel,
   1036 				  sizeof(RF_ComponentLabel_t));
   1037 
   1038 		if (retcode) {
   1039 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1040 			return(retcode);
   1041 		}
   1042 
   1043 		row = clabel->row;
   1044 		column = clabel->column;
   1045 
   1046 		if ((row < 0) || (row >= raidPtr->numRow) ||
   1047 		    (column < 0) || (column >= raidPtr->numCol +
   1048 				     raidPtr->numSpare)) {
   1049 			RF_Free( clabel, sizeof(RF_ComponentLabel_t));
   1050 			return(EINVAL);
   1051 		}
   1052 
   1053 		raidread_component_label(raidPtr->Disks[row][column].dev,
   1054 				raidPtr->raid_cinfo[row][column].ci_vp,
   1055 				clabel );
   1056 
   1057 		retcode = copyout(clabel, *clabel_ptr,
   1058 				  sizeof(RF_ComponentLabel_t));
   1059 		RF_Free(clabel, sizeof(RF_ComponentLabel_t));
   1060 		return (retcode);
   1061 
   1062 	case RAIDFRAME_SET_COMPONENT_LABEL:
   1063 		clabel = (RF_ComponentLabel_t *) data;
   1064 
   1065 		/* XXX check the label for valid stuff... */
   1066 		/* Note that some things *should not* get modified --
   1067 		   the user should be re-initing the labels instead of
   1068 		   trying to patch things.
   1069 		   */
   1070 
   1071 		raidid = raidPtr->raidid;
   1072 		printf("raid%d: Got component label:\n", raidid);
   1073 		printf("raid%d: Version: %d\n", raidid, clabel->version);
   1074 		printf("raid%d: Serial Number: %d\n", raidid, clabel->serial_number);
   1075 		printf("raid%d: Mod counter: %d\n", raidid, clabel->mod_counter);
   1076 		printf("raid%d: Row: %d\n", raidid, clabel->row);
   1077 		printf("raid%d: Column: %d\n", raidid, clabel->column);
   1078 		printf("raid%d: Num Rows: %d\n", raidid, clabel->num_rows);
   1079 		printf("raid%d: Num Columns: %d\n", raidid, clabel->num_columns);
   1080 		printf("raid%d: Clean: %d\n", raidid, clabel->clean);
   1081 		printf("raid%d: Status: %d\n", raidid, clabel->status);
   1082 
   1083 		row = clabel->row;
   1084 		column = clabel->column;
   1085 
   1086 		if ((row < 0) || (row >= raidPtr->numRow) ||
   1087 		    (column < 0) || (column >= raidPtr->numCol)) {
   1088 			return(EINVAL);
   1089 		}
   1090 
   1091 		/* XXX this isn't allowed to do anything for now :-) */
   1092 
   1093 		/* XXX and before it is, we need to fill in the rest
   1094 		   of the fields!?!?!?! */
   1095 #if 0
   1096 		raidwrite_component_label(
   1097                             raidPtr->Disks[row][column].dev,
   1098 			    raidPtr->raid_cinfo[row][column].ci_vp,
   1099 			    clabel );
   1100 #endif
   1101 		return (0);
   1102 
   1103 	case RAIDFRAME_INIT_LABELS:
   1104 		clabel = (RF_ComponentLabel_t *) data;
   1105 		/*
   1106 		   we only want the serial number from
   1107 		   the above.  We get all the rest of the information
   1108 		   from the config that was used to create this RAID
   1109 		   set.
   1110 		   */
   1111 
   1112 		raidPtr->serial_number = clabel->serial_number;
   1113 
   1114 		raid_init_component_label(raidPtr, &ci_label);
   1115 		ci_label.serial_number = clabel->serial_number;
   1116 
   1117 		for(row=0;row<raidPtr->numRow;row++) {
   1118 			ci_label.row = row;
   1119 			for(column=0;column<raidPtr->numCol;column++) {
   1120 				diskPtr = &raidPtr->Disks[row][column];
   1121 				if (!RF_DEAD_DISK(diskPtr->status)) {
   1122 					ci_label.partitionSize = diskPtr->partitionSize;
   1123 					ci_label.column = column;
   1124 					raidwrite_component_label(
   1125 					  raidPtr->Disks[row][column].dev,
   1126 					  raidPtr->raid_cinfo[row][column].ci_vp,
   1127 					  &ci_label );
   1128 				}
   1129 			}
   1130 		}
   1131 
   1132 		return (retcode);
   1133 	case RAIDFRAME_SET_AUTOCONFIG:
   1134 		d = rf_set_autoconfig(raidPtr, *(int *) data);
   1135 		printf("raid%d: New autoconfig value is: %d\n",
   1136 		       raidPtr->raidid, d);
   1137 		*(int *) data = d;
   1138 		return (retcode);
   1139 
   1140 	case RAIDFRAME_SET_ROOT:
   1141 		d = rf_set_rootpartition(raidPtr, *(int *) data);
   1142 		printf("raid%d: New rootpartition value is: %d\n",
   1143 		       raidPtr->raidid, d);
   1144 		*(int *) data = d;
   1145 		return (retcode);
   1146 
   1147 		/* initialize all parity */
   1148 	case RAIDFRAME_REWRITEPARITY:
   1149 
   1150 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1151 			/* Parity for RAID 0 is trivially correct */
   1152 			raidPtr->parity_good = RF_RAID_CLEAN;
   1153 			return(0);
   1154 		}
   1155 
   1156 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1157 			/* Re-write is already in progress! */
   1158 			return(EINVAL);
   1159 		}
   1160 
   1161 		retcode = RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
   1162 					   rf_RewriteParityThread,
   1163 					   raidPtr,"raid_parity");
   1164 		return (retcode);
   1165 
   1166 
   1167 	case RAIDFRAME_ADD_HOT_SPARE:
   1168 		sparePtr = (RF_SingleComponent_t *) data;
   1169 		memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
   1170 		retcode = rf_add_hot_spare(raidPtr, &hot_spare);
   1171 		return(retcode);
   1172 
   1173 	case RAIDFRAME_REMOVE_HOT_SPARE:
   1174 		return(retcode);
   1175 
   1176 	case RAIDFRAME_DELETE_COMPONENT:
   1177 		componentPtr = (RF_SingleComponent_t *)data;
   1178 		memcpy( &component, componentPtr,
   1179 			sizeof(RF_SingleComponent_t));
   1180 		retcode = rf_delete_component(raidPtr, &component);
   1181 		return(retcode);
   1182 
   1183 	case RAIDFRAME_INCORPORATE_HOT_SPARE:
   1184 		componentPtr = (RF_SingleComponent_t *)data;
   1185 		memcpy( &component, componentPtr,
   1186 			sizeof(RF_SingleComponent_t));
   1187 		retcode = rf_incorporate_hot_spare(raidPtr, &component);
   1188 		return(retcode);
   1189 
   1190 	case RAIDFRAME_REBUILD_IN_PLACE:
   1191 
   1192 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1193 			/* Can't do this on a RAID 0!! */
   1194 			return(EINVAL);
   1195 		}
   1196 
   1197 		if (raidPtr->recon_in_progress == 1) {
   1198 			/* a reconstruct is already in progress! */
   1199 			return(EINVAL);
   1200 		}
   1201 
   1202 		componentPtr = (RF_SingleComponent_t *) data;
   1203 		memcpy( &component, componentPtr,
   1204 			sizeof(RF_SingleComponent_t));
   1205 		row = component.row;
   1206 		column = component.column;
   1207 
   1208 		if ((row < 0) || (row >= raidPtr->numRow) ||
   1209 		    (column < 0) || (column >= raidPtr->numCol)) {
   1210 			return(EINVAL);
   1211 		}
   1212 
   1213 		RF_LOCK_MUTEX(raidPtr->mutex);
   1214 		if ((raidPtr->Disks[row][column].status == rf_ds_optimal) &&
   1215 		    (raidPtr->numFailures > 0)) {
   1216 			/* XXX 0 above shouldn't be constant!!! */
   1217 			/* some component other than this has failed.
   1218 			   Let's not make things worse than they already
   1219 			   are... */
   1220 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1221 			       raidPtr->raidid);
   1222 			printf("raid%d:     Row: %d Col: %d   Too many failures.\n",
   1223 			       raidPtr->raidid, row, column);
   1224 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1225 			return (EINVAL);
   1226 		}
   1227 		if (raidPtr->Disks[row][column].status ==
   1228 		    rf_ds_reconstructing) {
   1229 			printf("raid%d: Unable to reconstruct to disk at:\n",
   1230 			       raidPtr->raidid);
    1231 			printf("raid%d:    Row: %d Col: %d   Reconstruction already occurring!\n", raidPtr->raidid, row, column);
   1232 
   1233 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1234 			return (EINVAL);
   1235 		}
   1236 		if (raidPtr->Disks[row][column].status == rf_ds_spared) {
   1237 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1238 			return (EINVAL);
   1239 		}
   1240 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1241 
   1242 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1243 		if (rrcopy == NULL)
   1244 			return(ENOMEM);
   1245 
   1246 		rrcopy->raidPtr = (void *) raidPtr;
   1247 		rrcopy->row = row;
   1248 		rrcopy->col = column;
   1249 
   1250 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1251 					   rf_ReconstructInPlaceThread,
   1252 					   rrcopy,"raid_reconip");
   1253 		return(retcode);
   1254 
   1255 	case RAIDFRAME_GET_INFO:
   1256 		if (!raidPtr->valid)
   1257 			return (ENODEV);
   1258 		ucfgp = (RF_DeviceConfig_t **) data;
   1259 		RF_Malloc(d_cfg, sizeof(RF_DeviceConfig_t),
   1260 			  (RF_DeviceConfig_t *));
   1261 		if (d_cfg == NULL)
   1262 			return (ENOMEM);
   1263 		memset((char *) d_cfg, 0, sizeof(RF_DeviceConfig_t));
   1264 		d_cfg->rows = raidPtr->numRow;
   1265 		d_cfg->cols = raidPtr->numCol;
   1266 		d_cfg->ndevs = raidPtr->numRow * raidPtr->numCol;
   1267 		if (d_cfg->ndevs >= RF_MAX_DISKS) {
   1268 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1269 			return (ENOMEM);
   1270 		}
   1271 		d_cfg->nspares = raidPtr->numSpare;
   1272 		if (d_cfg->nspares >= RF_MAX_DISKS) {
   1273 			RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1274 			return (ENOMEM);
   1275 		}
   1276 		d_cfg->maxqdepth = raidPtr->maxQueueDepth;
   1277 		d = 0;
   1278 		for (i = 0; i < d_cfg->rows; i++) {
   1279 			for (j = 0; j < d_cfg->cols; j++) {
   1280 				d_cfg->devs[d] = raidPtr->Disks[i][j];
   1281 				d++;
   1282 			}
   1283 		}
   1284 		for (j = d_cfg->cols, i = 0; i < d_cfg->nspares; i++, j++) {
   1285 			d_cfg->spares[i] = raidPtr->Disks[0][j];
   1286 		}
   1287 		retcode = copyout(d_cfg, *ucfgp, sizeof(RF_DeviceConfig_t));
   1288 		RF_Free(d_cfg, sizeof(RF_DeviceConfig_t));
   1289 
   1290 		return (retcode);
   1291 
   1292 	case RAIDFRAME_CHECK_PARITY:
   1293 		*(int *) data = raidPtr->parity_good;
   1294 		return (0);
   1295 
   1296 	case RAIDFRAME_RESET_ACCTOTALS:
   1297 		memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
   1298 		return (0);
   1299 
   1300 	case RAIDFRAME_GET_ACCTOTALS:
   1301 		totals = (RF_AccTotals_t *) data;
   1302 		*totals = raidPtr->acc_totals;
   1303 		return (0);
   1304 
   1305 	case RAIDFRAME_KEEP_ACCTOTALS:
   1306 		raidPtr->keep_acc_totals = *(int *)data;
   1307 		return (0);
   1308 
   1309 	case RAIDFRAME_GET_SIZE:
   1310 		*(int *) data = raidPtr->totalSectors;
   1311 		return (0);
   1312 
   1313 		/* fail a disk & optionally start reconstruction */
   1314 	case RAIDFRAME_FAIL_DISK:
   1315 
   1316 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1317 			/* Can't do this on a RAID 0!! */
   1318 			return(EINVAL);
   1319 		}
   1320 
   1321 		rr = (struct rf_recon_req *) data;
   1322 
   1323 		if (rr->row < 0 || rr->row >= raidPtr->numRow
   1324 		    || rr->col < 0 || rr->col >= raidPtr->numCol)
   1325 			return (EINVAL);
   1326 
   1327 
   1328 		RF_LOCK_MUTEX(raidPtr->mutex);
   1329 		if ((raidPtr->Disks[rr->row][rr->col].status ==
   1330 		     rf_ds_optimal) && (raidPtr->numFailures > 0)) {
   1331 			/* some other component has failed.  Let's not make
   1332 			   things worse. XXX wrong for RAID6 */
   1333 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1334 			return (EINVAL);
   1335 		}
   1336 		if (raidPtr->Disks[rr->row][rr->col].status == rf_ds_spared) {
   1337 			/* Can't fail a spared disk! */
   1338 			RF_UNLOCK_MUTEX(raidPtr->mutex);
   1339 			return (EINVAL);
   1340 		}
   1341 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1342 
   1343 		/* make a copy of the recon request so that we don't rely on
   1344 		 * the user's buffer */
   1345 		RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
   1346 		if (rrcopy == NULL)
   1347 			return(ENOMEM);
   1348 		memcpy(rrcopy, rr, sizeof(*rr));
   1349 		rrcopy->raidPtr = (void *) raidPtr;
   1350 
   1351 		retcode = RF_CREATE_THREAD(raidPtr->recon_thread,
   1352 					   rf_ReconThread,
   1353 					   rrcopy,"raid_recon");
   1354 		return (0);
   1355 
   1356 		/* invoke a copyback operation after recon on whatever disk
   1357 		 * needs it, if any */
   1358 	case RAIDFRAME_COPYBACK:
   1359 
   1360 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1361 			/* This makes no sense on a RAID 0!! */
   1362 			return(EINVAL);
   1363 		}
   1364 
   1365 		if (raidPtr->copyback_in_progress == 1) {
   1366 			/* Copyback is already in progress! */
   1367 			return(EINVAL);
   1368 		}
   1369 
   1370 		retcode = RF_CREATE_THREAD(raidPtr->copyback_thread,
   1371 					   rf_CopybackThread,
   1372 					   raidPtr,"raid_copyback");
   1373 		return (retcode);
   1374 
   1375 		/* return the percentage completion of reconstruction */
   1376 	case RAIDFRAME_CHECK_RECON_STATUS:
   1377 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1378 			/* This makes no sense on a RAID 0, so tell the
   1379 			   user it's done. */
   1380 			*(int *) data = 100;
   1381 			return(0);
   1382 		}
   1383 		row = 0; /* XXX we only consider a single row... */
   1384 		if (raidPtr->status[row] != rf_rs_reconstructing)
   1385 			*(int *) data = 100;
   1386 		else
   1387 			*(int *) data = raidPtr->reconControl[row]->percentComplete;
   1388 		return (0);
   1389 	case RAIDFRAME_CHECK_RECON_STATUS_EXT:
   1390 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1391 		row = 0; /* XXX we only consider a single row... */
   1392 		if (raidPtr->status[row] != rf_rs_reconstructing) {
   1393 			progressInfo.remaining = 0;
   1394 			progressInfo.completed = 100;
   1395 			progressInfo.total = 100;
   1396 		} else {
   1397 			progressInfo.total =
   1398 				raidPtr->reconControl[row]->numRUsTotal;
   1399 			progressInfo.completed =
   1400 				raidPtr->reconControl[row]->numRUsComplete;
   1401 			progressInfo.remaining = progressInfo.total -
   1402 				progressInfo.completed;
   1403 		}
   1404 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1405 				  sizeof(RF_ProgressInfo_t));
   1406 		return (retcode);
   1407 
   1408 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS:
   1409 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1410 			/* This makes no sense on a RAID 0, so tell the
   1411 			   user it's done. */
   1412 			*(int *) data = 100;
   1413 			return(0);
   1414 		}
   1415 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1416 			*(int *) data = 100 *
   1417 				raidPtr->parity_rewrite_stripes_done /
   1418 				raidPtr->Layout.numStripe;
   1419 		} else {
   1420 			*(int *) data = 100;
   1421 		}
   1422 		return (0);
   1423 
   1424 	case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT:
   1425 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1426 		if (raidPtr->parity_rewrite_in_progress == 1) {
   1427 			progressInfo.total = raidPtr->Layout.numStripe;
   1428 			progressInfo.completed =
   1429 				raidPtr->parity_rewrite_stripes_done;
   1430 			progressInfo.remaining = progressInfo.total -
   1431 				progressInfo.completed;
   1432 		} else {
   1433 			progressInfo.remaining = 0;
   1434 			progressInfo.completed = 100;
   1435 			progressInfo.total = 100;
   1436 		}
   1437 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1438 				  sizeof(RF_ProgressInfo_t));
   1439 		return (retcode);
   1440 
   1441 	case RAIDFRAME_CHECK_COPYBACK_STATUS:
   1442 		if (raidPtr->Layout.map->faultsTolerated == 0) {
   1443 			/* This makes no sense on a RAID 0 */
   1444 			*(int *) data = 100;
   1445 			return(0);
   1446 		}
   1447 		if (raidPtr->copyback_in_progress == 1) {
   1448 			*(int *) data = 100 * raidPtr->copyback_stripes_done /
   1449 				raidPtr->Layout.numStripe;
   1450 		} else {
   1451 			*(int *) data = 100;
   1452 		}
   1453 		return (0);
   1454 
   1455 	case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT:
   1456 		progressInfoPtr = (RF_ProgressInfo_t **) data;
   1457 		if (raidPtr->copyback_in_progress == 1) {
   1458 			progressInfo.total = raidPtr->Layout.numStripe;
   1459 			progressInfo.completed =
   1460 				raidPtr->copyback_stripes_done;
   1461 			progressInfo.remaining = progressInfo.total -
   1462 				progressInfo.completed;
   1463 		} else {
   1464 			progressInfo.remaining = 0;
   1465 			progressInfo.completed = 100;
   1466 			progressInfo.total = 100;
   1467 		}
   1468 		retcode = copyout(&progressInfo, *progressInfoPtr,
   1469 				  sizeof(RF_ProgressInfo_t));
   1470 		return (retcode);
   1471 
   1472 		/* the sparetable daemon calls this to wait for the kernel to
   1473 		 * need a spare table. this ioctl does not return until a
   1474 		 * spare table is needed. XXX -- calling mpsleep here in the
   1475 		 * ioctl code is almost certainly wrong and evil. -- XXX XXX
   1476 		 * -- I should either compute the spare table in the kernel,
   1477 		 * or have a different -- XXX XXX -- interface (a different
   1478 		 * character device) for delivering the table     -- XXX */
   1479 #if 0
   1480 	case RAIDFRAME_SPARET_WAIT:
   1481 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1482 		while (!rf_sparet_wait_queue)
   1483 			mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
   1484 		waitreq = rf_sparet_wait_queue;
   1485 		rf_sparet_wait_queue = rf_sparet_wait_queue->next;
   1486 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1487 
   1488 		/* structure assignment */
   1489 		*((RF_SparetWait_t *) data) = *waitreq;
   1490 
   1491 		RF_Free(waitreq, sizeof(*waitreq));
   1492 		return (0);
   1493 
   1494 		/* wakes up a process waiting on SPARET_WAIT and puts an error
    1495 		 * code in it that will cause the daemon to exit */
   1496 	case RAIDFRAME_ABORT_SPARET_WAIT:
   1497 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1498 		waitreq->fcol = -1;
   1499 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1500 		waitreq->next = rf_sparet_wait_queue;
   1501 		rf_sparet_wait_queue = waitreq;
   1502 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1503 		wakeup(&rf_sparet_wait_queue);
   1504 		return (0);
   1505 
   1506 		/* used by the spare table daemon to deliver a spare table
   1507 		 * into the kernel */
   1508 	case RAIDFRAME_SEND_SPARET:
   1509 
   1510 		/* install the spare table */
   1511 		retcode = rf_SetSpareTable(raidPtr, *(void **) data);
   1512 
   1513 		/* respond to the requestor.  the return status of the spare
   1514 		 * table installation is passed in the "fcol" field */
   1515 		RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
   1516 		waitreq->fcol = retcode;
   1517 		RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1518 		waitreq->next = rf_sparet_resp_queue;
   1519 		rf_sparet_resp_queue = waitreq;
   1520 		wakeup(&rf_sparet_resp_queue);
   1521 		RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1522 
   1523 		return (retcode);
   1524 #endif
   1525 
   1526 	default:
   1527 		break; /* fall through to the os-specific code below */
   1528 
   1529 	}
   1530 
   1531 	if (!raidPtr->valid)
   1532 		return (EINVAL);
   1533 
   1534 	/*
   1535 	 * Add support for "regular" device ioctls here.
   1536 	 */
   1537 
   1538 	switch (cmd) {
   1539 	case DIOCGDINFO:
   1540 		*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
   1541 		break;
   1542 #ifdef __HAVE_OLD_DISKLABEL
   1543 	case ODIOCGDINFO:
   1544 		newlabel = *(rs->sc_dkdev.dk_label);
   1545 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1546 			return ENOTTY;
   1547 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1548 		break;
   1549 #endif
   1550 
   1551 	case DIOCGPART:
   1552 		((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
   1553 		((struct partinfo *) data)->part =
   1554 		    &rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
   1555 		break;
   1556 
   1557 	case DIOCWDINFO:
   1558 	case DIOCSDINFO:
   1559 #ifdef __HAVE_OLD_DISKLABEL
   1560 	case ODIOCWDINFO:
   1561 	case ODIOCSDINFO:
   1562 #endif
   1563 	{
   1564 		struct disklabel *lp;
   1565 #ifdef __HAVE_OLD_DISKLABEL
   1566 		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
   1567 			memset(&newlabel, 0, sizeof newlabel);
   1568 			memcpy(&newlabel, data, sizeof (struct olddisklabel));
   1569 			lp = &newlabel;
   1570 		} else
   1571 #endif
   1572 		lp = (struct disklabel *)data;
   1573 
   1574 		if ((error = raidlock(rs)) != 0)
   1575 			return (error);
   1576 
   1577 		rs->sc_flags |= RAIDF_LABELLING;
   1578 
   1579 		error = setdisklabel(rs->sc_dkdev.dk_label,
   1580 		    lp, 0, rs->sc_dkdev.dk_cpulabel);
   1581 		if (error == 0) {
   1582 			if (cmd == DIOCWDINFO
   1583 #ifdef __HAVE_OLD_DISKLABEL
   1584 			    || cmd == ODIOCWDINFO
   1585 #endif
   1586 			   )
   1587 				error = writedisklabel(RAIDLABELDEV(dev),
   1588 				    raidstrategy, rs->sc_dkdev.dk_label,
   1589 				    rs->sc_dkdev.dk_cpulabel);
   1590 		}
   1591 		rs->sc_flags &= ~RAIDF_LABELLING;
   1592 
   1593 		raidunlock(rs);
   1594 
   1595 		if (error)
   1596 			return (error);
   1597 		break;
   1598 	}
   1599 
   1600 	case DIOCWLABEL:
   1601 		if (*(int *) data != 0)
   1602 			rs->sc_flags |= RAIDF_WLABEL;
   1603 		else
   1604 			rs->sc_flags &= ~RAIDF_WLABEL;
   1605 		break;
   1606 
   1607 	case DIOCGDEFLABEL:
   1608 		raidgetdefaultlabel(raidPtr, rs, (struct disklabel *) data);
   1609 		break;
   1610 
   1611 #ifdef __HAVE_OLD_DISKLABEL
   1612 	case ODIOCGDEFLABEL:
   1613 		raidgetdefaultlabel(raidPtr, rs, &newlabel);
   1614 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
   1615 			return ENOTTY;
   1616 		memcpy(data, &newlabel, sizeof (struct olddisklabel));
   1617 		break;
   1618 #endif
   1619 
   1620 	default:
   1621 		retcode = ENOTTY;
   1622 	}
   1623 	return (retcode);
   1624 
   1625 }
   1626 
   1627 
   1628 /* raidinit -- complete the rest of the initialization for the
   1629    RAIDframe device.  */
   1630 
   1631 
   1632 static void
   1633 raidinit(raidPtr)
   1634 	RF_Raid_t *raidPtr;
   1635 {
   1636 	struct raid_softc *rs;
   1637 	int     unit;
   1638 
   1639 	unit = raidPtr->raidid;
   1640 
   1641 	rs = &raid_softc[unit];
   1642 
   1643 	/* XXX should check return code first... */
   1644 	rs->sc_flags |= RAIDF_INITED;
   1645 
    1646 	snprintf(rs->sc_xname, sizeof(rs->sc_xname), "raid%d", unit);
   1647 
   1648 	rs->sc_dkdev.dk_name = rs->sc_xname;
   1649 
   1650 	/* disk_attach actually creates space for the CPU disklabel, among
   1651 	 * other things, so it's critical to call this *BEFORE* we try putzing
   1652 	 * with disklabels. */
   1653 
   1654 	disk_attach(&rs->sc_dkdev);
   1655 
   1656 	/* XXX There may be a weird interaction here between this, and
   1657 	 * protectedSectors, as used in RAIDframe.  */
   1658 
   1659 	rs->sc_size = raidPtr->totalSectors;
   1660 
   1661 }
   1662 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
   1663 /* wake up the daemon & tell it to get us a spare table
   1664  * XXX
   1665  * the entries in the queues should be tagged with the raidPtr
   1666  * so that in the extremely rare case that two recons happen at once,
    1667  * we know for which device we're requesting a spare table
   1668  * XXX
   1669  *
   1670  * XXX This code is not currently used. GO
   1671  */
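/*
 * The intended handshake (pieced together from the queues used here and
 * from the RAIDFRAME_SPARET_WAIT / RAIDFRAME_SEND_SPARET handlers above,
 * which are currently compiled out):
 *
 *   1. This routine queues the request on rf_sparet_wait_queue and does a
 *      wakeup() so the daemon blocked in RAIDFRAME_SPARET_WAIT picks it up.
 *   2. The daemon computes the spare table in userland and hands it back
 *      via RAIDFRAME_SEND_SPARET, which installs it with rf_SetSpareTable()
 *      and queues the resulting status on rf_sparet_resp_queue.
 *   3. We sleep on rf_sparet_resp_queue and return that status (in fcol).
 *
 * RAIDFRAME_ABORT_SPARET_WAIT queues a request with fcol == -1, which is
 * how the daemon is told to go away.
 */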
   1672 int
   1673 rf_GetSpareTableFromDaemon(req)
   1674 	RF_SparetWait_t *req;
   1675 {
   1676 	int     retcode;
   1677 
   1678 	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
   1679 	req->next = rf_sparet_wait_queue;
   1680 	rf_sparet_wait_queue = req;
   1681 	wakeup(&rf_sparet_wait_queue);
   1682 
    1683 	/* XXX the old mpsleep call dropped the mutex; tsleep does not */
   1684 	while (!rf_sparet_resp_queue) {
   1685 		tsleep(&rf_sparet_resp_queue, PRIBIO,
   1686 		    "raidframe getsparetable", 0);
   1687 	}
   1688 	req = rf_sparet_resp_queue;
   1689 	rf_sparet_resp_queue = req->next;
   1690 	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
   1691 
   1692 	retcode = req->fcol;
   1693 	RF_Free(req, sizeof(*req));	/* this is not the same req as we
   1694 					 * alloc'd */
   1695 	return (retcode);
   1696 }
   1697 #endif
   1698 
   1699 /* a wrapper around rf_DoAccess that extracts appropriate info from the
   1700  * bp & passes it down.
    1701  * Any calls originating in the kernel must use non-blocking I/O.  We
    1702  * do some extra sanity checking to return "appropriate" error values
    1703  * for certain conditions (to make some standard utilities work).
   1704  *
   1705  * Formerly known as: rf_DoAccessKernel
   1706  */
   1707 void
   1708 raidstart(raidPtr)
   1709 	RF_Raid_t *raidPtr;
   1710 {
   1711 	RF_SectorCount_t num_blocks, pb, sum;
   1712 	RF_RaidAddr_t raid_addr;
   1713 	struct partition *pp;
   1714 	daddr_t blocknum;
   1715 	int     unit;
   1716 	struct raid_softc *rs;
   1717 	int     do_async;
   1718 	struct buf *bp;
   1719 
   1720 	unit = raidPtr->raidid;
   1721 	rs = &raid_softc[unit];
   1722 
   1723 	/* quick check to see if anything has died recently */
   1724 	RF_LOCK_MUTEX(raidPtr->mutex);
   1725 	if (raidPtr->numNewFailures > 0) {
   1726 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1727 		rf_update_component_labels(raidPtr,
   1728 					   RF_NORMAL_COMPONENT_UPDATE);
   1729 		RF_LOCK_MUTEX(raidPtr->mutex);
   1730 		raidPtr->numNewFailures--;
   1731 	}
   1732 
   1733 	/* Check to see if we're at the limit... */
   1734 	while (raidPtr->openings > 0) {
   1735 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1736 
   1737 		/* get the next item, if any, from the queue */
   1738 		if ((bp = BUFQ_GET(&rs->buf_queue)) == NULL) {
   1739 			/* nothing more to do */
   1740 			return;
   1741 		}
   1742 
   1743 		/* Ok, for the bp we have here, bp->b_blkno is relative to the
   1744 		 * partition.. Need to make it absolute to the underlying
   1745 		 * device.. */
   1746 
   1747 		blocknum = bp->b_blkno;
   1748 		if (DISKPART(bp->b_dev) != RAW_PART) {
   1749 			pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
   1750 			blocknum += pp->p_offset;
   1751 		}
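		/* Worked example (hypothetical numbers): a request for
		 * block 128 of a partition whose p_offset is 409600 comes
		 * in with bp->b_blkno == 128; the addition above yields
		 * blocknum == 409728, which is the address RAIDframe maps
		 * onto the components.  I/O to the raw partition is passed
		 * through unmodified. */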
   1752 
   1753 		db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno,
   1754 			    (int) blocknum));
   1755 
   1756 		db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
   1757 		db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));
   1758 
   1759 		/* *THIS* is where we adjust what block we're going to...
   1760 		 * but DO NOT TOUCH bp->b_blkno!!! */
   1761 		raid_addr = blocknum;
   1762 
   1763 		num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
   1764 		pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
   1765 		sum = raid_addr + num_blocks + pb;
   1766 		if (1 || rf_debugKernelAccess) {
   1767 			db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
   1768 				    (int) raid_addr, (int) sum, (int) num_blocks,
   1769 				    (int) pb, (int) bp->b_resid));
   1770 		}
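		/* The check below rejects requests that would run past the
		 * end of the array, and the "sum < ..." comparisons also
		 * catch the case where the unsigned addition above wrapped
		 * around (a bogus, huge b_blkno could otherwise make sum
		 * look small and slip past the totalSectors test). */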
   1771 		if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
   1772 		    || (sum < num_blocks) || (sum < pb)) {
   1773 			bp->b_error = ENOSPC;
   1774 			bp->b_flags |= B_ERROR;
   1775 			bp->b_resid = bp->b_bcount;
   1776 			biodone(bp);
   1777 			RF_LOCK_MUTEX(raidPtr->mutex);
   1778 			continue;
   1779 		}
   1780 		/*
   1781 		 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
   1782 		 */
   1783 
   1784 		if (bp->b_bcount & raidPtr->sectorMask) {
   1785 			bp->b_error = EINVAL;
   1786 			bp->b_flags |= B_ERROR;
   1787 			bp->b_resid = bp->b_bcount;
   1788 			biodone(bp);
   1789 			RF_LOCK_MUTEX(raidPtr->mutex);
   1790 			continue;
   1791 
   1792 		}
   1793 		db1_printf(("Calling DoAccess..\n"));
   1794 
   1795 
   1796 		RF_LOCK_MUTEX(raidPtr->mutex);
   1797 		raidPtr->openings--;
   1798 		RF_UNLOCK_MUTEX(raidPtr->mutex);
   1799 
   1800 		/*
   1801 		 * Everything is async.
   1802 		 */
   1803 		do_async = 1;
   1804 
   1805 		disk_busy(&rs->sc_dkdev);
   1806 
   1807 		/* XXX we're still at splbio() here... do we *really*
   1808 		   need to be? */
   1809 
   1810 		/* don't ever condition on bp->b_flags & B_WRITE.
   1811 		 * always condition on B_READ instead */
   1812 
   1813 		bp->b_error = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
   1814 				      RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
   1815 				      do_async, raid_addr, num_blocks,
   1816 				      bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
   1817 
   1818 		if (bp->b_error) {
   1819 			bp->b_flags |= B_ERROR;
   1820 		}
   1821 
   1822 		RF_LOCK_MUTEX(raidPtr->mutex);
   1823 	}
   1824 	RF_UNLOCK_MUTEX(raidPtr->mutex);
   1825 }
   1826 
   1827 
   1828 
   1829 
   1830 /* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */
   1831 
   1832 int
   1833 rf_DispatchKernelIO(queue, req)
   1834 	RF_DiskQueue_t *queue;
   1835 	RF_DiskQueueData_t *req;
   1836 {
   1837 	int     op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
   1838 	struct buf *bp;
   1839 	struct raidbuf *raidbp = NULL;
   1840 
   1841 	req->queue = queue;
   1842 
   1843 #if DIAGNOSTIC
   1844 	if (queue->raidPtr->raidid >= numraid) {
   1845 		printf("Invalid unit number: %d %d\n", queue->raidPtr->raidid,
   1846 		    numraid);
   1847 		panic("Invalid Unit number in rf_DispatchKernelIO");
   1848 	}
   1849 #endif
   1850 
   1851 	bp = req->bp;
   1852 #if 1
   1853 	/* XXX when there is a physical disk failure, someone is passing us a
   1854 	 * buffer that contains old stuff!!  Attempt to deal with this problem
   1855 	 * without taking a performance hit... (not sure where the real bug
   1856 	 * is.  It's buried in RAIDframe somewhere) :-(  GO ) */
   1857 
   1858 	if (bp->b_flags & B_ERROR) {
   1859 		bp->b_flags &= ~B_ERROR;
   1860 	}
   1861 	if (bp->b_error != 0) {
   1862 		bp->b_error = 0;
   1863 	}
   1864 #endif
   1865 	raidbp = pool_get(&raidframe_cbufpool, PR_NOWAIT);
   1866 	if (raidbp == NULL) {
   1867 		bp->b_flags |= B_ERROR;
   1868 		bp->b_error = ENOMEM;
   1869 		return (ENOMEM);
   1870 	}
   1871 	BUF_INIT(&raidbp->rf_buf);
   1872 
   1873 	/*
   1874 	 * context for raidiodone
   1875 	 */
   1876 	raidbp->rf_obp = bp;
   1877 	raidbp->req = req;
   1878 
   1879 	switch (req->type) {
   1880 	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
   1881 		/* XXX need to do something extra here.. */
   1882 		/* I'm leaving this in, as I've never actually seen it used,
   1883 		 * and I'd like folks to report it... GO */
    1884 		printf("WAKEUP CALLED\n");
   1885 		queue->numOutstanding++;
   1886 
   1887 		/* XXX need to glue the original buffer into this??  */
   1888 
   1889 		KernelWakeupFunc(&raidbp->rf_buf);
   1890 		break;
   1891 
   1892 	case RF_IO_TYPE_READ:
   1893 	case RF_IO_TYPE_WRITE:
   1894 
   1895 		if (req->tracerec) {
   1896 			RF_ETIMER_START(req->tracerec->timer);
   1897 		}
   1898 		InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
   1899 		    op | bp->b_flags, queue->rf_cinfo->ci_dev,
   1900 		    req->sectorOffset, req->numSector,
   1901 		    req->buf, KernelWakeupFunc, (void *) req,
   1902 		    queue->raidPtr->logBytesPerSector, req->b_proc);
   1903 
   1904 		if (rf_debugKernelAccess) {
   1905 			db1_printf(("dispatch: bp->b_blkno = %ld\n",
   1906 				(long) bp->b_blkno));
   1907 		}
   1908 		queue->numOutstanding++;
   1909 		queue->last_deq_sector = req->sectorOffset;
   1910 		/* acc wouldn't have been let in if there were any pending
   1911 		 * reqs at any other priority */
   1912 		queue->curPriority = req->priority;
   1913 
   1914 		db1_printf(("Going for %c to unit %d row %d col %d\n",
   1915 			    req->type, queue->raidPtr->raidid,
   1916 			    queue->row, queue->col));
   1917 		db1_printf(("sector %d count %d (%d bytes) %d\n",
   1918 			(int) req->sectorOffset, (int) req->numSector,
   1919 			(int) (req->numSector <<
   1920 			    queue->raidPtr->logBytesPerSector),
   1921 			(int) queue->raidPtr->logBytesPerSector));
   1922 		if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
   1923 			raidbp->rf_buf.b_vp->v_numoutput++;
   1924 		}
   1925 		VOP_STRATEGY(&raidbp->rf_buf);
   1926 
   1927 		break;
   1928 
   1929 	default:
   1930 		panic("bad req->type in rf_DispatchKernelIO");
   1931 	}
   1932 	db1_printf(("Exiting from DispatchKernelIO\n"));
   1933 
   1934 	return (0);
   1935 }
    1936 /* this is the callback function associated with an I/O invoked from
   1937    kernel code.
   1938  */
   1939 static void
   1940 KernelWakeupFunc(vbp)
   1941 	struct buf *vbp;
   1942 {
   1943 	RF_DiskQueueData_t *req = NULL;
   1944 	RF_DiskQueue_t *queue;
   1945 	struct raidbuf *raidbp = (struct raidbuf *) vbp;
   1946 	struct buf *bp;
   1947 	int s;
   1948 
   1949 	s = splbio();
   1950 	db1_printf(("recovering the request queue:\n"));
   1951 	req = raidbp->req;
   1952 
   1953 	bp = raidbp->rf_obp;
   1954 
   1955 	queue = (RF_DiskQueue_t *) req->queue;
   1956 
   1957 	if (raidbp->rf_buf.b_flags & B_ERROR) {
   1958 		bp->b_flags |= B_ERROR;
   1959 		bp->b_error = raidbp->rf_buf.b_error ?
   1960 		    raidbp->rf_buf.b_error : EIO;
   1961 	}
   1962 
   1963 	/* XXX methinks this could be wrong... */
   1964 #if 1
   1965 	bp->b_resid = raidbp->rf_buf.b_resid;
   1966 #endif
   1967 
   1968 	if (req->tracerec) {
   1969 		RF_ETIMER_STOP(req->tracerec->timer);
   1970 		RF_ETIMER_EVAL(req->tracerec->timer);
   1971 		RF_LOCK_MUTEX(rf_tracing_mutex);
   1972 		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   1973 		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
   1974 		req->tracerec->num_phys_ios++;
   1975 		RF_UNLOCK_MUTEX(rf_tracing_mutex);
   1976 	}
   1977 	bp->b_bcount = raidbp->rf_buf.b_bcount;	/* XXXX ?? */
   1978 
   1979 	/* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
   1980 	 * ballistic, and mark the component as hosed... */
   1981 
   1982 	if (bp->b_flags & B_ERROR) {
   1983 		/* Mark the disk as dead */
   1984 		/* but only mark it once... */
   1985 		if (queue->raidPtr->Disks[queue->row][queue->col].status ==
   1986 		    rf_ds_optimal) {
   1987 			printf("raid%d: IO Error.  Marking %s as failed.\n",
   1988 			       queue->raidPtr->raidid,
   1989 			       queue->raidPtr->Disks[queue->row][queue->col].devname);
   1990 			queue->raidPtr->Disks[queue->row][queue->col].status =
   1991 			    rf_ds_failed;
   1992 			queue->raidPtr->status[queue->row] = rf_rs_degraded;
   1993 			queue->raidPtr->numFailures++;
   1994 			queue->raidPtr->numNewFailures++;
   1995 		} else {	/* Disk is already dead... */
   1996 			/* printf("Disk already marked as dead!\n"); */
   1997 		}
   1998 
   1999 	}
   2000 
   2001 	pool_put(&raidframe_cbufpool, raidbp);
   2002 
   2003 	/* Fill in the error value */
   2004 
   2005 	req->error = (bp->b_flags & B_ERROR) ? bp->b_error : 0;
   2006 
   2007 	simple_lock(&queue->raidPtr->iodone_lock);
   2008 
   2009 	/* Drop this one on the "finished" queue... */
   2010 	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
   2011 
   2012 	/* Let the raidio thread know there is work to be done. */
   2013 	wakeup(&(queue->raidPtr->iodone));
   2014 
   2015 	simple_unlock(&queue->raidPtr->iodone_lock);
   2016 
   2017 	splx(s);
   2018 }
   2019 
   2020 
   2021 
   2022 /*
   2023  * initialize a buf structure for doing an I/O in the kernel.
   2024  */
   2025 static void
   2026 InitBP(bp, b_vp, rw_flag, dev, startSect, numSect, buf, cbFunc, cbArg,
   2027        logBytesPerSector, b_proc)
   2028 	struct buf *bp;
   2029 	struct vnode *b_vp;
   2030 	unsigned rw_flag;
   2031 	dev_t dev;
   2032 	RF_SectorNum_t startSect;
   2033 	RF_SectorCount_t numSect;
   2034 	caddr_t buf;
   2035 	void (*cbFunc) (struct buf *);
   2036 	void *cbArg;
   2037 	int logBytesPerSector;
   2038 	struct proc *b_proc;
   2039 {
   2040 	/* bp->b_flags       = B_PHYS | rw_flag; */
   2041 	bp->b_flags = B_CALL | rw_flag;	/* XXX need B_PHYS here too??? */
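	/* B_CALL asks biodone() to invoke bp->b_iodone (set to cbFunc
	 * below, i.e. KernelWakeupFunc) once the transfer completes. */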
   2042 	bp->b_bcount = numSect << logBytesPerSector;
   2043 	bp->b_bufsize = bp->b_bcount;
   2044 	bp->b_error = 0;
   2045 	bp->b_dev = dev;
   2046 	bp->b_data = buf;
   2047 	bp->b_blkno = startSect;
   2048 	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
   2049 	if (bp->b_bcount == 0) {
   2050 		panic("bp->b_bcount is zero in InitBP!!");
   2051 	}
   2052 	bp->b_proc = b_proc;
   2053 	bp->b_iodone = cbFunc;
   2054 	bp->b_vp = b_vp;
   2055 
   2056 }
   2057 
   2058 static void
   2059 raidgetdefaultlabel(raidPtr, rs, lp)
   2060 	RF_Raid_t *raidPtr;
   2061 	struct raid_softc *rs;
   2062 	struct disklabel *lp;
   2063 {
   2064 	memset(lp, 0, sizeof(*lp));
   2065 
   2066 	/* fabricate a label... */
   2067 	lp->d_secperunit = raidPtr->totalSectors;
   2068 	lp->d_secsize = raidPtr->bytesPerSector;
   2069 	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
   2070 	lp->d_ntracks = 4 * raidPtr->numCol;
   2071 	lp->d_ncylinders = raidPtr->totalSectors /
   2072 		(lp->d_nsectors * lp->d_ntracks);
   2073 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
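	/* Worked example (hypothetical geometry): a 4-column RAID 5 set
	 * with 32-sector stripe units and 102400 total sectors gives
	 * d_nsectors = 96 (3 data columns * 32), d_ntracks = 16,
	 * d_ncylinders = 102400 / (96 * 16) = 66 and d_secpercyl = 1536.
	 * The geometry is fabricated; d_secperunit and d_secsize are the
	 * values that actually matter. */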
   2074 
   2075 	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
   2076 	lp->d_type = DTYPE_RAID;
   2077 	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
   2078 	lp->d_rpm = 3600;
   2079 	lp->d_interleave = 1;
   2080 	lp->d_flags = 0;
   2081 
   2082 	lp->d_partitions[RAW_PART].p_offset = 0;
   2083 	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
   2084 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
   2085 	lp->d_npartitions = RAW_PART + 1;
   2086 
   2087 	lp->d_magic = DISKMAGIC;
   2088 	lp->d_magic2 = DISKMAGIC;
    2089 	lp->d_checksum = dkcksum(lp);
   2090 
   2091 }
   2092 /*
   2093  * Read the disklabel from the raid device.  If one is not present, fake one
   2094  * up.
   2095  */
   2096 static void
   2097 raidgetdisklabel(dev)
   2098 	dev_t   dev;
   2099 {
   2100 	int     unit = raidunit(dev);
   2101 	struct raid_softc *rs = &raid_softc[unit];
   2102 	const char   *errstring;
   2103 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2104 	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
   2105 	RF_Raid_t *raidPtr;
   2106 
   2107 	db1_printf(("Getting the disklabel...\n"));
   2108 
   2109 	memset(clp, 0, sizeof(*clp));
   2110 
   2111 	raidPtr = raidPtrs[unit];
   2112 
   2113 	raidgetdefaultlabel(raidPtr, rs, lp);
   2114 
   2115 	/*
   2116 	 * Call the generic disklabel extraction routine.
   2117 	 */
   2118 	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
   2119 	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
   2120 	if (errstring)
   2121 		raidmakedisklabel(rs);
   2122 	else {
   2123 		int     i;
   2124 		struct partition *pp;
   2125 
   2126 		/*
   2127 		 * Sanity check whether the found disklabel is valid.
   2128 		 *
    2129 		 * This is necessary since the total size of the raid device
    2130 		 * may vary when the interleave is changed even though exactly
    2131 		 * the same components are used, and an old disklabel may be
    2132 		 * used if one is found.
   2133 		 */
   2134 		if (lp->d_secperunit != rs->sc_size)
   2135 			printf("raid%d: WARNING: %s: "
   2136 			    "total sector size in disklabel (%d) != "
   2137 			    "the size of raid (%ld)\n", unit, rs->sc_xname,
   2138 			    lp->d_secperunit, (long) rs->sc_size);
   2139 		for (i = 0; i < lp->d_npartitions; i++) {
   2140 			pp = &lp->d_partitions[i];
   2141 			if (pp->p_offset + pp->p_size > rs->sc_size)
   2142 				printf("raid%d: WARNING: %s: end of partition `%c' "
   2143 				       "exceeds the size of raid (%ld)\n",
   2144 				       unit, rs->sc_xname, 'a' + i, (long) rs->sc_size);
   2145 		}
   2146 	}
   2147 
   2148 }
   2149 /*
   2150  * Take care of things one might want to take care of in the event
   2151  * that a disklabel isn't present.
   2152  */
   2153 static void
   2154 raidmakedisklabel(rs)
   2155 	struct raid_softc *rs;
   2156 {
   2157 	struct disklabel *lp = rs->sc_dkdev.dk_label;
   2158 	db1_printf(("Making a label..\n"));
   2159 
   2160 	/*
   2161 	 * For historical reasons, if there's no disklabel present
   2162 	 * the raw partition must be marked FS_BSDFFS.
   2163 	 */
   2164 
   2165 	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;
   2166 
   2167 	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));
   2168 
   2169 	lp->d_checksum = dkcksum(lp);
   2170 }
   2171 /*
    2172  * Look up the provided name in the filesystem.  If the file exists,
   2173  * is a valid block device, and isn't being used by anyone else,
   2174  * set *vpp to the file's vnode.
   2175  * You'll find the original of this in ccd.c
   2176  */
   2177 int
   2178 raidlookup(path, p, vpp)
   2179 	char   *path;
   2180 	struct proc *p;
   2181 	struct vnode **vpp;	/* result */
   2182 {
   2183 	struct nameidata nd;
   2184 	struct vnode *vp;
   2185 	struct vattr va;
   2186 	int     error;
   2187 
   2188 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
   2189 	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
   2190 		return (error);
   2191 	}
   2192 	vp = nd.ni_vp;
   2193 	if (vp->v_usecount > 1) {
   2194 		VOP_UNLOCK(vp, 0);
   2195 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
   2196 		return (EBUSY);
   2197 	}
   2198 	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
   2199 		VOP_UNLOCK(vp, 0);
   2200 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
   2201 		return (error);
   2202 	}
   2203 	/* XXX: eventually we should handle VREG, too. */
   2204 	if (va.va_type != VBLK) {
   2205 		VOP_UNLOCK(vp, 0);
   2206 		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
   2207 		return (ENOTBLK);
   2208 	}
   2209 	VOP_UNLOCK(vp, 0);
   2210 	*vpp = vp;
   2211 	return (0);
   2212 }
   2213 /*
   2214  * Wait interruptibly for an exclusive lock.
   2215  *
   2216  * XXX
   2217  * Several drivers do this; it should be abstracted and made MP-safe.
   2218  * (Hmm... where have we seen this warning before :->  GO )
   2219  */
   2220 static int
   2221 raidlock(rs)
   2222 	struct raid_softc *rs;
   2223 {
   2224 	int     error;
   2225 
   2226 	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
   2227 		rs->sc_flags |= RAIDF_WANTED;
   2228 		if ((error =
   2229 			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
   2230 			return (error);
   2231 	}
   2232 	rs->sc_flags |= RAIDF_LOCKED;
   2233 	return (0);
   2234 }
   2235 /*
   2236  * Unlock and wake up any waiters.
   2237  */
   2238 static void
   2239 raidunlock(rs)
   2240 	struct raid_softc *rs;
   2241 {
   2242 
   2243 	rs->sc_flags &= ~RAIDF_LOCKED;
   2244 	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
   2245 		rs->sc_flags &= ~RAIDF_WANTED;
   2246 		wakeup(rs);
   2247 	}
   2248 }
   2249 
   2250 
   2251 #define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
   2252 #define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
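/*
 * With the usual DEV_BSIZE of 512, the component label therefore starts
 * at block 32 of each component (16384 / 512) and spans two blocks
 * (1024 bytes); raidread_component_label() and raidwrite_component_label()
 * below derive b_blkno and b_bcount directly from these constants.
 */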
   2253 
   2254 int
   2255 raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
   2256 {
   2257 	RF_ComponentLabel_t clabel;
   2258 	raidread_component_label(dev, b_vp, &clabel);
   2259 	clabel.mod_counter = mod_counter;
   2260 	clabel.clean = RF_RAID_CLEAN;
   2261 	raidwrite_component_label(dev, b_vp, &clabel);
   2262 	return(0);
   2263 }
   2264 
   2265 
   2266 int
   2267 raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
   2268 {
   2269 	RF_ComponentLabel_t clabel;
   2270 	raidread_component_label(dev, b_vp, &clabel);
   2271 	clabel.mod_counter = mod_counter;
   2272 	clabel.clean = RF_RAID_DIRTY;
   2273 	raidwrite_component_label(dev, b_vp, &clabel);
   2274 	return(0);
   2275 }
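/*
 * Both helpers above are read-modify-write cycles on the component label:
 * read the label, stamp the caller's mod_counter, set the clean field,
 * and write the label back.  rf_markalldirty() below uses raidmarkdirty()
 * on every live component, and rf_update_component_labels() calls
 * raidmarkclean() on the final update once parity is known to be good.
 */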
   2276 
   2277 /* ARGSUSED */
   2278 int
   2279 raidread_component_label(dev, b_vp, clabel)
   2280 	dev_t dev;
   2281 	struct vnode *b_vp;
   2282 	RF_ComponentLabel_t *clabel;
   2283 {
   2284 	struct buf *bp;
   2285 	const struct bdevsw *bdev;
   2286 	int error;
   2287 
   2288 	/* XXX should probably ensure that we don't try to do this if
   2289 	   someone has changed rf_protected_sectors. */
   2290 
   2291 	if (b_vp == NULL) {
   2292 		/* For whatever reason, this component is not valid.
   2293 		   Don't try to read a component label from it. */
   2294 		return(EINVAL);
   2295 	}
   2296 
   2297 	/* get a block of the appropriate size... */
   2298 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2299 	bp->b_dev = dev;
   2300 
   2301 	/* get our ducks in a row for the read */
   2302 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2303 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2304 	bp->b_flags |= B_READ;
   2305  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2306 
   2307 	bdev = bdevsw_lookup(bp->b_dev);
    2308 	if (bdev == NULL) {
		brelse(bp);		/* don't leak the buffer on error */
    2309 		return (ENXIO);
	}
   2310 	(*bdev->d_strategy)(bp);
   2311 
   2312 	error = biowait(bp);
   2313 
   2314 	if (!error) {
   2315 		memcpy(clabel, bp->b_data,
   2316 		       sizeof(RF_ComponentLabel_t));
   2317         }
   2318 
   2319 	brelse(bp);
   2320 	return(error);
   2321 }
   2322 /* ARGSUSED */
   2323 int
   2324 raidwrite_component_label(dev, b_vp, clabel)
   2325 	dev_t dev;
   2326 	struct vnode *b_vp;
   2327 	RF_ComponentLabel_t *clabel;
   2328 {
   2329 	struct buf *bp;
   2330 	const struct bdevsw *bdev;
   2331 	int error;
   2332 
   2333 	/* get a block of the appropriate size... */
   2334 	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
   2335 	bp->b_dev = dev;
   2336 
   2337 	/* get our ducks in a row for the write */
   2338 	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
   2339 	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
   2340 	bp->b_flags |= B_WRITE;
   2341  	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;
   2342 
   2343 	memset(bp->b_data, 0, RF_COMPONENT_INFO_SIZE );
   2344 
   2345 	memcpy(bp->b_data, clabel, sizeof(RF_ComponentLabel_t));
   2346 
   2347 	bdev = bdevsw_lookup(bp->b_dev);
    2348 	if (bdev == NULL) {
		brelse(bp);		/* don't leak the buffer on error */
    2349 		return (ENXIO);
	}
   2350 	(*bdev->d_strategy)(bp);
   2351 	error = biowait(bp);
   2352 	brelse(bp);
   2353 	if (error) {
   2354 #if 1
   2355 		printf("Failed to write RAID component info!\n");
   2356 #endif
   2357 	}
   2358 
   2359 	return(error);
   2360 }
   2361 
   2362 void
   2363 rf_markalldirty(raidPtr)
   2364 	RF_Raid_t *raidPtr;
   2365 {
   2366 	RF_ComponentLabel_t clabel;
   2367 	int sparecol;
   2368 	int r,c;
   2369 	int i,j;
   2370 	int srow, scol;
   2371 
   2372 	raidPtr->mod_counter++;
   2373 	for (r = 0; r < raidPtr->numRow; r++) {
   2374 		for (c = 0; c < raidPtr->numCol; c++) {
   2375 			/* we don't want to touch (at all) a disk that has
   2376 			   failed */
   2377 			if (!RF_DEAD_DISK(raidPtr->Disks[r][c].status)) {
   2378 				raidread_component_label(
   2379 					raidPtr->Disks[r][c].dev,
   2380 					raidPtr->raid_cinfo[r][c].ci_vp,
   2381 					&clabel);
   2382 				if (clabel.status == rf_ds_spared) {
   2383 					/* XXX do something special...
   2384 					 but whatever you do, don't
   2385 					 try to access it!! */
   2386 				} else {
   2387 					raidmarkdirty(
   2388 					      raidPtr->Disks[r][c].dev,
   2389 					      raidPtr->raid_cinfo[r][c].ci_vp,
   2390 					      raidPtr->mod_counter);
   2391 				}
   2392 			}
   2393 		}
   2394 	}
   2395 
   2396 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2397 		sparecol = raidPtr->numCol + c;
   2398 		if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
   2399 			/*
   2400 
   2401 			   we claim this disk is "optimal" if it's
   2402 			   rf_ds_used_spare, as that means it should be
   2403 			   directly substitutable for the disk it replaced.
   2404 			   We note that too...
   2405 
   2406 			 */
   2407 
   2408 			for(i=0;i<raidPtr->numRow;i++) {
   2409 				for(j=0;j<raidPtr->numCol;j++) {
   2410 					if ((raidPtr->Disks[i][j].spareRow ==
   2411 					     0) &&
   2412 					    (raidPtr->Disks[i][j].spareCol ==
   2413 					     sparecol)) {
   2414 						srow = i;
   2415 						scol = j;
   2416 						break;
   2417 					}
   2418 				}
   2419 			}
   2420 
   2421 			raidread_component_label(
   2422 				 raidPtr->Disks[0][sparecol].dev,
   2423 				 raidPtr->raid_cinfo[0][sparecol].ci_vp,
   2424 				 &clabel);
   2425 			/* make sure status is noted */
   2426 
   2427 			raid_init_component_label(raidPtr, &clabel);
   2428 
   2429 			clabel.row = srow;
   2430 			clabel.column = scol;
   2431 			/* Note: we *don't* change status from rf_ds_used_spare
   2432 			   to rf_ds_optimal */
   2433 			/* clabel.status = rf_ds_optimal; */
   2434 
   2435 			raidmarkdirty(raidPtr->Disks[0][sparecol].dev,
   2436 				      raidPtr->raid_cinfo[0][sparecol].ci_vp,
   2437 				      raidPtr->mod_counter);
   2438 		}
   2439 	}
   2440 }
   2441 
   2442 
   2443 void
   2444 rf_update_component_labels(raidPtr, final)
   2445 	RF_Raid_t *raidPtr;
   2446 	int final;
   2447 {
   2448 	RF_ComponentLabel_t clabel;
   2449 	int sparecol;
   2450 	int r,c;
   2451 	int i,j;
   2452 	int srow, scol;
   2453 
   2454 	srow = -1;
   2455 	scol = -1;
   2456 
   2457 	/* XXX should do extra checks to make sure things really are clean,
   2458 	   rather than blindly setting the clean bit... */
   2459 
   2460 	raidPtr->mod_counter++;
   2461 
   2462 	for (r = 0; r < raidPtr->numRow; r++) {
   2463 		for (c = 0; c < raidPtr->numCol; c++) {
   2464 			if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
   2465 				raidread_component_label(
   2466 					raidPtr->Disks[r][c].dev,
   2467 					raidPtr->raid_cinfo[r][c].ci_vp,
   2468 					&clabel);
   2469 				/* make sure status is noted */
   2470 				clabel.status = rf_ds_optimal;
   2471 				/* bump the counter */
   2472 				clabel.mod_counter = raidPtr->mod_counter;
   2473 
   2474 				raidwrite_component_label(
   2475 					raidPtr->Disks[r][c].dev,
   2476 					raidPtr->raid_cinfo[r][c].ci_vp,
   2477 					&clabel);
   2478 				if (final == RF_FINAL_COMPONENT_UPDATE) {
   2479 					if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2480 						raidmarkclean(
   2481 							      raidPtr->Disks[r][c].dev,
   2482 							      raidPtr->raid_cinfo[r][c].ci_vp,
   2483 							      raidPtr->mod_counter);
   2484 					}
   2485 				}
   2486 			}
   2487 			/* else we don't touch it.. */
   2488 		}
   2489 	}
   2490 
   2491 	for( c = 0; c < raidPtr->numSpare ; c++) {
   2492 		sparecol = raidPtr->numCol + c;
   2493 		/* Need to ensure that the reconstruct actually completed! */
   2494 		if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
   2495 			/*
   2496 
   2497 			   we claim this disk is "optimal" if it's
   2498 			   rf_ds_used_spare, as that means it should be
   2499 			   directly substitutable for the disk it replaced.
   2500 			   We note that too...
   2501 
   2502 			 */
   2503 
   2504 			for(i=0;i<raidPtr->numRow;i++) {
   2505 				for(j=0;j<raidPtr->numCol;j++) {
   2506 					if ((raidPtr->Disks[i][j].spareRow ==
   2507 					     0) &&
   2508 					    (raidPtr->Disks[i][j].spareCol ==
   2509 					     sparecol)) {
   2510 						srow = i;
   2511 						scol = j;
   2512 						break;
   2513 					}
   2514 				}
   2515 			}
   2516 
   2517 			/* XXX shouldn't *really* need this... */
   2518 			raidread_component_label(
   2519 				      raidPtr->Disks[0][sparecol].dev,
   2520 				      raidPtr->raid_cinfo[0][sparecol].ci_vp,
   2521 				      &clabel);
   2522 			/* make sure status is noted */
   2523 
   2524 			raid_init_component_label(raidPtr, &clabel);
   2525 
   2526 			clabel.mod_counter = raidPtr->mod_counter;
   2527 			clabel.row = srow;
   2528 			clabel.column = scol;
   2529 			clabel.status = rf_ds_optimal;
   2530 
   2531 			raidwrite_component_label(
   2532 				      raidPtr->Disks[0][sparecol].dev,
   2533 				      raidPtr->raid_cinfo[0][sparecol].ci_vp,
   2534 				      &clabel);
   2535 			if (final == RF_FINAL_COMPONENT_UPDATE) {
   2536 				if (raidPtr->parity_good == RF_RAID_CLEAN) {
   2537 					raidmarkclean( raidPtr->Disks[0][sparecol].dev,
   2538 						       raidPtr->raid_cinfo[0][sparecol].ci_vp,
   2539 						       raidPtr->mod_counter);
   2540 				}
   2541 			}
   2542 		}
   2543 	}
   2544 }
   2545 
   2546 void
   2547 rf_close_component(raidPtr, vp, auto_configured)
   2548 	RF_Raid_t *raidPtr;
   2549 	struct vnode *vp;
   2550 	int auto_configured;
   2551 {
   2552 	struct proc *p;
   2553 
   2554 	p = raidPtr->engine_thread;
   2555 
   2556 	if (vp != NULL) {
   2557 		if (auto_configured == 1) {
   2558 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2559 			VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2560 			vput(vp);
   2561 
   2562 		} else {
   2563 			(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
   2564 		}
   2565 	}
   2566 }
   2567 
   2568 
   2569 void
   2570 rf_UnconfigureVnodes(raidPtr)
   2571 	RF_Raid_t *raidPtr;
   2572 {
   2573 	int r,c;
   2574 	struct vnode *vp;
   2575 	int acd;
   2576 
   2577 
   2578 	/* We take this opportunity to close the vnodes like we should.. */
   2579 
   2580 	for (r = 0; r < raidPtr->numRow; r++) {
   2581 		for (c = 0; c < raidPtr->numCol; c++) {
   2582 			vp = raidPtr->raid_cinfo[r][c].ci_vp;
   2583 			acd = raidPtr->Disks[r][c].auto_configured;
   2584 			rf_close_component(raidPtr, vp, acd);
   2585 			raidPtr->raid_cinfo[r][c].ci_vp = NULL;
   2586 			raidPtr->Disks[r][c].auto_configured = 0;
   2587 		}
   2588 	}
   2589 	for (r = 0; r < raidPtr->numSpare; r++) {
   2590 		vp = raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp;
   2591 		acd = raidPtr->Disks[0][raidPtr->numCol + r].auto_configured;
   2592 		rf_close_component(raidPtr, vp, acd);
   2593 		raidPtr->raid_cinfo[0][raidPtr->numCol + r].ci_vp = NULL;
   2594 		raidPtr->Disks[0][raidPtr->numCol + r].auto_configured = 0;
   2595 	}
   2596 }
   2597 
   2598 
   2599 void
   2600 rf_ReconThread(req)
   2601 	struct rf_recon_req *req;
   2602 {
   2603 	int     s;
   2604 	RF_Raid_t *raidPtr;
   2605 
   2606 	s = splbio();
   2607 	raidPtr = (RF_Raid_t *) req->raidPtr;
   2608 	raidPtr->recon_in_progress = 1;
   2609 
   2610 	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
   2611 		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));
   2612 
   2613 	/* XXX get rid of this! we don't need it at all.. */
   2614 	RF_Free(req, sizeof(*req));
   2615 
   2616 	raidPtr->recon_in_progress = 0;
   2617 	splx(s);
   2618 
   2619 	/* That's all... */
   2620 	kthread_exit(0);        /* does not return */
   2621 }
   2622 
   2623 void
   2624 rf_RewriteParityThread(raidPtr)
   2625 	RF_Raid_t *raidPtr;
   2626 {
   2627 	int retcode;
   2628 	int s;
   2629 
   2630 	raidPtr->parity_rewrite_in_progress = 1;
   2631 	s = splbio();
   2632 	retcode = rf_RewriteParity(raidPtr);
   2633 	splx(s);
   2634 	if (retcode) {
   2635 		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
   2636 	} else {
   2637 		/* set the clean bit!  If we shutdown correctly,
   2638 		   the clean bit on each component label will get
   2639 		   set */
   2640 		raidPtr->parity_good = RF_RAID_CLEAN;
   2641 	}
   2642 	raidPtr->parity_rewrite_in_progress = 0;
   2643 
   2644 	/* Anyone waiting for us to stop?  If so, inform them... */
   2645 	if (raidPtr->waitShutdown) {
   2646 		wakeup(&raidPtr->parity_rewrite_in_progress);
   2647 	}
   2648 
   2649 	/* That's all... */
   2650 	kthread_exit(0);        /* does not return */
   2651 }
   2652 
   2653 
   2654 void
   2655 rf_CopybackThread(raidPtr)
   2656 	RF_Raid_t *raidPtr;
   2657 {
   2658 	int s;
   2659 
   2660 	raidPtr->copyback_in_progress = 1;
   2661 	s = splbio();
   2662 	rf_CopybackReconstructedData(raidPtr);
   2663 	splx(s);
   2664 	raidPtr->copyback_in_progress = 0;
   2665 
   2666 	/* That's all... */
   2667 	kthread_exit(0);        /* does not return */
   2668 }
   2669 
   2670 
   2671 void
   2672 rf_ReconstructInPlaceThread(req)
   2673 	struct rf_recon_req *req;
   2674 {
   2675 	int s;
   2676 	RF_Raid_t *raidPtr;
   2677 
   2678 	s = splbio();
   2679 	raidPtr = req->raidPtr;
   2680 	raidPtr->recon_in_progress = 1;
   2681 	rf_ReconstructInPlace(raidPtr, req->row, req->col);
   2682 	RF_Free(req, sizeof(*req));
   2683 	raidPtr->recon_in_progress = 0;
   2684 	splx(s);
   2685 
   2686 	/* That's all... */
   2687 	kthread_exit(0);        /* does not return */
   2688 }
   2689 
   2690 RF_AutoConfig_t *
   2691 rf_find_raid_components()
   2692 {
   2693 	struct vnode *vp;
   2694 	struct disklabel label;
   2695 	struct device *dv;
   2696 	dev_t dev;
   2697 	int bmajor;
   2698 	int error;
   2699 	int i;
   2700 	int good_one;
   2701 	RF_ComponentLabel_t *clabel;
   2702 	RF_AutoConfig_t *ac_list;
   2703 	RF_AutoConfig_t *ac;
   2704 
   2705 
   2706 	/* initialize the AutoConfig list */
   2707 	ac_list = NULL;
   2708 
   2709 	/* we begin by trolling through *all* the devices on the system */
   2710 
   2711 	for (dv = alldevs.tqh_first; dv != NULL;
   2712 	     dv = dv->dv_list.tqe_next) {
   2713 
   2714 		/* we are only interested in disks... */
   2715 		if (dv->dv_class != DV_DISK)
   2716 			continue;
   2717 
   2718 		/* we don't care about floppies... */
   2719 		if (!strcmp(dv->dv_cfdata->cf_name,"fd")) {
   2720 			continue;
   2721 		}
   2722 
   2723 		/* we don't care about CD's... */
   2724 		if (!strcmp(dv->dv_cfdata->cf_name,"cd")) {
   2725 			continue;
   2726 		}
   2727 
   2728 		/* hdfd is the Atari/Hades floppy driver */
   2729 		if (!strcmp(dv->dv_cfdata->cf_name,"hdfd")) {
   2730 			continue;
   2731 		}
   2732 		/* fdisa is the Atari/Milan floppy driver */
   2733 		if (!strcmp(dv->dv_cfdata->cf_name,"fdisa")) {
   2734 			continue;
   2735 		}
   2736 
   2737 		/* need to find the device_name_to_block_device_major stuff */
   2738 		bmajor = devsw_name2blk(dv->dv_xname, NULL, 0);
   2739 
   2740 		/* get a vnode for the raw partition of this disk */
   2741 
   2742 		dev = MAKEDISKDEV(bmajor, dv->dv_unit, RAW_PART);
   2743 		if (bdevvp(dev, &vp))
   2744 			panic("RAID can't alloc vnode");
   2745 
   2746 		error = VOP_OPEN(vp, FREAD, NOCRED, 0);
   2747 
   2748 		if (error) {
   2749 			/* "Who cares."  Continue looking
    2750 			   for something that exists */
   2751 			vput(vp);
   2752 			continue;
   2753 		}
   2754 
   2755 		/* Ok, the disk exists.  Go get the disklabel. */
   2756 		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED, 0);
   2757 		if (error) {
   2758 			/*
   2759 			 * XXX can't happen - open() would
   2760 			 * have errored out (or faked up one)
   2761 			 */
   2762 			printf("can't get label for dev %s%c (%d)!?!?\n",
   2763 			       dv->dv_xname, 'a' + RAW_PART, error);
   2764 		}
   2765 
   2766 		/* don't need this any more.  We'll allocate it again
   2767 		   a little later if we really do... */
   2768 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2769 		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2770 		vput(vp);
   2771 
   2772 		for (i=0; i < label.d_npartitions; i++) {
   2773 			/* We only support partitions marked as RAID */
   2774 			if (label.d_partitions[i].p_fstype != FS_RAID)
   2775 				continue;
   2776 
   2777 			dev = MAKEDISKDEV(bmajor, dv->dv_unit, i);
   2778 			if (bdevvp(dev, &vp))
   2779 				panic("RAID can't alloc vnode");
   2780 
   2781 			error = VOP_OPEN(vp, FREAD, NOCRED, 0);
   2782 			if (error) {
   2783 				/* Whatever... */
   2784 				vput(vp);
   2785 				continue;
   2786 			}
   2787 
   2788 			good_one = 0;
   2789 
   2790 			clabel = (RF_ComponentLabel_t *)
   2791 				malloc(sizeof(RF_ComponentLabel_t),
   2792 				       M_RAIDFRAME, M_NOWAIT);
   2793 			if (clabel == NULL) {
   2794 				/* XXX CLEANUP HERE */
   2795 				printf("RAID auto config: out of memory!\n");
   2796 				return(NULL); /* XXX probably should panic? */
   2797 			}
   2798 
   2799 			if (!raidread_component_label(dev, vp, clabel)) {
   2800 				/* Got the label.  Does it look reasonable? */
   2801 				if (rf_reasonable_label(clabel) &&
   2802 				    (clabel->partitionSize <=
   2803 				     label.d_partitions[i].p_size)) {
   2804 #if DEBUG
   2805 					printf("Component on: %s%c: %d\n",
   2806 					       dv->dv_xname, 'a'+i,
   2807 					       label.d_partitions[i].p_size);
   2808 					rf_print_component_label(clabel);
   2809 #endif
   2810 					/* if it's reasonable, add it,
   2811 					   else ignore it. */
   2812 					ac = (RF_AutoConfig_t *)
   2813 						malloc(sizeof(RF_AutoConfig_t),
   2814 						       M_RAIDFRAME,
   2815 						       M_NOWAIT);
   2816 					if (ac == NULL) {
   2817 						/* XXX should panic?? */
   2818 						return(NULL);
   2819 					}
   2820 
   2821 					sprintf(ac->devname, "%s%c",
   2822 						dv->dv_xname, 'a'+i);
   2823 					ac->dev = dev;
   2824 					ac->vp = vp;
   2825 					ac->clabel = clabel;
   2826 					ac->next = ac_list;
   2827 					ac_list = ac;
   2828 					good_one = 1;
   2829 				}
   2830 			}
   2831 			if (!good_one) {
   2832 				/* cleanup */
   2833 				free(clabel, M_RAIDFRAME);
   2834 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   2835 				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED, 0);
   2836 				vput(vp);
   2837 			}
   2838 		}
   2839 	}
   2840 	return(ac_list);
   2841 }
   2842 
   2843 static int
   2844 rf_reasonable_label(clabel)
   2845 	RF_ComponentLabel_t *clabel;
   2846 {
   2847 
   2848 	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
   2849 	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
   2850 	    ((clabel->clean == RF_RAID_CLEAN) ||
   2851 	     (clabel->clean == RF_RAID_DIRTY)) &&
   2852 	    clabel->row >=0 &&
   2853 	    clabel->column >= 0 &&
   2854 	    clabel->num_rows > 0 &&
   2855 	    clabel->num_columns > 0 &&
   2856 	    clabel->row < clabel->num_rows &&
   2857 	    clabel->column < clabel->num_columns &&
   2858 	    clabel->blockSize > 0 &&
   2859 	    clabel->numBlocks > 0) {
   2860 		/* label looks reasonable enough... */
   2861 		return(1);
   2862 	}
   2863 	return(0);
   2864 }
   2865 
   2866 
   2867 #if DEBUG
   2868 void
   2869 rf_print_component_label(clabel)
   2870 	RF_ComponentLabel_t *clabel;
   2871 {
   2872 	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
   2873 	       clabel->row, clabel->column,
   2874 	       clabel->num_rows, clabel->num_columns);
   2875 	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
   2876 	       clabel->version, clabel->serial_number,
   2877 	       clabel->mod_counter);
   2878 	printf("   Clean: %s Status: %d\n",
   2879 	       clabel->clean ? "Yes" : "No", clabel->status );
   2880 	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
   2881 	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
   2882 	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
   2883 	       (char) clabel->parityConfig, clabel->blockSize,
   2884 	       clabel->numBlocks);
   2885 	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No" );
   2886 	printf("   Contains root partition: %s\n",
   2887 	       clabel->root_partition ? "Yes" : "No" );
   2888 	printf("   Last configured as: raid%d\n", clabel->last_unit );
   2889 #if 0
   2890 	   printf("   Config order: %d\n", clabel->config_order);
   2891 #endif
   2892 
   2893 }
   2894 #endif
   2895 
   2896 RF_ConfigSet_t *
   2897 rf_create_auto_sets(ac_list)
   2898 	RF_AutoConfig_t *ac_list;
   2899 {
   2900 	RF_AutoConfig_t *ac;
   2901 	RF_ConfigSet_t *config_sets;
   2902 	RF_ConfigSet_t *cset;
   2903 	RF_AutoConfig_t *ac_next;
   2904 
   2905 
   2906 	config_sets = NULL;
   2907 
   2908 	/* Go through the AutoConfig list, and figure out which components
   2909 	   belong to what sets.  */
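	/* For example (hypothetical component names): if the scan turned up
	 * wd0e, wd1e and wd2e sharing one serial number and wd3e, wd4e
	 * sharing another, the first component seeds config_sets, the next
	 * two match it via rf_does_it_fit() and are chained onto that set,
	 * and wd3e/wd4e end up in a second set prepended to config_sets. */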
   2910 	ac = ac_list;
   2911 	while(ac!=NULL) {
   2912 		/* we're going to putz with ac->next, so save it here
   2913 		   for use at the end of the loop */
   2914 		ac_next = ac->next;
   2915 
   2916 		if (config_sets == NULL) {
   2917 			/* will need at least this one... */
   2918 			config_sets = (RF_ConfigSet_t *)
   2919 				malloc(sizeof(RF_ConfigSet_t),
   2920 				       M_RAIDFRAME, M_NOWAIT);
   2921 			if (config_sets == NULL) {
   2922 				panic("rf_create_auto_sets: No memory!");
   2923 			}
   2924 			/* this one is easy :) */
   2925 			config_sets->ac = ac;
   2926 			config_sets->next = NULL;
   2927 			config_sets->rootable = 0;
   2928 			ac->next = NULL;
   2929 		} else {
   2930 			/* which set does this component fit into? */
   2931 			cset = config_sets;
   2932 			while(cset!=NULL) {
   2933 				if (rf_does_it_fit(cset, ac)) {
   2934 					/* looks like it matches... */
   2935 					ac->next = cset->ac;
   2936 					cset->ac = ac;
   2937 					break;
   2938 				}
   2939 				cset = cset->next;
   2940 			}
   2941 			if (cset==NULL) {
   2942 				/* didn't find a match above... new set..*/
   2943 				cset = (RF_ConfigSet_t *)
   2944 					malloc(sizeof(RF_ConfigSet_t),
   2945 					       M_RAIDFRAME, M_NOWAIT);
   2946 				if (cset == NULL) {
   2947 					panic("rf_create_auto_sets: No memory!");
   2948 				}
   2949 				cset->ac = ac;
   2950 				ac->next = NULL;
   2951 				cset->next = config_sets;
   2952 				cset->rootable = 0;
   2953 				config_sets = cset;
   2954 			}
   2955 		}
   2956 		ac = ac_next;
   2957 	}
   2958 
   2959 
   2960 	return(config_sets);
   2961 }
   2962 
   2963 static int
   2964 rf_does_it_fit(cset, ac)
   2965 	RF_ConfigSet_t *cset;
   2966 	RF_AutoConfig_t *ac;
   2967 {
   2968 	RF_ComponentLabel_t *clabel1, *clabel2;
   2969 
   2970 	/* If this one matches the *first* one in the set, that's good
   2971 	   enough, since the other members of the set would have been
   2972 	   through here too... */
   2973 	/* note that we are not checking partitionSize here..
   2974 
   2975 	   Note that we are also not checking the mod_counters here.
    2976 	   If everything else matches except the mod_counter, that's
   2977 	   good enough for this test.  We will deal with the mod_counters
   2978 	   a little later in the autoconfiguration process.
   2979 
   2980 	    (clabel1->mod_counter == clabel2->mod_counter) &&
   2981 
   2982 	   The reason we don't check for this is that failed disks
   2983 	   will have lower modification counts.  If those disks are
   2984 	   not added to the set they used to belong to, then they will
   2985 	   form their own set, which may result in 2 different sets,
   2986 	   for example, competing to be configured at raid0, and
   2987 	   perhaps competing to be the root filesystem set.  If the
   2988 	   wrong ones get configured, or both attempt to become /,
    2989 	   weird behaviour and/or serious lossage will occur.  Thus we
   2990 	   need to bring them into the fold here, and kick them out at
   2991 	   a later point.
   2992 
   2993 	*/
   2994 
   2995 	clabel1 = cset->ac->clabel;
   2996 	clabel2 = ac->clabel;
   2997 	if ((clabel1->version == clabel2->version) &&
   2998 	    (clabel1->serial_number == clabel2->serial_number) &&
   2999 	    (clabel1->num_rows == clabel2->num_rows) &&
   3000 	    (clabel1->num_columns == clabel2->num_columns) &&
   3001 	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
   3002 	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
   3003 	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
   3004 	    (clabel1->parityConfig == clabel2->parityConfig) &&
   3005 	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
   3006 	    (clabel1->blockSize == clabel2->blockSize) &&
   3007 	    (clabel1->numBlocks == clabel2->numBlocks) &&
   3008 	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
   3009 	    (clabel1->root_partition == clabel2->root_partition) &&
   3010 	    (clabel1->last_unit == clabel2->last_unit) &&
   3011 	    (clabel1->config_order == clabel2->config_order)) {
    3012 		/* if it gets here, it almost *has* to be a match */
   3013 	} else {
   3014 		/* it's not consistent with somebody in the set..
   3015 		   punt */
   3016 		return(0);
   3017 	}
   3018 	/* all was fine.. it must fit... */
   3019 	return(1);
   3020 }
   3021 
   3022 int
   3023 rf_have_enough_components(cset)
   3024 	RF_ConfigSet_t *cset;
   3025 {
   3026 	RF_AutoConfig_t *ac;
   3027 	RF_AutoConfig_t *auto_config;
   3028 	RF_ComponentLabel_t *clabel;
   3029 	int r,c;
   3030 	int num_rows;
   3031 	int num_cols;
   3032 	int num_missing;
   3033 	int mod_counter;
   3034 	int mod_counter_found;
   3035 	int even_pair_failed;
   3036 	char parity_type;
   3037 
   3038 
   3039 	/* check to see that we have enough 'live' components
   3040 	   of this set.  If so, we can configure it if necessary */
   3041 
   3042 	num_rows = cset->ac->clabel->num_rows;
   3043 	num_cols = cset->ac->clabel->num_columns;
   3044 	parity_type = cset->ac->clabel->parityConfig;
   3045 
   3046 	/* XXX Check for duplicate components!?!?!? */
   3047 
   3048 	/* Determine what the mod_counter is supposed to be for this set. */
   3049 
   3050 	mod_counter_found = 0;
   3051 	mod_counter = 0;
   3052 	ac = cset->ac;
   3053 	while(ac!=NULL) {
   3054 		if (mod_counter_found==0) {
   3055 			mod_counter = ac->clabel->mod_counter;
   3056 			mod_counter_found = 1;
   3057 		} else {
   3058 			if (ac->clabel->mod_counter > mod_counter) {
   3059 				mod_counter = ac->clabel->mod_counter;
   3060 			}
   3061 		}
   3062 		ac = ac->next;
   3063 	}
   3064 
   3065 	num_missing = 0;
   3066 	auto_config = cset->ac;
   3067 
   3068 	for(r=0; r<num_rows; r++) {
   3069 		even_pair_failed = 0;
   3070 		for(c=0; c<num_cols; c++) {
   3071 			ac = auto_config;
   3072 			while(ac!=NULL) {
   3073 				if ((ac->clabel->row == r) &&
   3074 				    (ac->clabel->column == c) &&
   3075 				    (ac->clabel->mod_counter == mod_counter)) {
   3076 					/* it's this one... */
   3077 #if DEBUG
   3078 					printf("Found: %s at %d,%d\n",
   3079 					       ac->devname,r,c);
   3080 #endif
   3081 					break;
   3082 				}
   3083 				ac=ac->next;
   3084 			}
   3085 			if (ac==NULL) {
   3086 				/* Didn't find one here! */
   3087 				/* special case for RAID 1, especially
   3088 				   where there are more than 2
   3089 				   components (where RAIDframe treats
   3090 				   things a little differently :( ) */
   3091 				if (parity_type == '1') {
   3092 					if (c%2 == 0) { /* even component */
   3093 						even_pair_failed = 1;
   3094 					} else { /* odd component.  If
   3095                                                     we're failed, and
   3096                                                     so is the even
   3097                                                     component, it's
   3098                                                     "Good Night, Charlie" */
   3099 						if (even_pair_failed == 1) {
   3100 							return(0);
   3101 						}
   3102 					}
   3103 				} else {
   3104 					/* normal accounting */
   3105 					num_missing++;
   3106 				}
   3107 			}
   3108 			if ((parity_type == '1') && (c%2 == 1)) {
   3109 				/* Just did an even component, and we didn't
   3110 				   bail.. reset the even_pair_failed flag,
   3111 				   and go on to the next component.... */
   3112 				even_pair_failed = 0;
   3113 			}
   3114 		}
   3115 	}
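	/* To recap the accounting above (hypothetical layout): in a RAID 1
	 * set the components form mirror pairs (0,1), (2,3), ...; losing
	 * both halves of any one pair makes us give up immediately, while
	 * losing, say, component 0 and component 3 leaves each pair with a
	 * survivor.  For the other levels, num_missing simply counts absent
	 * components and is compared against the per-level limit below. */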
   3116 
   3117 	clabel = cset->ac->clabel;
   3118 
   3119 	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
   3120 	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
   3121 	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
   3122 		/* XXX this needs to be made *much* more general */
   3123 		/* Too many failures */
   3124 		return(0);
   3125 	}
   3126 	/* otherwise, all is well, and we've got enough to take a kick
   3127 	   at autoconfiguring this set */
   3128 	return(1);
   3129 }
   3130 
   3131 void
   3132 rf_create_configuration(ac,config,raidPtr)
   3133 	RF_AutoConfig_t *ac;
   3134 	RF_Config_t *config;
   3135 	RF_Raid_t *raidPtr;
   3136 {
   3137 	RF_ComponentLabel_t *clabel;
   3138 	int i;
   3139 
   3140 	clabel = ac->clabel;
   3141 
   3142 	/* 1. Fill in the common stuff */
   3143 	config->numRow = clabel->num_rows;
   3144 	config->numCol = clabel->num_columns;
   3145 	config->numSpare = 0; /* XXX should this be set here? */
   3146 	config->sectPerSU = clabel->sectPerSU;
   3147 	config->SUsPerPU = clabel->SUsPerPU;
   3148 	config->SUsPerRU = clabel->SUsPerRU;
   3149 	config->parityConfig = clabel->parityConfig;
   3150 	/* XXX... */
   3151 	strcpy(config->diskQueueType,"fifo");
   3152 	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
   3153 	config->layoutSpecificSize = 0; /* XXX ?? */
   3154 
   3155 	while(ac!=NULL) {
   3156 		/* row/col values will be in range due to the checks
    3157 		   in rf_reasonable_label() */
   3158 		strcpy(config->devnames[ac->clabel->row][ac->clabel->column],
   3159 		       ac->devname);
   3160 		ac = ac->next;
   3161 	}
   3162 
   3163 	for(i=0;i<RF_MAXDBGV;i++) {
    3164 		config->debugVars[i][0] = '\0';
   3165 	}
   3166 }
   3167 
   3168 int
   3169 rf_set_autoconfig(raidPtr, new_value)
   3170 	RF_Raid_t *raidPtr;
   3171 	int new_value;
   3172 {
   3173 	RF_ComponentLabel_t clabel;
   3174 	struct vnode *vp;
   3175 	dev_t dev;
   3176 	int row, column;
   3177 	int sparecol;
   3178 
   3179 	raidPtr->autoconfigure = new_value;
   3180 	for(row=0; row<raidPtr->numRow; row++) {
   3181 		for(column=0; column<raidPtr->numCol; column++) {
   3182 			if (raidPtr->Disks[row][column].status ==
   3183 			    rf_ds_optimal) {
   3184 				dev = raidPtr->Disks[row][column].dev;
   3185 				vp = raidPtr->raid_cinfo[row][column].ci_vp;
   3186 				raidread_component_label(dev, vp, &clabel);
   3187 				clabel.autoconfigure = new_value;
   3188 				raidwrite_component_label(dev, vp, &clabel);
   3189 			}
   3190 		}
   3191 	}
   3192 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3193 		sparecol = raidPtr->numCol + column;
   3194 		if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
   3195 			dev = raidPtr->Disks[0][sparecol].dev;
   3196 			vp = raidPtr->raid_cinfo[0][sparecol].ci_vp;
   3197 			raidread_component_label(dev, vp, &clabel);
   3198 			clabel.autoconfigure = new_value;
   3199 			raidwrite_component_label(dev, vp, &clabel);
   3200 		}
   3201 	}
   3202 	return(new_value);
   3203 }
   3204 
   3205 int
   3206 rf_set_rootpartition(raidPtr, new_value)
   3207 	RF_Raid_t *raidPtr;
   3208 	int new_value;
   3209 {
   3210 	RF_ComponentLabel_t clabel;
   3211 	struct vnode *vp;
   3212 	dev_t dev;
   3213 	int row, column;
   3214 	int sparecol;
   3215 
   3216 	raidPtr->root_partition = new_value;
   3217 	for(row=0; row<raidPtr->numRow; row++) {
   3218 		for(column=0; column<raidPtr->numCol; column++) {
   3219 			if (raidPtr->Disks[row][column].status ==
   3220 			    rf_ds_optimal) {
   3221 				dev = raidPtr->Disks[row][column].dev;
   3222 				vp = raidPtr->raid_cinfo[row][column].ci_vp;
   3223 				raidread_component_label(dev, vp, &clabel);
   3224 				clabel.root_partition = new_value;
   3225 				raidwrite_component_label(dev, vp, &clabel);
   3226 			}
   3227 		}
   3228 	}
   3229 	for(column = 0; column < raidPtr->numSpare ; column++) {
   3230 		sparecol = raidPtr->numCol + column;
   3231 		if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
   3232 			dev = raidPtr->Disks[0][sparecol].dev;
   3233 			vp = raidPtr->raid_cinfo[0][sparecol].ci_vp;
   3234 			raidread_component_label(dev, vp, &clabel);
   3235 			clabel.root_partition = new_value;
   3236 			raidwrite_component_label(dev, vp, &clabel);
   3237 		}
   3238 	}
   3239 	return(new_value);
   3240 }
   3241 
   3242 void
   3243 rf_release_all_vps(cset)
   3244 	RF_ConfigSet_t *cset;
   3245 {
   3246 	RF_AutoConfig_t *ac;
   3247 
   3248 	ac = cset->ac;
   3249 	while(ac!=NULL) {
   3250 		/* Close the vp, and give it back */
   3251 		if (ac->vp) {
   3252 			vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
   3253 			VOP_CLOSE(ac->vp, FREAD, NOCRED, 0);
   3254 			vput(ac->vp);
   3255 			ac->vp = NULL;
   3256 		}
   3257 		ac = ac->next;
   3258 	}
   3259 }
   3260 
   3261 
   3262 void
   3263 rf_cleanup_config_set(cset)
   3264 	RF_ConfigSet_t *cset;
   3265 {
   3266 	RF_AutoConfig_t *ac;
   3267 	RF_AutoConfig_t *next_ac;
   3268 
   3269 	ac = cset->ac;
   3270 	while(ac!=NULL) {
   3271 		next_ac = ac->next;
   3272 		/* nuke the label */
   3273 		free(ac->clabel, M_RAIDFRAME);
   3274 		/* cleanup the config structure */
   3275 		free(ac, M_RAIDFRAME);
   3276 		/* "next.." */
   3277 		ac = next_ac;
   3278 	}
   3279 	/* and, finally, nuke the config set */
   3280 	free(cset, M_RAIDFRAME);
   3281 }
   3282 
   3283 
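         /*
          * raid_init_component_label:
          *	Initialize a component label from the current state and
          *	geometry of the given RAID set.
          */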
   3284 void
   3285 raid_init_component_label(raidPtr, clabel)
   3286 	RF_Raid_t *raidPtr;
   3287 	RF_ComponentLabel_t *clabel;
   3288 {
   3289 	/* current version number */
   3290 	clabel->version = RF_COMPONENT_LABEL_VERSION;
   3291 	clabel->serial_number = raidPtr->serial_number;
   3292 	clabel->mod_counter = raidPtr->mod_counter;
   3293 	clabel->num_rows = raidPtr->numRow;
   3294 	clabel->num_columns = raidPtr->numCol;
   3295 	clabel->clean = RF_RAID_DIRTY; /* not clean */
   3296 	clabel->status = rf_ds_optimal; /* "It's good!" */
   3297 
   3298 	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
   3299 	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
   3300 	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
   3301 
   3302 	clabel->blockSize = raidPtr->bytesPerSector;
   3303 	clabel->numBlocks = raidPtr->sectorsPerDisk;
   3304 
   3305 	/* XXX not portable */
   3306 	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
   3307 	clabel->maxOutstanding = raidPtr->maxOutstanding;
   3308 	clabel->autoconfigure = raidPtr->autoconfigure;
   3309 	clabel->root_partition = raidPtr->root_partition;
   3310 	clabel->last_unit = raidPtr->raidid;
   3311 	clabel->config_order = raidPtr->config_order;
   3312 }
   3313 
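         /*
          * rf_auto_config_set:
          *	Configure one auto-detected config set.  *unit is set to the
          *	RAID unit chosen for the set (or -1 if none was available);
          *	returns 0 on success and non-zero on failure.
          */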
   3314 int
   3315 rf_auto_config_set(cset,unit)
   3316 	RF_ConfigSet_t *cset;
   3317 	int *unit;
   3318 {
   3319 	RF_Raid_t *raidPtr;
   3320 	RF_Config_t *config;
   3321 	int raidID;
   3322 	int retcode;
   3323 
   3324 #if DEBUG
   3325 	printf("RAID autoconfigure\n");
   3326 #endif
   3327 
   3328 	retcode = 0;
   3329 	*unit = -1;
   3330 
   3331 	/* 1. Create a config structure */
   3332 
   3333 	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
   3334 				       M_RAIDFRAME,
   3335 				       M_NOWAIT);
   3336 	if (config==NULL) {
    3337 		printf("Out of memory during RAID auto-configuration!\n");
    3338 		/* XXX do something more intelligent here. */
   3339 		return(1);
   3340 	}
   3341 
   3342 	memset(config, 0, sizeof(RF_Config_t));
   3343 
   3344 	/*
    3345 	   2. Figure out which RAID ID this set is supposed to live at,
    3346 	   and see if we can get the same RAID device it was configured
    3347 	   on last time.
   3348 	*/
   3349 
   3350 	raidID = cset->ac->clabel->last_unit;
   3351 	if ((raidID < 0) || (raidID >= numraid)) {
   3352 		/* let's not wander off into lala land. */
   3353 		raidID = numraid - 1;
   3354 	}
   3355 	if (raidPtrs[raidID]->valid != 0) {
   3356 
   3357 		/*
    3358 		   That unit is already in use.  Go looking for an
    3359 		   alternative.  Start high so that we don't immediately
    3360 		   grab raid0 if it happens to be free.
   3361 		*/
   3362 
   3363 		for(raidID = numraid - 1; raidID >= 0; raidID--) {
   3364 			if (raidPtrs[raidID]->valid == 0) {
   3365 				/* can use this one! */
   3366 				break;
   3367 			}
   3368 		}
   3369 	}
   3370 
   3371 	if (raidID < 0) {
   3372 		/* punt... */
   3373 		printf("Unable to auto configure this set!\n");
   3374 		printf("(Out of RAID devs!)\n");
         		free(config, M_RAIDFRAME); /* don't leak the config structure */
    3375 		return(1);
   3376 	}
   3377 
   3378 #if DEBUG
   3379 	printf("Configuring raid%d:\n",raidID);
   3380 #endif
   3381 
   3382 	raidPtr = raidPtrs[raidID];
   3383 
   3384 	/* XXX all this stuff should be done SOMEWHERE ELSE! */
   3385 	raidPtr->raidid = raidID;
   3386 	raidPtr->openings = RAIDOUTSTANDING;
   3387 
   3388 	/* 3. Build the configuration structure */
   3389 	rf_create_configuration(cset->ac, config, raidPtr);
   3390 
   3391 	/* 4. Do the configuration */
   3392 	retcode = rf_Configure(raidPtr, config, cset->ac);
   3393 
   3394 	if (retcode == 0) {
   3395 
   3396 		raidinit(raidPtrs[raidID]);
   3397 
   3398 		rf_markalldirty(raidPtrs[raidID]);
   3399 		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
   3400 		if (cset->ac->clabel->root_partition==1) {
   3401 			/* everything configured just fine.  Make a note
   3402 			   that this set is eligible to be root. */
   3403 			cset->rootable = 1;
   3404 			/* XXX do this here? */
   3405 			raidPtrs[raidID]->root_partition = 1;
   3406 		}
   3407 	}
   3408 
   3409 	/* 5. Cleanup */
   3410 	free(config, M_RAIDFRAME);
   3411 
   3412 	*unit = raidID;
   3413 	return(retcode);
   3414 }
   3415 
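         /*
          * rf_disk_unbusy:
          *	Mark the I/O described by this access descriptor as finished
          *	in the disk statistics, crediting the number of bytes that
          *	were actually transferred.
          */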
   3416 void
   3417 rf_disk_unbusy(desc)
   3418 	RF_RaidAccessDesc_t *desc;
   3419 {
   3420 	struct buf *bp;
   3421 
   3422 	bp = (struct buf *)desc->bp;
   3423 	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
   3424 	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
   3425 }
   3426