      1 /*	$NetBSD: rf_driver.c,v 1.74 2003/12/29 04:00:17 oster Exp $	*/
      2 /*-
      3  * Copyright (c) 1999 The NetBSD Foundation, Inc.
      4  * All rights reserved.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Greg Oster
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *        This product includes software developed by the NetBSD
     20  *        Foundation, Inc. and its contributors.
     21  * 4. Neither the name of The NetBSD Foundation nor the names of its
     22  *    contributors may be used to endorse or promote products derived
     23  *    from this software without specific prior written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     26  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright (c) 1995 Carnegie-Mellon University.
     40  * All rights reserved.
     41  *
     42  * Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
     43  *         Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
     44  *
     45  * Permission to use, copy, modify and distribute this software and
     46  * its documentation is hereby granted, provided that both the copyright
     47  * notice and this permission notice appear in all copies of the
     48  * software, derivative works or modified versions, and any portions
     49  * thereof, and that both notices appear in supporting documentation.
     50  *
     51  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     52  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     53  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     54  *
     55  * Carnegie Mellon requests users of this software to return to
     56  *
     57  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     58  *  School of Computer Science
     59  *  Carnegie Mellon University
     60  *  Pittsburgh PA 15213-3890
     61  *
     62  * any improvements or extensions that they make and grant Carnegie the
     63  * rights to redistribute these changes.
     64  */
     65 
     66 /******************************************************************************
     67  *
     68  * rf_driver.c -- main setup, teardown, and access routines for the RAID driver
     69  *
     70  * all routines are prefixed with rf_ (raidframe), to avoid conflicts.
     71  *
     72  ******************************************************************************/
     73 
     74 
     75 #include <sys/cdefs.h>
     76 __KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.74 2003/12/29 04:00:17 oster Exp $");
     77 
     78 #include "opt_raid_diagnostic.h"
     79 
     80 #include <sys/param.h>
     81 #include <sys/systm.h>
     82 #include <sys/ioctl.h>
     83 #include <sys/fcntl.h>
     84 #include <sys/vnode.h>
     85 
     86 
     87 #include "rf_archs.h"
     88 #include "rf_threadstuff.h"
     89 
     90 #include <sys/errno.h>
     91 
     92 #include "rf_raid.h"
     93 #include "rf_dag.h"
     94 #include "rf_aselect.h"
     95 #include "rf_diskqueue.h"
     96 #include "rf_parityscan.h"
     97 #include "rf_alloclist.h"
     98 #include "rf_dagutils.h"
     99 #include "rf_utils.h"
    100 #include "rf_etimer.h"
    101 #include "rf_acctrace.h"
    102 #include "rf_general.h"
    103 #include "rf_desc.h"
    104 #include "rf_states.h"
    105 #include "rf_decluster.h"
    106 #include "rf_map.h"
    107 #include "rf_revent.h"
    108 #include "rf_callback.h"
    109 #include "rf_engine.h"
    110 #include "rf_mcpair.h"
    111 #include "rf_nwayxor.h"
    112 #include "rf_copyback.h"
    113 #include "rf_driver.h"
    114 #include "rf_options.h"
    115 #include "rf_shutdown.h"
    116 #include "rf_kintf.h"
    117 
    118 #include <sys/buf.h>
    119 
    120 #ifndef RF_ACCESS_DEBUG
    121 #define RF_ACCESS_DEBUG 0
    122 #endif
    123 
    124 /* rad == RF_RaidAccessDesc_t */
    125 RF_DECLARE_MUTEX(rf_rad_pool_lock)
    126 static struct pool rf_rad_pool;
    127 #define RF_MAX_FREE_RAD 128
    128 #define RF_RAD_INC       16
    129 #define RF_RAD_INITIAL   32
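        /*
         * RF_MAX_FREE_RAD and RF_RAD_INITIAL feed the pool_sethiwat() and
         * pool_prime() calls in rf_ConfigureRDFreeList() below; RF_RAD_INC
         * appears to be unused here, a leftover from the older freelist-based
         * descriptor allocator.
         */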
    130 
    131 /* debug variables */
    132 char    rf_panicbuf[2048];	/* a buffer to hold an error msg when we panic */
    133 
    134 /* main configuration routines */
    135 static int raidframe_booted = 0;
    136 
    137 static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
    138 static void set_debug_option(char *name, long val);
    139 static void rf_UnconfigureArray(void);
    140 static void rf_ShutdownRDFreeList(void *);
    141 static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);
    142 
    143 RF_DECLARE_MUTEX(rf_printf_mutex)	/* debug only:  avoids interleaved
    144 					 * printfs by different stripes */
    145 
    146 #define SIGNAL_QUIESCENT_COND(_raid_)  wakeup(&((_raid_)->accesses_suspended))
    147 #define WAIT_FOR_QUIESCENCE(_raid_) \
    148 	ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
    149 		"raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))
    150 
    151 #define IO_BUF_ERR(bp, err) { \
    152 	bp->b_flags |= B_ERROR; \
    153 	bp->b_resid = bp->b_bcount; \
    154 	bp->b_error = err; \
    155 	biodone(bp); \
    156 }
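        /*
         * Usage notes for the helpers above: WAIT_FOR_QUIESCENCE() must be
         * called with access_suspend_mutex held -- ltsleep() drops that lock
         * while sleeping and reacquires it before returning.
         * SIGNAL_QUIESCENT_COND() wakes any thread sleeping on the
         * accesses_suspended channel.  IO_BUF_ERR() fails a struct buf back
         * to its caller with the given errno and a full residual count.
         */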
    157 
    158 static int configureCount = 0;	/* number of active configurations */
    159 static int isconfigged = 0;	/* is basic raidframe (non per-array)
    160 				 * stuff configged */
    161 RF_DECLARE_LKMGR_STATIC_MUTEX(configureMutex)	/* used to lock the configuration
    162 					 * stuff */
    163 static RF_ShutdownList_t *globalShutdown;	/* non array-specific
    164 						 * stuff */
    165 
    168 /* called at system boot time */
    169 int
    170 rf_BootRaidframe()
    171 {
    172 	int     rc;
    173 
    174 	if (raidframe_booted)
    175 		return (EBUSY);
    176 	raidframe_booted = 1;
    177 
    178 	rc = rf_lkmgr_mutex_init(&configureMutex);
    179 	if (rc) {
    180 		rf_print_unable_to_init_mutex( __FILE__, __LINE__, rc);
    181 		RF_PANIC();
    182 	}
    183 	configureCount = 0;
    184 	isconfigged = 0;
    185 	globalShutdown = NULL;
    186 	return (0);
    187 }
    188 
    189 /*
    190  * Called whenever an array is shutdown
    191  */
    192 static void
    193 rf_UnconfigureArray()
    194 {
    195 	int     rc;
    196 
    197 	RF_LOCK_LKMGR_MUTEX(configureMutex);
    198 	if (--configureCount == 0) {	/* if no active configurations, shut
    199 					 * everything down */
    200 		isconfigged = 0;
    201 
    202 		rc = rf_ShutdownList(&globalShutdown);
    203 		if (rc) {
    204 			RF_ERRORMSG1("RAIDFRAME: unable to do global shutdown, rc=%d\n", rc);
    205 		}
    206 
    207 		/*
    208 	         * We must wait until now, because the AllocList module
    209 	         * uses the DebugMem module.
    210 	         */
    211 #if RF_DEBUG_MEM
    212 		if (rf_memDebug)
    213 			rf_print_unfreed();
    214 #endif
    215 	}
    216 	RF_UNLOCK_LKMGR_MUTEX(configureMutex);
    217 }
    218 
    219 /*
    220  * Called to shut down an array.
    221  */
    222 int
    223 rf_Shutdown(raidPtr)
    224 	RF_Raid_t *raidPtr;
    225 {
    226 
    227 	if (!raidPtr->valid) {
    228 		RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver.  Aborting shutdown\n");
    229 		return (EINVAL);
    230 	}
    231 	/*
    232          * Wait for outstanding IOs to land.
    233          * As described in rf_raid.h, we use rf_rad_pool_lock (which
    234          * replaced the old rad_freelist lock) to protect the per-array
    235          * info about outstanding descs, since we need to take that lock
    236          * for the descriptor pool anyway and this cuts down on the
    237          * amount of serialization we've got going on.
    238          */
    239 	RF_LOCK_MUTEX(rf_rad_pool_lock);
    240 	if (raidPtr->waitShutdown) {
    241 		RF_UNLOCK_MUTEX(rf_rad_pool_lock);
    242 		return (EBUSY);
    243 	}
    244 	raidPtr->waitShutdown = 1;
    245 	while (raidPtr->nAccOutstanding) {
    246 		RF_WAIT_COND(raidPtr->outstandingCond, rf_rad_pool_lock);
    247 	}
    248 	RF_UNLOCK_MUTEX(rf_rad_pool_lock);
    249 
    250 	/* Wait for any parity re-writes to stop... */
    251 	while (raidPtr->parity_rewrite_in_progress) {
    252 		printf("Waiting for parity re-write to exit...\n");
    253 		tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
    254 		       "rfprwshutdown", 0);
    255 	}
    256 
    257 	raidPtr->valid = 0;
    258 
    259 	rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);
    260 
    261 	rf_UnconfigureVnodes(raidPtr);
    262 
    263 	rf_ShutdownList(&raidPtr->shutdownList);
    264 
    265 	rf_UnconfigureArray();
    266 
    267 	return (0);
    268 }
    269 
    270 
    271 #define DO_INIT_CONFIGURE(f) { \
    272 	rc = f (&globalShutdown); \
    273 	if (rc) { \
    274 		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
    275 		rf_ShutdownList(&globalShutdown); \
    276 		configureCount--; \
    277 		RF_UNLOCK_LKMGR_MUTEX(configureMutex); \
    278 		return(rc); \
    279 	} \
    280 }
    281 
    282 #define DO_RAID_FAIL() { \
    283 	rf_UnconfigureVnodes(raidPtr); \
    284 	rf_ShutdownList(&raidPtr->shutdownList); \
    285 	rf_UnconfigureArray(); \
    286 }
    287 
    288 #define DO_RAID_INIT_CONFIGURE(f) { \
    289 	rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
    290 	if (rc) { \
    291 		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
    292 		DO_RAID_FAIL(); \
    293 		return(rc); \
    294 	} \
    295 }
    296 
    297 #define DO_RAID_MUTEX(_m_) { \
    298 	rc = rf_create_managed_mutex(&raidPtr->shutdownList, (_m_)); \
    299 	if (rc) { \
    300 		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc); \
    301 		DO_RAID_FAIL(); \
    302 		return(rc); \
    303 	} \
    304 }
    305 
    306 #define DO_RAID_COND(_c_) { \
    307 	rc = rf_create_managed_cond(&raidPtr->shutdownList, (_c_)); \
    308 	if (rc) { \
    309 		rf_print_unable_to_init_cond(__FILE__, __LINE__, rc); \
    310 		DO_RAID_FAIL(); \
    311 		return(rc); \
    312 	} \
    313 }
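        /*
         * Note that DO_INIT_CONFIGURE() and the DO_RAID_* helpers above are
         * only meant to be expanded inside rf_Configure(), where "rc",
         * "raidPtr" and "cfgPtr" are in scope; on failure they tear down
         * whatever has been configured so far and return from rf_Configure()
         * directly.
         */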
    314 
    315 int
    316 rf_Configure(raidPtr, cfgPtr, ac)
    317 	RF_Raid_t *raidPtr;
    318 	RF_Config_t *cfgPtr;
    319 	RF_AutoConfig_t *ac;
    320 {
    321 	RF_RowCol_t col;
    322 	int     rc;
    323 
    324 	RF_LOCK_LKMGR_MUTEX(configureMutex);
    325 	configureCount++;
    326 	if (isconfigged == 0) {
    327 		rc = rf_create_managed_mutex(&globalShutdown, &rf_printf_mutex);
    328 		if (rc) {
    329 			rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
    330 			rf_ShutdownList(&globalShutdown);
        			configureCount--;
        			RF_UNLOCK_LKMGR_MUTEX(configureMutex);
    331 			return (rc);
    332 		}
    333 		/* initialize globals */
    334 
    335 		DO_INIT_CONFIGURE(rf_ConfigureAllocList);
    336 
    337 		/*
    338 	         * Yes, this does make debugging general to the whole
    339 	         * system instead of being array specific. Bummer, drag.
    340 		 */
    341 		rf_ConfigureDebug(cfgPtr);
    342 		DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
    343 		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
    344 		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
    345 		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
    346 		DO_INIT_CONFIGURE(rf_ConfigureCallback);
    347 		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
    348 		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
    349 		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
    350 		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
    351 		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
    352 		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
    353 		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
    354 		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
    355 		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
    356 		isconfigged = 1;
    357 	}
    358 	RF_UNLOCK_LKMGR_MUTEX(configureMutex);
    359 
    360 	DO_RAID_MUTEX(&raidPtr->mutex);
    361 	/* set up the cleanup list.  Do this after ConfigureDebug so that
    362 	 * the value of memDebug will be set */
    363 
    364 	rf_MakeAllocList(raidPtr->cleanupList);
    365 	if (raidPtr->cleanupList == NULL) {
    366 		DO_RAID_FAIL();
    367 		return (ENOMEM);
    368 	}
    369 	rc = rf_ShutdownCreate(&raidPtr->shutdownList,
    370 	    (void (*) (void *)) rf_FreeAllocList,
    371 	    raidPtr->cleanupList);
    372 	if (rc) {
    373 		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
    374 		DO_RAID_FAIL();
    375 		return (rc);
    376 	}
    377 	raidPtr->numCol = cfgPtr->numCol;
    378 	raidPtr->numSpare = cfgPtr->numSpare;
    379 
    380 	raidPtr->status = rf_rs_optimal;
    381 	raidPtr->reconControl = NULL;
    382 
    383 	TAILQ_INIT(&(raidPtr->iodone));
    384 	simple_lock_init(&(raidPtr->iodone_lock));
    385 
    386 	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
    387 	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);
    388 
    389 	DO_RAID_COND(&raidPtr->outstandingCond);
    390 
    391 	raidPtr->nAccOutstanding = 0;
    392 	raidPtr->waitShutdown = 0;
    393 
    394 	DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);
    395 	DO_RAID_COND(&raidPtr->quiescent_cond);
    396 
    397 	DO_RAID_COND(&raidPtr->waitForReconCond);
    398 
    399 	DO_RAID_MUTEX(&raidPtr->recon_done_proc_mutex);
    400 
    401 	if (ac!=NULL) {
    402 		/* We have an AutoConfig structure..  Don't do the
    403 		   normal disk configuration... call the auto config
    404 		   stuff */
    405 		rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
    406 	} else {
    407 		DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
    408 		DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
    409 	}
    410 	/* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
    411 	 * no. is set */
    412 	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);
    413 
    414 	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);
    415 
    416 	DO_RAID_INIT_CONFIGURE(rf_ConfigurePSStatus);
    417 
    418 	for (col = 0; col < raidPtr->numCol; col++) {
    419 		/*
    420 		 * XXX better distribution
    421 		 */
    422 		raidPtr->hist_diskreq[col] = 0;
    423 	}
    424 
    425 	raidPtr->numNewFailures = 0;
    426 	raidPtr->copyback_in_progress = 0;
    427 	raidPtr->parity_rewrite_in_progress = 0;
    428 	raidPtr->adding_hot_spare = 0;
    429 	raidPtr->recon_in_progress = 0;
    430 	raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;
    431 
    432 	/* autoconfigure and root_partition will actually get filled in
    433 	   after the config is done */
    434 	raidPtr->autoconfigure = 0;
    435 	raidPtr->root_partition = 0;
    436 	raidPtr->last_unit = raidPtr->raidid;
    437 	raidPtr->config_order = 0;
    438 
    439 	if (rf_keepAccTotals) {
    440 		raidPtr->keep_acc_totals = 1;
    441 	}
    442 	rf_StartUserStats(raidPtr);
    443 
    444 	raidPtr->valid = 1;
    445 
    446 	printf("raid%d: %s\n", raidPtr->raidid,
    447 	       raidPtr->Layout.map->configName);
    448 	printf("raid%d: Components:", raidPtr->raidid);
    449 
    450 	for (col = 0; col < raidPtr->numCol; col++) {
    451 		printf(" %s", raidPtr->Disks[col].devname);
    452 		if (RF_DEAD_DISK(raidPtr->Disks[col].status)) {
    453 			printf("[**FAILED**]");
    454 		}
    455 	}
    456 	printf("\n");
    457 	printf("raid%d: Total Sectors: %lu (%lu MB)\n",
    458 	       raidPtr->raidid,
    459 	       (unsigned long) raidPtr->totalSectors,
    460 	       (unsigned long) (raidPtr->totalSectors / 1024 *
    461 				(1 << raidPtr->logBytesPerSector) / 1024));
    462 
    463 	return (0);
    464 }
    465 
    466 static void
    467 rf_ShutdownRDFreeList(ignored)
    468 	void   *ignored;
    469 {
    470 	pool_destroy(&rf_rad_pool);
    471 }
    472 
    473 static int
    474 rf_ConfigureRDFreeList(listp)
    475 	RF_ShutdownList_t **listp;
    476 {
    477 	int     rc;
    478 
    479 	pool_init(&rf_rad_pool, sizeof(RF_RaidAccessDesc_t), 0, 0, 0,
    480 		  "rf_rad_pl", NULL);
    481 	pool_sethiwat(&rf_rad_pool, RF_MAX_FREE_RAD);
    482 	pool_prime(&rf_rad_pool, RF_RAD_INITIAL);
    483 	rc = rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
    484 	if (rc) {
    485 		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
    486 		rf_ShutdownRDFreeList(NULL);
    487 		return (rc);
    488 	}
    489 	return (0);
    490 }
    491 
    492 RF_RaidAccessDesc_t *
    493 rf_AllocRaidAccDesc(
    494     RF_Raid_t * raidPtr,
    495     RF_IoType_t type,
    496     RF_RaidAddr_t raidAddress,
    497     RF_SectorCount_t numBlocks,
    498     caddr_t bufPtr,
    499     void *bp,
    500     RF_RaidAccessFlags_t flags,
    501     RF_AccessState_t * states)
    502 {
    503 	RF_RaidAccessDesc_t *desc;
    504 
    505 	desc = pool_get(&rf_rad_pool, PR_WAITOK);
    506 	simple_lock_init(&desc->mutex);
    507 	desc->cond = 0;
    508 
        	RF_LOCK_MUTEX(rf_rad_pool_lock);
    509 	if (raidPtr->waitShutdown) {
    510 		/*
    511 	         * Actually, we're shutting the array down. Free the desc
    512 	         * and return NULL.
    513 	         */
    514 
    515 		RF_UNLOCK_MUTEX(rf_rad_pool_lock);
    516 		pool_put(&rf_rad_pool, desc);
    517 		return (NULL);
    518 	}
    519 	raidPtr->nAccOutstanding++;
    520 
    521 	RF_UNLOCK_MUTEX(rf_rad_pool_lock);
    522 
    523 	desc->raidPtr = (void *) raidPtr;
    524 	desc->type = type;
    525 	desc->raidAddress = raidAddress;
    526 	desc->numBlocks = numBlocks;
    527 	desc->bufPtr = bufPtr;
    528 	desc->bp = bp;
    529 	desc->paramDAG = NULL;
    530 	desc->paramASM = NULL;
    531 	desc->flags = flags;
    532 	desc->states = states;
    533 	desc->state = 0;
    534 
    535 	desc->status = 0;
    536 	memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
    537 	desc->callbackFunc = NULL;
    538 	desc->callbackArg = NULL;
    539 	desc->next = NULL;
    540 	desc->head = desc;
    541 	desc->cleanupList = NULL;
    542 	rf_MakeAllocList(desc->cleanupList);
    543 	return (desc);
    544 }
    545 
    546 void
    547 rf_FreeRaidAccDesc(RF_RaidAccessDesc_t * desc)
    548 {
    549 	RF_Raid_t *raidPtr = desc->raidPtr;
    550 
    551 	RF_ASSERT(desc);
    552 
    553 	rf_FreeAllocList(desc->cleanupList);
        	RF_LOCK_MUTEX(rf_rad_pool_lock);
    554 	pool_put(&rf_rad_pool, desc);
    555 	raidPtr->nAccOutstanding--;
    556 	if (raidPtr->waitShutdown) {
    557 		RF_SIGNAL_COND(raidPtr->outstandingCond);
    558 	}
    559 	RF_UNLOCK_MUTEX(rf_rad_pool_lock);
    560 }
    561 /*********************************************************************
    562  * Main routine for performing an access.
    563  * Accesses are retried until a DAG cannot be selected.  This occurs
    564  * when either the DAG library is incomplete or there are too many
    565  * failures in a parity group.
    566  ********************************************************************/
    567 int
    568 rf_DoAccess(
    569     RF_Raid_t * raidPtr,
    570     RF_IoType_t type,
    571     int async_flag,
    572     RF_RaidAddr_t raidAddress,
    573     RF_SectorCount_t numBlocks,
    574     caddr_t bufPtr,
    575     void *bp_in,
    576     RF_RaidAccessFlags_t flags)
    577 /*
    578 type should be read or write
    579 async_flag should be RF_TRUE or RF_FALSE
    580 bp_in is a buf pointer.  void * to facilitate ignoring it outside the kernel
    581 */
    582 {
    583 	RF_RaidAccessDesc_t *desc;
    584 	caddr_t lbufPtr = bufPtr;
    585 	struct buf *bp = (struct buf *) bp_in;
    586 
    587 	raidAddress += rf_raidSectorOffset;
    588 
    589 #if RF_ACCESS_DEBUG
    590 	if (rf_accessDebug) {
    591 
    592 		printf("logBytes is: %d %d %d\n", raidPtr->raidid,
    593 		    raidPtr->logBytesPerSector,
    594 		    (int) rf_RaidAddressToByte(raidPtr, numBlocks));
    595 		printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n", raidPtr->raidid,
    596 		    (type == RF_IO_TYPE_READ) ? "READ" : "WRITE", (int) raidAddress,
    597 		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
    598 		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
    599 		    (int) numBlocks,
    600 		    (int) rf_RaidAddressToByte(raidPtr, numBlocks),
    601 		    (long) bufPtr);
    602 	}
    603 #endif
    604 	if (raidAddress + numBlocks > raidPtr->totalSectors) {
    605 
    606 		printf("DoAccess: raid addr %lu too large to access %lu sectors.  Max legal addr is %lu\n",
    607 		    (u_long) raidAddress, (u_long) numBlocks, (u_long) raidPtr->totalSectors);
    608 
    609 		IO_BUF_ERR(bp, ENOSPC);
    610 		return (ENOSPC);
    611 	}
    612 	desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
    613 	    numBlocks, lbufPtr, bp, flags, raidPtr->Layout.map->states);
    614 
    615 	if (desc == NULL) {
    616 		return (ENOMEM);
    617 	}
    618 	RF_ETIMER_START(desc->tracerec.tot_timer);
    619 
    620 	desc->async_flag = async_flag;
    621 
    622 	rf_ContinueRaidAccess(desc);
    623 
    624 	return (0);
    625 }
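        /*
         * Illustrative sketch only (never compiled): roughly how a kernel
         * caller such as the block-device strategy path in rf_netbsdkintf.c
         * hands a struct buf to rf_DoAccess().  The function name below is
         * hypothetical, and it assumes bp->b_blkno is already expressed in
         * raid-sector units; the real entry point does its own translation.
         */
        #if 0
        static void
        example_start_io(RF_Raid_t *raidPtr, struct buf *bp)
        {
        	RF_RaidAddr_t raid_addr;
        	RF_SectorCount_t num_blocks;
        	int rc;
        
        	raid_addr = bp->b_blkno;
        	num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
        
        	/* queue the access; completion is reported via biodone(bp) */
        	rc = rf_DoAccess(raidPtr,
        	    (bp->b_flags & B_READ) ? RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
        	    0 /* async_flag */, raid_addr, num_blocks,
        	    bp->b_data, bp, RF_DAG_NONBLOCKING_IO);
        	if (rc)
        		printf("raid%d: rf_DoAccess failed: %d\n",
        		       raidPtr->raidid, rc);
        }
        #endif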
    626 #if 0
    627 /* force the array into reconfigured mode without doing reconstruction */
    628 int
    629 rf_SetReconfiguredMode(raidPtr, col)
    630 	RF_Raid_t *raidPtr;
    631 	int     col;
    632 {
    633 	if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
    634 		printf("Can't set reconfigured mode in dedicated-spare array\n");
    635 		RF_PANIC();
    636 	}
    637 	RF_LOCK_MUTEX(raidPtr->mutex);
    638 	raidPtr->numFailures++;
    639 	raidPtr->Disks[col].status = rf_ds_dist_spared;
    640 	raidPtr->status = rf_rs_reconfigured;
    641 	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
    642 	/* install spare table only if declustering + distributed sparing
    643 	 * architecture. */
    644 	if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
    645 		rf_InstallSpareTable(raidPtr, col);
    646 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    647 	return (0);
    648 }
    649 #endif
    650 
    651 int
    652 rf_FailDisk(
    653     RF_Raid_t * raidPtr,
    654     int fcol,
    655     int initRecon)
    656 {
    657 	RF_LOCK_MUTEX(raidPtr->mutex);
    658 	if (raidPtr->Disks[fcol].status != rf_ds_failed) {
    659 		/* must be failing something that is valid, or else it's
    660 		   already marked as failed (in which case we don't
    661 		   want to mark it failed again!) */
    662 		raidPtr->numFailures++;
    663 		raidPtr->Disks[fcol].status = rf_ds_failed;
    664 		raidPtr->status = rf_rs_degraded;
    665 	}
    666 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    667 
    668 	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
    669 
    670 	/* Close the component, so that it's not "locked" if someone
    671 	   else wants to use it! */
    672 
    673 	rf_close_component(raidPtr, raidPtr->raid_cinfo[fcol].ci_vp,
    674 			   raidPtr->Disks[fcol].auto_configured);
    675 
    676 	RF_LOCK_MUTEX(raidPtr->mutex);
    677 	raidPtr->raid_cinfo[fcol].ci_vp = NULL;
    678 
    679 	/* Need to mark the component as not being auto_configured
    680 	   (in case it was previously). */
    681 
    682 	raidPtr->Disks[fcol].auto_configured = 0;
    683 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    684 
    685 	if (initRecon)
    686 		rf_ReconstructFailedDisk(raidPtr, fcol);
    687 	return (0);
    688 }
    689 /* releases a thread that is waiting for the array to become quiesced.
    690  * access_suspend_mutex should be locked upon calling this
    691  */
    692 void
    693 rf_SignalQuiescenceLock(raidPtr)
    694 	RF_Raid_t *raidPtr;
    695 {
    696 #if RF_DEBUG_QUIESCE
    697 	if (rf_quiesceDebug) {
    698 		printf("raid%d: Signalling quiescence lock\n",
    699 		       raidPtr->raidid);
    700 	}
    701 #endif
    702 	raidPtr->access_suspend_release = 1;
    703 
    704 	if (raidPtr->waiting_for_quiescence) {
    705 		SIGNAL_QUIESCENT_COND(raidPtr);
    706 	}
    707 }
    708 /* suspends all new requests to the array.  No effect on accesses that are in flight.  */
    709 int
    710 rf_SuspendNewRequestsAndWait(raidPtr)
    711 	RF_Raid_t *raidPtr;
    712 {
    713 #if RF_DEBUG_QUIESCE
    714 	if (rf_quiesceDebug)
    715 		printf("raid%d: Suspending new reqs\n", raidPtr->raidid);
    716 #endif
    717 	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
    718 	raidPtr->accesses_suspended++;
    719 	raidPtr->waiting_for_quiescence = (raidPtr->accs_in_flight == 0) ? 0 : 1;
    720 
    721 	if (raidPtr->waiting_for_quiescence) {
    722 		raidPtr->access_suspend_release = 0;
    723 		while (!raidPtr->access_suspend_release) {
    724 			printf("raid%d: Suspending: Waiting for Quiescence\n",
    725 			       raidPtr->raidid);
    726 			WAIT_FOR_QUIESCENCE(raidPtr);
    727 			raidPtr->waiting_for_quiescence = 0;
    728 		}
    729 	}
    730 	printf("raid%d: Quiescence reached..\n", raidPtr->raidid);
    731 
    732 	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
    733 	return (raidPtr->waiting_for_quiescence);
    734 }
    735 /* wake up everyone waiting for quiescence to be released */
    736 void
    737 rf_ResumeNewRequests(raidPtr)
    738 	RF_Raid_t *raidPtr;
    739 {
    740 	RF_CallbackDesc_t *t, *cb;
    741 
    742 #if RF_DEBUG_QUIESCE
    743 	if (rf_quiesceDebug)
    744 		printf("Resuming new reqs\n");
    745 #endif
    746 
    747 	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
    748 	raidPtr->accesses_suspended--;
    749 	if (raidPtr->accesses_suspended == 0)
    750 		cb = raidPtr->quiesce_wait_list;
    751 	else
    752 		cb = NULL;
    753 	raidPtr->quiesce_wait_list = NULL;
    754 	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
    755 
    756 	while (cb) {
    757 		t = cb;
    758 		cb = cb->next;
    759 		(t->callbackFunc) (t->callbackArg);
    760 		rf_FreeCallbackDesc(t);
    761 	}
    762 }
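        /*
         * Illustrative sketch only (never compiled): callers that need the
         * array quiescent -- reconstruction, copyback, and similar
         * maintenance paths -- bracket their work with the suspend/resume
         * pair above.  The function name here is hypothetical.
         */
        #if 0
        static void
        example_quiesced_operation(RF_Raid_t *raidPtr)
        {
        	/* block new accesses and wait for in-flight ones to drain */
        	rf_SuspendNewRequestsAndWait(raidPtr);
        
        	/* ... operate on the quiescent array ... */
        
        	/* run any queued callbacks and let new accesses proceed again */
        	rf_ResumeNewRequests(raidPtr);
        }
        #endif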
    763 /*****************************************************************************************
    764  *
    765  * debug routines
    766  *
    767  ****************************************************************************************/
    768 
    769 static void
    770 set_debug_option(name, val)
    771 	char   *name;
    772 	long    val;
    773 {
    774 	RF_DebugName_t *p;
    775 
    776 	for (p = rf_debugNames; p->name; p++) {
    777 		if (!strcmp(p->name, name)) {
    778 			*(p->ptr) = val;
    779 			printf("[Set debug variable %s to %ld]\n", name, val);
    780 			return;
    781 		}
    782 	}
    783 	RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
    784 }
    785 
    786 
    787 /* would like to use sscanf here, but apparently not available in kernel */
    788 /*ARGSUSED*/
    789 static void
    790 rf_ConfigureDebug(cfgPtr)
    791 	RF_Config_t *cfgPtr;
    792 {
    793 	char   *val_p, *name_p, *white_p;
    794 	long    val;
    795 	int     i;
    796 
    797 	rf_ResetDebugOptions();
    798 	for (i = 0; i < RF_MAXDBGV && cfgPtr->debugVars[i][0]; i++) {
    799 		name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
    800 		white_p = rf_find_white(name_p);	/* skip to start of 2nd
    801 							 * word */
    802 		val_p = rf_find_non_white(white_p);
    803 		if (*val_p == '0' && *(val_p + 1) == 'x')
    804 			val = rf_htoi(val_p + 2);
    805 		else
    806 			val = rf_atoi(val_p);
    807 		*white_p = '\0';
    808 		set_debug_option(name_p, val);
    809 	}
    810 }
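        /*
         * Illustrative sketch only (never compiled): the debugVars[] entries
         * parsed above are "name value" pairs, one per entry, with
         * hexadecimal values prefixed by "0x".  The option names below are
         * examples; the authoritative list is the rf_debugNames table used
         * by set_debug_option().
         */
        #if 0
        static const char example_debug_vars[][50] = {
        	"accessDebug 1",
        	"memDebug 0x1",
        };
        #endif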
    811 /* performance monitoring stuff */
    812 
    813 #define TIMEVAL_TO_US(t) (((long) t.tv_sec) * 1000000L + (long) t.tv_usec)
    814 
    815 #if !defined(_KERNEL) && !defined(SIMULATE)
    816 
    817 /*
    818  * Throughput stats currently only used in user-level RAIDframe
    819  */
    820 
    821 static int
    822 rf_InitThroughputStats(
    823     RF_ShutdownList_t ** listp,
    824     RF_Raid_t * raidPtr,
    825     RF_Config_t * cfgPtr)
    826 {
    827 	int     rc;
    828 
    829 	/* these used by user-level raidframe only */
    830 	rc = rf_create_managed_mutex(listp, &raidPtr->throughputstats.mutex);
    831 	if (rc) {
    832 		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
    833 		return (rc);
    834 	}
    835 	raidPtr->throughputstats.sum_io_us = 0;
    836 	raidPtr->throughputstats.num_ios = 0;
    837 	raidPtr->throughputstats.num_out_ios = 0;
    838 	return (0);
    839 }
    840 
    841 void
    842 rf_StartThroughputStats(RF_Raid_t * raidPtr)
    843 {
    844 	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
    845 	raidPtr->throughputstats.num_ios++;
    846 	raidPtr->throughputstats.num_out_ios++;
    847 	if (raidPtr->throughputstats.num_out_ios == 1)
    848 		RF_GETTIME(raidPtr->throughputstats.start);
    849 	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
    850 }
    851 
    852 static void
    853 rf_StopThroughputStats(RF_Raid_t * raidPtr)
    854 {
    855 	struct timeval diff;
    856 
    857 	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
    858 	raidPtr->throughputstats.num_out_ios--;
    859 	if (raidPtr->throughputstats.num_out_ios == 0) {
    860 		RF_GETTIME(raidPtr->throughputstats.stop);
    861 		RF_TIMEVAL_DIFF(&raidPtr->throughputstats.start, &raidPtr->throughputstats.stop, &diff);
    862 		raidPtr->throughputstats.sum_io_us += TIMEVAL_TO_US(diff);
    863 	}
    864 	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
    865 }
    866 
    867 static void
    868 rf_PrintThroughputStats(RF_Raid_t * raidPtr)
    869 {
    870 	RF_ASSERT(raidPtr->throughputstats.num_out_ios == 0);
    871 	if (raidPtr->throughputstats.sum_io_us != 0) {
    872 		printf("[Throughput: %8.2f IOs/second]\n", raidPtr->throughputstats.num_ios
    873 		    / (raidPtr->throughputstats.sum_io_us / 1000000.0));
    874 	}
    875 }
    876 #endif				/* !_KERNEL && !SIMULATE */
    877 
    878 void
    879 rf_StartUserStats(RF_Raid_t * raidPtr)
    880 {
    881 	RF_GETTIME(raidPtr->userstats.start);
    882 	raidPtr->userstats.sum_io_us = 0;
    883 	raidPtr->userstats.num_ios = 0;
    884 	raidPtr->userstats.num_sect_moved = 0;
    885 }
    886 
    887 void
    888 rf_StopUserStats(RF_Raid_t * raidPtr)
    889 {
    890 	RF_GETTIME(raidPtr->userstats.stop);
    891 }
    892 
    893 void
    894 rf_UpdateUserStats(raidPtr, rt, numsect)
    895 	RF_Raid_t *raidPtr;
    896 	int     rt;		/* resp time in us */
    897 	int     numsect;	/* number of sectors for this access */
    898 {
    899 	raidPtr->userstats.sum_io_us += rt;
    900 	raidPtr->userstats.num_ios++;
    901 	raidPtr->userstats.num_sect_moved += numsect;
    902 }
    903 
    904 void
    905 rf_PrintUserStats(RF_Raid_t * raidPtr)
    906 {
    907 	long    elapsed_us, mbs, mbs_frac;
    908 	struct timeval diff;
    909 
    910 	RF_TIMEVAL_DIFF(&raidPtr->userstats.start,
    911 			&raidPtr->userstats.stop, &diff);
    912 	elapsed_us = TIMEVAL_TO_US(diff);
    913 
    914 	/* 2000 sectors per megabyte, 1000000 microseconds per second */
    915 	if (elapsed_us >= 1000000)	/* avoid division by zero below */
    916 		mbs = (raidPtr->userstats.num_sect_moved / 2000) /
    917 			(elapsed_us / 1000000);
    918 	else
    919 		mbs = 0;
    920 
    921 	/* this computes only the first digit of the fractional mb/s moved */
    922 	if (elapsed_us >= 1000000) {
    923 		mbs_frac = ((raidPtr->userstats.num_sect_moved / 200) /
    924 			    (elapsed_us / 1000000)) - (mbs * 10);
    925 	} else {
    926 		mbs_frac = 0;
    927 	}
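        	/*
        	 * Worked example of the fixed-point arithmetic above: moving
        	 * 1100000 sectors (550 MB) in 8 seconds gives
        	 * mbs = (1100000 / 2000) / 8 = 68 and
        	 * mbs_frac = (1100000 / 200) / 8 - 680 = 687 - 680 = 7,
        	 * so the data rate below is printed as "68.7 MB/sec".
        	 */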
    928 
    929 	printf("raid%d: Number of I/Os:             %ld\n",
    930 	       raidPtr->raidid, raidPtr->userstats.num_ios);
    931 	printf("raid%d: Elapsed time (us):          %ld\n",
    932 	       raidPtr->raidid, elapsed_us);
    933 	printf("raid%d: User I/Os per second:       %ld\n",
    934 	       raidPtr->raidid, RF_DB0_CHECK(raidPtr->userstats.num_ios,
    935 					     (elapsed_us / 1000000)));
    936 	printf("raid%d: Average user response time: %ld us\n",
    937 	       raidPtr->raidid, RF_DB0_CHECK(raidPtr->userstats.sum_io_us,
    938 					     raidPtr->userstats.num_ios));
    939 	printf("raid%d: Total sectors moved:        %ld\n",
    940 	       raidPtr->raidid, raidPtr->userstats.num_sect_moved);
    941 	printf("raid%d: Average access size (sect): %ld\n",
    942 	       raidPtr->raidid, RF_DB0_CHECK(raidPtr->userstats.num_sect_moved,
    943 					     raidPtr->userstats.num_ios));
    944 	printf("raid%d: Achieved data rate:         %ld.%ld MB/sec\n",
    945 	       raidPtr->raidid, mbs, mbs_frac);
    946 }
    947 
    948 
    949 void
    950 rf_print_panic_message(line,file)
    951 	int line;
    952 	char *file;
    953 {
    954 	sprintf(rf_panicbuf,"raidframe error at line %d file %s",
    955 		line, file);
    956 }
    957 
    958 #ifdef RAID_DIAGNOSTIC
    959 void
    960 rf_print_assert_panic_message(line,file,condition)
    961 	int line;
    962 	char *file;
    963 	char *condition;
    964 {
    965 	sprintf(rf_panicbuf,
    966 		"raidframe error at line %d file %s (failed asserting %s)\n",
    967 		line, file, condition);
    968 }
    969 #endif
    970 
    971 void
    972 rf_print_unable_to_init_mutex(file,line,rc)
    973 	char *file;
    974 	int line;
    975 	int rc;
    976 {
    977 	RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
    978 		     file, line, rc);
    979 }
    980 
    981 void
    982 rf_print_unable_to_init_cond(file,line,rc)
    983 	char *file;
    984 	int line;
    985 	int rc;
    986 {
    987 	RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n",
    988 		     file, line, rc);
    989 }
    990 
    991 void
    992 rf_print_unable_to_add_shutdown(file,line,rc)
    993 	char *file;
    994 	int line;
    995 	int rc;
    996 {
    997 	RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
    998 		     file, line, rc);
    999 }
   1000