/*	$NetBSD: rf_raid5.c,v 1.10 2003/12/30 21:59:03 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_raid5.c -- implements RAID Level 5
 *
 *****************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_raid5.c,v 1.10 2003/12/30 21:59:03 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_raid5.h"
#include "rf_dag.h"
#include "rf_dagffrd.h"
#include "rf_dagffwr.h"
#include "rf_dagdegrd.h"
#include "rf_dagdegwr.h"
#include "rf_dagutils.h"
#include "rf_general.h"
#include "rf_map.h"
#include "rf_utils.h"
typedef struct RF_Raid5ConfigInfo_s {
	RF_RowCol_t **stripeIdentifier;	/* filled in at config time and used
					 * by IdentifyStripe */
}       RF_Raid5ConfigInfo_t;

int
rf_ConfigureRAID5(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
		  RF_Config_t *cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_Raid5ConfigInfo_t *info;
	RF_RowCol_t i, j, startdisk;

	/* create a RAID level 5 configuration structure */
	RF_MallocAndAdd(info, sizeof(RF_Raid5ConfigInfo_t), (RF_Raid5ConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;

	/* the stripe identifier must identify the disks in each stripe, IN
	 * THE ORDER THAT THEY APPEAR IN THE STRIPE. */
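	/*
	 * The loop below builds one row per rotation position; row i
	 * serves the stripes with stripeID % numCol == i.  For example,
	 * with numCol = 4 it produces:
	 *
	 *	row 0:  0 1 2 3
	 *	row 1:  3 0 1 2
	 *	row 2:  2 3 0 1
	 *	row 3:  1 2 3 0
	 *
	 * Each row lists the data columns in stripe order, with the
	 * parity column last, matching the sector and parity maps below.
	 */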
	info->stripeIdentifier = rf_make_2d_array(raidPtr->numCol, raidPtr->numCol, raidPtr->cleanupList);
	if (info->stripeIdentifier == NULL)
		return (ENOMEM);
	startdisk = 0;
	for (i = 0; i < raidPtr->numCol; i++) {
		for (j = 0; j < raidPtr->numCol; j++) {
			info->stripeIdentifier[i][j] = (startdisk + j) % raidPtr->numCol;
		}
		if ((--startdisk) < 0)
			startdisk = raidPtr->numCol - 1;
	}

	/* fill in the remaining layout parameters */
	layoutPtr->numStripe = layoutPtr->stripeUnitsPerDisk;
	layoutPtr->numDataCol = raidPtr->numCol - 1;
	layoutPtr->dataSectorsPerStripe = layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numParityCol = 1;
	layoutPtr->dataStripeUnitsPerDisk = layoutPtr->stripeUnitsPerDisk;

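	/* exported capacity excludes the parity: one full column's worth
	 * of stripe units */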
	raidPtr->totalSectors = layoutPtr->stripeUnitsPerDisk * layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;

	return (0);
}

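/* default number of floating reconstruction buffers to hand to the
 * reconstruction engine for this layout */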
int
rf_GetDefaultNumFloatingReconBuffersRAID5(RF_Raid_t *raidPtr)
{
	return (20);
}

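/* default head-separation limit: a bound on how far the reconstruction
 * process lets one surviving disk's reads get ahead of the others */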
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitRAID5(RF_Raid_t *raidPtr)
{
	return (10);
}
#if !defined(__NetBSD__) && !defined(_KERNEL)
/* not currently used */
int
rf_ShutdownRAID5(RF_Raid_t *raidPtr)
{
	return (0);
}
#endif

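/* map a RAID address to the column and physical sector of its data.
 * Data stripe unit n lives on column n % numCol; combined with the
 * rotating parity map below, this gives the classic left-symmetric
 * RAID-5 layout.  E.g. with numCol = 4 (numDataCol = 3), data SUs
 * 0..5 fall on columns 0 1 2 3 0 1, while parity for stripes 0 and 1
 * falls on columns 3 and 2. */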
void
rf_MapSectorRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t raidSector,
		  RF_RowCol_t *col, RF_SectorNum_t *diskSector, int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;
	*col = (SUID % raidPtr->numCol);
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}

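/* map a RAID address to the column and physical sector of its parity.
 * The parity column rotates one disk to the left per stripe:
 * (numCol - 1) - (stripe % numCol), i.e. columns 3 2 1 0 3 ... for
 * numCol = 4 */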
void
rf_MapParityRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t raidSector,
		  RF_RowCol_t *col, RF_SectorNum_t *diskSector, int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;

	*col = raidPtr->Layout.numDataCol - (SUID / raidPtr->Layout.numDataCol) % raidPtr->numCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}

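/* return the ordered list of disks touched by the stripe containing
 * addr: one row of the stripeIdentifier table built at configuration
 * time, selected by stripeID % numCol */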
void
rf_IdentifyStripeRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t addr,
		       RF_RowCol_t **diskids)
{
	RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr);
	RF_Raid5ConfigInfo_t *info = (RF_Raid5ConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;

	*diskids = info->stripeIdentifier[stripeID % raidPtr->numCol];
}

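/* RAID level 5 does no parity declustering: stripe IDs map one-to-one
 * onto parity stripe IDs, and each parity stripe contains a single
 * reconstruction unit */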
void
rf_MapSIDToPSIDRAID5(RF_RaidLayout_t *layoutPtr, RF_StripeNum_t stripeID,
		     RF_StripeNum_t *psID, RF_ReconUnitNum_t *which_ru)
{
	*which_ru = 0;
	*psID = stripeID;
}
/* select an algorithm for performing an access.  Returns, via
 * createFunc, a pointer to the function that will create the DAG for
 * the access, or NULL if the access cannot be serviced.
 */
void
rf_RaidFiveDagSelect(RF_Raid_t *raidPtr, RF_IoType_t type,
		     RF_AccessStripeMap_t *asmap,
		     RF_VoidFuncPtr *createFunc)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_PhysDiskAddr_t *failedPDA = NULL;
	RF_RowCol_t fcol;
	RF_RowStatus_t rstat;
	int     prior_recon;

	RF_ASSERT(RF_IO_IS_R_OR_W(type));

	if (asmap->numDataFailed + asmap->numParityFailed > 1) {
		RF_ERRORMSG("Multiple disks failed in a single group!  Aborting I/O operation.\n");
		*createFunc = NULL;
		return;
	} else
		if (asmap->numDataFailed + asmap->numParityFailed == 1) {

			/* if under recon & already reconstructed, redirect
			 * the access to the spare drive and eliminate the
			 * failure indication */
			failedPDA = asmap->failedPDAs[0];
			fcol = failedPDA->col;
			rstat = raidPtr->status;
			prior_recon = (rstat == rf_rs_reconfigured) || (
			    (rstat == rf_rs_reconstructing) ?
			    rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, failedPDA->startSector) : 0
			    );
			if (prior_recon) {
				RF_RowCol_t oc = failedPDA->col;
				RF_SectorNum_t oo = failedPDA->startSector;

				if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {	/* redirect to dist
											 * spare space */

					if (failedPDA == asmap->parityInfo) {

						/* parity has failed */
						(layoutPtr->map->MapParity) (raidPtr, failedPDA->raidAddress,
						    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						if (asmap->parityInfo->next) {	/* redir 2nd component,
										 * if any */
							RF_PhysDiskAddr_t *p = asmap->parityInfo->next;
							RF_SectorNum_t SUoffs = p->startSector % layoutPtr->sectorsPerStripeUnit;
							p->col = failedPDA->col;
							p->startSector = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, failedPDA->startSector) +
							    SUoffs;	/* cheating:
									 * startSector is not
									 * really a RAID address */
						}
					} else
						if (asmap->parityInfo->next && failedPDA == asmap->parityInfo->next) {
							RF_ASSERT(0);	/* should not ever
									 * happen */
						} else {

							/* data has failed */
							(layoutPtr->map->MapSector) (raidPtr, failedPDA->raidAddress,
							    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						}

				} else {	/* redirect to dedicated spare
						 * space */

					failedPDA->col = raidPtr->Disks[fcol].spareCol;

					/* the parity may have two distinct
					 * components, both of which may need
					 * to be redirected */
					if (asmap->parityInfo->next) {
						if (failedPDA == asmap->parityInfo) {
							failedPDA->next->col = failedPDA->col;
						} else
							if (failedPDA == asmap->parityInfo->next) {	/* paranoid:  should
													 * never occur */
								asmap->parityInfo->col = failedPDA->col;
							}
					}
				}

				RF_ASSERT(failedPDA->col != -1);

				if (rf_dagDebug || rf_mapDebug) {
					printf("raid%d: Redirected type '%c' c %d o %ld -> c %d o %ld\n",
					       raidPtr->raidid, type, oc,
					       (long) oo, failedPDA->col,
					       (long) failedPDA->startSector);
				}
				asmap->numDataFailed = asmap->numParityFailed = 0;
			}
		}
	/* all DAGs begin and end with block/unblock nodes, so the hdrSucc
	 * and termAnt counts should always be 1.  These counts should not
	 * be visible outside the DAG creation routines; manipulating them
	 * here should be removed. */
	if (type == RF_IO_TYPE_READ) {
		if (asmap->numDataFailed == 0)
			*createFunc = (RF_VoidFuncPtr) rf_CreateFaultFreeReadDAG;
		else
			*createFunc = (RF_VoidFuncPtr) rf_CreateRaidFiveDegradedReadDAG;
	} else {

		/* if mirroring, always use large writes.  If the access
		 * requires two distinct parity updates, always do a small
		 * write.  If the stripe contains a failure but the access
		 * does not, do a small write. The first conditional
		 * (numStripeUnitsAccessed <= numDataCol/2) uses a
		 * less-than-or-equal rather than just a less-than because
		 * when G is 3 or 4, numDataCol/2 is 1, and I want
		 * single-stripe-unit updates to use just one disk. */
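		/* e.g. with numDataCol = 6 the threshold is 6/2 = 3: absent
		 * the other small-write triggers above, an access touching
		 * at most 3 data units gets the read-modify-write
		 * (small-write) DAG, while one touching 4 or more gets the
		 * large-write DAG, which recomputes parity from the full
		 * stripe */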
		if ((asmap->numDataFailed + asmap->numParityFailed) == 0) {
			if (rf_suppressLocksAndLargeWrites ||
			    (((asmap->numStripeUnitsAccessed <= (layoutPtr->numDataCol / 2)) && (layoutPtr->numDataCol != 1)) ||
				(asmap->parityInfo->next != NULL) || rf_CheckStripeForFailures(raidPtr, asmap))) {
				*createFunc = (RF_VoidFuncPtr) rf_CreateSmallWriteDAG;
			} else
				*createFunc = (RF_VoidFuncPtr) rf_CreateLargeWriteDAG;
		} else {
			if (asmap->numParityFailed == 1)
				*createFunc = (RF_VoidFuncPtr) rf_CreateNonRedundantWriteDAG;
			else
				if (asmap->numStripeUnitsAccessed != 1 && failedPDA->numSector != layoutPtr->sectorsPerStripeUnit)
					*createFunc = NULL;
				else
					*createFunc = (RF_VoidFuncPtr) rf_CreateDegradedWriteDAG;
		}
	}
}