/*	$NetBSD: rf_raid5.c,v 1.12 2004/02/29 20:11:26 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_raid5.c -- implements RAID Level 5
 *
 *****************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_raid5.c,v 1.12 2004/02/29 20:11:26 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_raid5.h"
#include "rf_dag.h"
#include "rf_dagffrd.h"
#include "rf_dagffwr.h"
#include "rf_dagdegrd.h"
#include "rf_dagdegwr.h"
#include "rf_dagutils.h"
#include "rf_general.h"
#include "rf_map.h"
#include "rf_utils.h"

typedef struct RF_Raid5ConfigInfo_s {
	RF_RowCol_t **stripeIdentifier;	/* filled in at config time and used
					 * by IdentifyStripe */
}       RF_Raid5ConfigInfo_t;

int
rf_ConfigureRAID5(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
		  RF_Config_t *cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_Raid5ConfigInfo_t *info;
	RF_RowCol_t i, j, startdisk;

	/* create a RAID level 5 configuration structure */
	RF_MallocAndAdd(info, sizeof(RF_Raid5ConfigInfo_t), (RF_Raid5ConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;

	/* the stripe identifier must identify the disks in each stripe, IN
	 * THE ORDER THAT THEY APPEAR IN THE STRIPE. */
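	/*
	 * For example, with numCol = 4 the loop below builds:
	 *     stripe 0: 0 1 2 3
	 *     stripe 1: 3 0 1 2
	 *     stripe 2: 2 3 0 1
	 *     stripe 3: 1 2 3 0
	 * i.e. each successive stripe starts one column earlier, modulo
	 * numCol.  rf_IdentifyStripeRAID5 later indexes this table by
	 * stripeID % numCol.
	 */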
	info->stripeIdentifier = rf_make_2d_array(raidPtr->numCol, raidPtr->numCol, raidPtr->cleanupList);
	if (info->stripeIdentifier == NULL)
		return (ENOMEM);
	startdisk = 0;
	for (i = 0; i < raidPtr->numCol; i++) {
		for (j = 0; j < raidPtr->numCol; j++) {
			info->stripeIdentifier[i][j] = (startdisk + j) % raidPtr->numCol;
		}
		if ((--startdisk) < 0)
			startdisk = raidPtr->numCol - 1;
	}

	/* fill in the remaining layout parameters */
	layoutPtr->numStripe = layoutPtr->stripeUnitsPerDisk;
	layoutPtr->numDataCol = raidPtr->numCol - 1;
	layoutPtr->dataSectorsPerStripe = layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numParityCol = 1;
	layoutPtr->dataStripeUnitsPerDisk = layoutPtr->stripeUnitsPerDisk;

	raidPtr->totalSectors = layoutPtr->stripeUnitsPerDisk * layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;

	return (0);
}

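/*
 * Default number of floating reconstruction buffers for this layout.
 */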
int
rf_GetDefaultNumFloatingReconBuffersRAID5(RF_Raid_t *raidPtr)
{
	return (20);
}

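/*
 * Default head-separation limit for reconstruction, i.e. how far the
 * per-disk reconstruction positions are allowed to drift apart.
 */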
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitRAID5(RF_Raid_t *raidPtr)
{
	return (10);
}
#if !defined(__NetBSD__) && !defined(_KERNEL)
/* not currently used */
int
rf_ShutdownRAID5(RF_Raid_t *raidPtr)
{
	return (0);
}
#endif

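/*
 * Map a RAID address to the column and physical disk sector holding
 * the data.  Worked example with illustrative values: for numCol = 4,
 * numDataCol = 3 and sectorsPerStripeUnit = 32, raidSector 100 gives
 * SUID 3, so the data lives on column 3 % 4 = 3 at disk sector
 * (3 / 3) * 32 + (100 % 32) = 36.
 */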
void
rf_MapSectorRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t raidSector,
		  RF_RowCol_t *col, RF_SectorNum_t *diskSector, int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;
	*col = (SUID % raidPtr->numCol);
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}

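/*
 * Map a RAID address to the column and physical disk sector holding
 * the parity for its stripe.  The parity column rotates right to
 * left: with numCol = 4 it sits on column 3 for stripe 0, column 2
 * for stripe 1, column 1 for stripe 2, column 0 for stripe 3, and
 * then wraps around.
 */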
void
rf_MapParityRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t raidSector,
		  RF_RowCol_t *col, RF_SectorNum_t *diskSector, int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;

	*col = raidPtr->Layout.numDataCol - (SUID / raidPtr->Layout.numDataCol) % raidPtr->numCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}

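/*
 * Return, via *diskids, the column ordering of the stripe containing
 * addr.  The table built in rf_ConfigureRAID5 repeats every numCol
 * stripes, so it is indexed by stripeID % numCol.
 */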
void
rf_IdentifyStripeRAID5(RF_Raid_t *raidPtr, RF_RaidAddr_t addr,
		       RF_RowCol_t **diskids)
{
	RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr);
	RF_Raid5ConfigInfo_t *info = (RF_Raid5ConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;

	*diskids = info->stripeIdentifier[stripeID % raidPtr->numCol];
}

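/*
 * RAID level 5 does no parity declustering, so stripe IDs map
 * one-to-one onto parity stripe IDs and the reconstruction unit is
 * always 0.
 */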
void
rf_MapSIDToPSIDRAID5(RF_RaidLayout_t *layoutPtr, RF_StripeNum_t stripeID,
		     RF_StripeNum_t *psID, RF_ReconUnitNum_t *which_ru)
{
	*which_ru = 0;
	*psID = stripeID;
}
/* Select an algorithm for performing an access.  Returns, via
 * createFunc, a pointer to a function that will create the DAG for
 * the access, or NULL if the access cannot be performed.
 */
void
rf_RaidFiveDagSelect(RF_Raid_t *raidPtr, RF_IoType_t type,
		     RF_AccessStripeMap_t *asmap,
		     RF_VoidFuncPtr *createFunc)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_PhysDiskAddr_t *failedPDA = NULL;
	RF_RowCol_t fcol;
	RF_RowStatus_t rstat;
	int     prior_recon;

	RF_ASSERT(RF_IO_IS_R_OR_W(type));

	if ((asmap->numDataFailed + asmap->numParityFailed > 1) ||
	    (raidPtr->numFailures > 1)){
		if (rf_dagDebug)
			RF_ERRORMSG("Multiple disks failed in a single group!  Aborting I/O operation.\n");
		*createFunc = NULL;
		return;
	}

	if (asmap->numDataFailed + asmap->numParityFailed == 1) {

		/* if under recon & already reconstructed, redirect
		 * the access to the spare drive and eliminate the
		 * failure indication */
		failedPDA = asmap->failedPDAs[0];
		fcol = failedPDA->col;
		rstat = raidPtr->status;
		prior_recon = (rstat == rf_rs_reconfigured) || (
			    (rstat == rf_rs_reconstructing) ?
			    rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, failedPDA->startSector) : 0
			    );
		if (prior_recon) {
			RF_RowCol_t oc = failedPDA->col;
			RF_SectorNum_t oo = failedPDA->startSector;

			if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {	/* redirect to dist
										 * spare space */

				if (failedPDA == asmap->parityInfo) {

					/* parity has failed */
					(layoutPtr->map->MapParity) (raidPtr, failedPDA->raidAddress,
								     &failedPDA->col, &failedPDA->startSector, RF_REMAP);

					if (asmap->parityInfo->next) {	/* redir 2nd component,
									 * if any */
						RF_PhysDiskAddr_t *p = asmap->parityInfo->next;
						RF_SectorNum_t SUoffs = p->startSector % layoutPtr->sectorsPerStripeUnit;
						p->col = failedPDA->col;
						p->startSector = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, failedPDA->startSector) +
							SUoffs;	/* cheating:
								 * startSector is not
								 * really a RAID address */
					}
				} else
					if (asmap->parityInfo->next && failedPDA == asmap->parityInfo->next) {
						RF_ASSERT(0);	/* should not ever
								 * happen */
					} else {

						/* data has failed */
						(layoutPtr->map->MapSector) (raidPtr, failedPDA->raidAddress,
									     &failedPDA->col, &failedPDA->startSector, RF_REMAP);

					}

			} else {	/* redirect to dedicated spare
					 * space */

				failedPDA->col = raidPtr->Disks[fcol].spareCol;

				/* the parity may have two distinct
				 * components, both of which may need
				 * to be redirected */
				if (asmap->parityInfo->next) {
					if (failedPDA == asmap->parityInfo) {
						failedPDA->next->col = failedPDA->col;
					} else
						if (failedPDA == asmap->parityInfo->next) {	/* paranoid:  should
												 * never occur */
							asmap->parityInfo->col = failedPDA->col;
						}
				}
			}

			RF_ASSERT(failedPDA->col != -1);

			if (rf_dagDebug || rf_mapDebug) {
				printf("raid%d: Redirected type '%c' c %d o %ld -> c %d o %ld\n",
				       raidPtr->raidid, type, oc,
				       (long) oo, failedPDA->col,
				       (long) failedPDA->startSector);
			}
			asmap->numDataFailed = asmap->numParityFailed = 0;
		}
	}
	/* All dags begin and end with a block/unblock node, so the hdrSucc
	 * and termAnt counts should always be 1.  Also, these counts should
	 * not be visible outside the dag creation routines; manipulating the
	 * counts here should be removed. */
	if (type == RF_IO_TYPE_READ) {
		if (asmap->numDataFailed == 0)
			*createFunc = (RF_VoidFuncPtr) rf_CreateFaultFreeReadDAG;
		else
			*createFunc = (RF_VoidFuncPtr) rf_CreateRaidFiveDegradedReadDAG;
	} else {


		/* if mirroring, always use large writes.  If the access
		 * requires two distinct parity updates, always do a small
		 * write.  If the stripe contains a failure but the access
		 * does not, do a small write. The first conditional
		 * (numStripeUnitsAccessed <= numDataCol/2) uses a
		 * less-than-or-equal rather than just a less-than because
		 * when G is 3 or 4, numDataCol/2 is 1, and I want
		 * single-stripe-unit updates to use just one disk. */
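		/* For example, with numDataCol = 4 an access touching one or
		 * two stripe units takes the small-write (read-modify-write)
		 * path below, while one covering three or four stripe units
		 * takes the large-write (reconstruct-write) path, provided
		 * the parity update is a single range, the stripe is fault
		 * free, and large writes are not suppressed. */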
		if ((asmap->numDataFailed + asmap->numParityFailed) == 0) {
			if (rf_suppressLocksAndLargeWrites ||
			    (((asmap->numStripeUnitsAccessed <= (layoutPtr->numDataCol / 2)) && (layoutPtr->numDataCol != 1)) ||
				(asmap->parityInfo->next != NULL) || rf_CheckStripeForFailures(raidPtr, asmap))) {
				*createFunc = (RF_VoidFuncPtr) rf_CreateSmallWriteDAG;
			} else
				*createFunc = (RF_VoidFuncPtr) rf_CreateLargeWriteDAG;
		} else {
			if (asmap->numParityFailed == 1)
				*createFunc = (RF_VoidFuncPtr) rf_CreateNonRedundantWriteDAG;
			else
				if (asmap->numStripeUnitsAccessed != 1 && failedPDA->numSector != layoutPtr->sectorsPerStripeUnit)
					*createFunc = NULL;
				else
					*createFunc = (RF_VoidFuncPtr) rf_CreateDegradedWriteDAG;
		}
	}
}