/*	$NetBSD: rf_reconutil.c,v 1.5 2001/11/13 07:11:16 lukem Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/********************************************
 * rf_reconutil.c -- reconstruction utilities
 ********************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_reconutil.c,v 1.5 2001/11/13 07:11:16 lukem Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_desc.h"
#include "rf_reconutil.h"
#include "rf_reconbuffer.h"
#include "rf_general.h"
#include "rf_decluster.h"
#include "rf_raid5_rotatedspare.h"
#include "rf_interdecluster.h"
#include "rf_chaindecluster.h"

/*******************************************************************
 * allocates/frees the reconstruction control information structures
 *******************************************************************/
RF_ReconCtrl_t *
rf_MakeReconControl(reconDesc, frow, fcol, srow, scol)
	RF_RaidReconDesc_t *reconDesc;
	RF_RowCol_t frow;	/* failed row and column */
	RF_RowCol_t fcol;
	RF_RowCol_t srow;	/* identifies which spare we're using */
	RF_RowCol_t scol;
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
	RF_ReconUnitCount_t numSpareRUs;
	RF_ReconCtrl_t *reconCtrlPtr;
	RF_ReconBuffer_t *rbuf;
	RF_LayoutSW_t *lp;
	int     retcode, rc;
	RF_RowCol_t i;

	lp = raidPtr->Layout.map;

	/* make and zero the global reconstruction structure and the per-disk
	 * structure */
	RF_Calloc(reconCtrlPtr, 1, sizeof(RF_ReconCtrl_t), (RF_ReconCtrl_t *));
	RF_Calloc(reconCtrlPtr->perDiskInfo, raidPtr->numCol, sizeof(RF_PerDiskReconCtrl_t), (RF_PerDiskReconCtrl_t *));	/* this zeros it */
	reconCtrlPtr->reconDesc = reconDesc;
	reconCtrlPtr->fcol = fcol;
	reconCtrlPtr->spareRow = srow;
	reconCtrlPtr->spareCol = scol;
	reconCtrlPtr->lastPSID = layoutPtr->numStripe / layoutPtr->SUsPerPU;
	reconCtrlPtr->percentComplete = 0;

	/* initialize each per-disk recon information structure */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].reconCtrl = reconCtrlPtr;
		reconCtrlPtr->perDiskInfo[i].row = frow;
		reconCtrlPtr->perDiskInfo[i].col = i;
		reconCtrlPtr->perDiskInfo[i].curPSID = -1;	/* make it appear as if
								 * we just finished an
								 * RU */
		reconCtrlPtr->perDiskInfo[i].ru_count = RUsPerPU - 1;
	}

	/* Get the number of spare units per disk, and the spare map in case
	 * sparing is distributed. */

	if (lp->GetNumSpareRUs) {
		numSpareRUs = lp->GetNumSpareRUs(raidPtr);
	} else {
		numSpareRUs = 0;
	}

	/*
         * Not all distributed sparing archs need dynamic mappings
         */
	if (lp->InstallSpareTable) {
		retcode = rf_InstallSpareTable(raidPtr, frow, fcol);
		if (retcode) {
			RF_PANIC();	/* XXX fix this */
		}
	}
	/* make the reconstruction map */
	reconCtrlPtr->reconMap = rf_MakeReconMap(raidPtr, (int) (layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit),
	    raidPtr->sectorsPerDisk, numSpareRUs);

	/* make the per-disk reconstruction buffers */
	for (i = 0; i < raidPtr->numCol; i++) {
		reconCtrlPtr->perDiskInfo[i].rbuf = (i == fcol) ? NULL : rf_MakeReconBuffer(raidPtr, frow, i, RF_RBUF_TYPE_EXCLUSIVE);
	}

	/* initialize the event queue */
	rc = rf_mutex_init(&reconCtrlPtr->eq_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (NULL);
	}
	rc = rf_cond_init(&reconCtrlPtr->eq_cond);
	if (rc) {
		/* XXX deallocate, cleanup */
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->eventQueue = NULL;
	reconCtrlPtr->eq_count = 0;

	/* make the floating recon buffers and append them to the free list */
	rc = rf_mutex_init(&reconCtrlPtr->rb_mutex);
	if (rc) {
		/* XXX deallocate, cleanup */
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (NULL);
	}
	reconCtrlPtr->fullBufferList = NULL;
	reconCtrlPtr->priorityList = NULL;
	reconCtrlPtr->floatingRbufs = NULL;
	reconCtrlPtr->committedRbufs = NULL;
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		rbuf = rf_MakeReconBuffer(raidPtr, frow, fcol, RF_RBUF_TYPE_FLOATING);
		rbuf->next = reconCtrlPtr->floatingRbufs;
		reconCtrlPtr->floatingRbufs = rbuf;
	}

	/* create the parity stripe status table */
	reconCtrlPtr->pssTable = rf_MakeParityStripeStatusTable(raidPtr);

	/* set the initial min head sep counter val */
	reconCtrlPtr->minHeadSepCounter = 0;

	return (reconCtrlPtr);
}

void
rf_FreeReconControl(raidPtr, row)
	RF_Raid_t *raidPtr;
	RF_RowCol_t row;
{
	RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl[row];
	RF_ReconBuffer_t *t;
	RF_ReconUnitNum_t i;

	RF_ASSERT(reconCtrlPtr);
	for (i = 0; i < raidPtr->numCol; i++)
		if (reconCtrlPtr->perDiskInfo[i].rbuf)
			rf_FreeReconBuffer(reconCtrlPtr->perDiskInfo[i].rbuf);
	for (i = 0; i < raidPtr->numFloatingReconBufs; i++) {
		t = reconCtrlPtr->floatingRbufs;
		RF_ASSERT(t);
		reconCtrlPtr->floatingRbufs = t->next;
		rf_FreeReconBuffer(t);
	}
	rf_mutex_destroy(&reconCtrlPtr->rb_mutex);
	rf_mutex_destroy(&reconCtrlPtr->eq_mutex);
	rf_cond_destroy(&reconCtrlPtr->eq_cond);
	rf_FreeReconMap(reconCtrlPtr->reconMap);
	rf_FreeParityStripeStatusTable(raidPtr, reconCtrlPtr->pssTable);
	RF_Free(reconCtrlPtr->perDiskInfo, raidPtr->numCol * sizeof(RF_PerDiskReconCtrl_t));
	RF_Free(reconCtrlPtr, sizeof(*reconCtrlPtr));
}
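
/*
 * Illustrative sketch (not part of the build): roughly how a caller might
 * pair rf_MakeReconControl() with rf_FreeReconControl().  The wrapper below
 * and its parameter choices are hypothetical; the real call sites live in
 * the reconstruction code proper, not in this file.
 */
#if 0
static int
example_setup_and_teardown_recon_control(RF_RaidReconDesc_t *reconDesc,
    RF_RowCol_t frow, RF_RowCol_t fcol, RF_RowCol_t srow, RF_RowCol_t scol)
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;

	/* build the control structure for the failed disk at (frow,fcol),
	 * reconstructing to the spare at (srow,scol) */
	raidPtr->reconControl[frow] =
	    rf_MakeReconControl(reconDesc, frow, fcol, srow, scol);
	if (raidPtr->reconControl[frow] == NULL)
		return (1);	/* mutex/cond initialization failed above */

	/* ... reconstruction runs here ... */

	/* release buffers, maps, tables, and the control structure itself */
	rf_FreeReconControl(raidPtr, frow);
	return (0);
}
#endif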


/******************************************************************************
 * computes the default head separation limit
 *****************************************************************************/
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimit(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_HeadSepLimit_t hsl;
	RF_LayoutSW_t *lp;

	lp = raidPtr->Layout.map;
	if (lp->GetDefaultHeadSepLimit == NULL)
		return (-1);
	hsl = lp->GetDefaultHeadSepLimit(raidPtr);
	return (hsl);
}
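
/*
 * Illustrative sketch (not part of the build): querying the per-layout
 * default head separation limit.  Treating -1 as "no limit" is an
 * assumption drawn from the fallback value returned above when the layout
 * provides no GetDefaultHeadSepLimit hook.
 */
#if 0
static void
example_query_head_sep_limit(RF_Raid_t *raidPtr)
{
	RF_HeadSepLimit_t hsl;

	hsl = rf_GetDefaultHeadSepLimit(raidPtr);
	if (hsl == -1) {
		/* layout supplies no default; leave head separation unconstrained */
	} else {
		/* use hsl as the head separation limit for reconstruction */
	}
}
#endif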


/******************************************************************************
 * computes the default number of floating recon buffers
 *****************************************************************************/
int
rf_GetDefaultNumFloatingReconBuffers(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_LayoutSW_t *lp;
	int     nrb;

	lp = raidPtr->Layout.map;
	if (lp->GetDefaultNumFloatingReconBuffers == NULL)
		return (3 * raidPtr->numCol);
	nrb = lp->GetDefaultNumFloatingReconBuffers(raidPtr);
	return (nrb);
}
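
/*
 * Illustrative sketch (not part of the build): consulting the layout's
 * default when sizing the floating buffer pool that rf_MakeReconControl()
 * builds.  Assigning the result to raidPtr->numFloatingReconBufs mirrors
 * the field read in the allocation loop above; the helper itself is
 * hypothetical.
 */
#if 0
static void
example_size_floating_rbuf_pool(RF_Raid_t *raidPtr)
{
	/* falls back to three buffers per column if the layout has no hook */
	raidPtr->numFloatingReconBufs =
	    rf_GetDefaultNumFloatingReconBuffers(raidPtr);
}
#endif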


/******************************************************************************
 * creates and initializes a reconstruction buffer
 *****************************************************************************/
RF_ReconBuffer_t *
rf_MakeReconBuffer(
    RF_Raid_t * raidPtr,
    RF_RowCol_t row,
    RF_RowCol_t col,
    RF_RbufType_t type)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconBuffer_t *t;
	u_int   recon_buffer_size = rf_RaidAddressToByte(raidPtr, layoutPtr->SUsPerRU * layoutPtr->sectorsPerStripeUnit);

	RF_Malloc(t, sizeof(RF_ReconBuffer_t), (RF_ReconBuffer_t *));
	RF_Malloc(t->buffer, recon_buffer_size, (caddr_t));
	RF_Malloc(t->arrived, raidPtr->numCol * sizeof(char), (char *));
	t->raidPtr = raidPtr;
	t->row = row;
	t->col = col;
	t->priority = RF_IO_RECON_PRIORITY;
	t->type = type;
	t->pssPtr = NULL;
	t->next = NULL;
	return (t);
}

/******************************************************************************
 * frees a reconstruction buffer
 *****************************************************************************/
void
rf_FreeReconBuffer(rbuf)
	RF_ReconBuffer_t *rbuf;
{
	RF_Raid_t *raidPtr = rbuf->raidPtr;
	u_int   recon_buffer_size = rf_RaidAddressToByte(raidPtr, raidPtr->Layout.SUsPerRU * raidPtr->Layout.sectorsPerStripeUnit);

	RF_Free(rbuf->arrived, raidPtr->numCol * sizeof(char));
	RF_Free(rbuf->buffer, recon_buffer_size);
	RF_Free(rbuf, sizeof(*rbuf));
}
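
/*
 * Illustrative sketch (not part of the build): allocating one floating
 * reconstruction buffer and releasing it again.  The row/column arguments
 * are placeholders; the real callers are the pool setup in
 * rf_MakeReconControl() above and the reconstruction buffer code.
 */
#if 0
static void
example_make_and_free_rbuf(RF_Raid_t *raidPtr, RF_RowCol_t frow, RF_RowCol_t fcol)
{
	RF_ReconBuffer_t *rbuf;

	/* buffer covers one reconstruction unit (SUsPerRU stripe units) */
	rbuf = rf_MakeReconBuffer(raidPtr, frow, fcol, RF_RBUF_TYPE_FLOATING);

	/* ... hand the buffer to the reconstruction logic ... */

	rf_FreeReconBuffer(rbuf);
}
#endif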


/******************************************************************************
 * debug only:  sanity check the number of floating recon bufs in use
 *****************************************************************************/
void
rf_CheckFloatingRbufCount(raidPtr, dolock)
	RF_Raid_t *raidPtr;
	int     dolock;
{
	RF_ReconParityStripeStatus_t *p;
	RF_PSStatusHeader_t *pssTable;
	RF_ReconBuffer_t *rbuf;
	int     i, j, sum = 0;
	RF_RowCol_t frow = 0;

	for (i = 0; i < raidPtr->numRow; i++)
		if (raidPtr->reconControl[i]) {
			frow = i;
			break;
		}
	RF_ASSERT(frow >= 0);

	if (dolock)
		RF_LOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
	pssTable = raidPtr->reconControl[frow]->pssTable;

	for (i = 0; i < raidPtr->pssTableSize; i++) {
		RF_LOCK_MUTEX(pssTable[i].mutex);
		for (p = pssTable[i].chain; p; p = p->next) {
			rbuf = (RF_ReconBuffer_t *) p->rbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			rbuf = (RF_ReconBuffer_t *) p->writeRbuf;
			if (rbuf && rbuf->type == RF_RBUF_TYPE_FLOATING)
				sum++;

			for (j = 0; j < p->xorBufCount; j++) {
				rbuf = (RF_ReconBuffer_t *) p->rbufsForXor[j];
				RF_ASSERT(rbuf);
				if (rbuf->type == RF_RBUF_TYPE_FLOATING)
					sum++;
			}
		}
		RF_UNLOCK_MUTEX(pssTable[i].mutex);
	}

	for (rbuf = raidPtr->reconControl[frow]->floatingRbufs; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->committedRbufs; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->fullBufferList; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}
	for (rbuf = raidPtr->reconControl[frow]->priorityList; rbuf; rbuf = rbuf->next) {
		if (rbuf->type == RF_RBUF_TYPE_FLOATING)
			sum++;
	}

	RF_ASSERT(sum == raidPtr->numFloatingReconBufs);

	if (dolock)
		RF_UNLOCK_MUTEX(raidPtr->reconControl[frow]->rb_mutex);
}
    340