      1 /*	$NetBSD: rf_dagdegrd.c,v 1.9 2001/10/04 15:58:51 oster Exp $	*/
      2 /*
      3  * Copyright (c) 1995 Carnegie-Mellon University.
      4  * All rights reserved.
      5  *
      6  * Author: Mark Holland, Daniel Stodolsky, William V. Courtright II
      7  *
      8  * Permission to use, copy, modify and distribute this software and
      9  * its documentation is hereby granted, provided that both the copyright
     10  * notice and this permission notice appear in all copies of the
     11  * software, derivative works or modified versions, and any portions
     12  * thereof, and that both notices appear in supporting documentation.
     13  *
     14  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     15  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     16  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     17  *
     18  * Carnegie Mellon requests users of this software to return to
     19  *
     20  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     21  *  School of Computer Science
     22  *  Carnegie Mellon University
     23  *  Pittsburgh PA 15213-3890
     24  *
     25  * any improvements or extensions that they make and grant Carnegie the
     26  * rights to redistribute these changes.
     27  */
     28 
     29 /*
     30  * rf_dagdegrd.c
     31  *
     32  * code for creating degraded read DAGs
     33  */
     34 
     35 #include <dev/raidframe/raidframevar.h>
     36 
     37 #include "rf_archs.h"
     38 #include "rf_raid.h"
     39 #include "rf_dag.h"
     40 #include "rf_dagutils.h"
     41 #include "rf_dagfuncs.h"
     42 #include "rf_debugMem.h"
     43 #include "rf_memchunk.h"
     44 #include "rf_general.h"
     45 #include "rf_dagdegrd.h"
     46 
     47 
     48 /******************************************************************************
     49  *
     50  * General comments on DAG creation:
     51  *
     52  * All DAGs in this file use roll-away error recovery.  Each DAG has a single
     53  * commit node, usually called "Cmt."  If an error occurs before the Cmt node
     54  * is reached, the execution engine will halt forward execution and work
     55  * backward through the graph, executing the undo functions.  Assuming that
     56  * each node in the graph prior to the Cmt node is either undoable and atomic,
     57  * or makes no changes to permanent state, the graph will fail atomically.
     58  * If an error occurs after the Cmt node executes, the engine will roll-forward
     59  * through the graph, blindly executing nodes until it reaches the end.
     60  * If a graph reaches the end, it is assumed to have completed successfully.
     61  *
     62  * A graph has only 1 Cmt node.
     63  *
     64  */
     65 
     66 
     67 /******************************************************************************
     68  *
     69  * The following wrappers map the standard DAG creation interface to the
     70  * DAG creation routines.  Additionally, these wrappers enable experimentation
     71  * with new DAG structures by providing an extra level of indirection, allowing
     72  * the DAG creation routines to be replaced at this single point.
     73  */
     74 
     75 void
     76 rf_CreateRaidFiveDegradedReadDAG(
     77     RF_Raid_t * raidPtr,
     78     RF_AccessStripeMap_t * asmap,
     79     RF_DagHeader_t * dag_h,
     80     void *bp,
     81     RF_RaidAccessFlags_t flags,
     82     RF_AllocListElem_t * allocList)
     83 {
     84 	rf_CreateDegradedReadDAG(raidPtr, asmap, dag_h, bp, flags, allocList,
     85 	    &rf_xorRecoveryFuncs);
     86 }
     87 
     88 
     89 /******************************************************************************
     90  *
     91  * DAG creation code begins here
     92  */
     93 
     94 
     95 /******************************************************************************
     96  * Create a degraded read DAG for RAID level 1
     97  *
     98  * Hdr -> Nil -> R(p/s)d -> Commit -> Trm
     99  *
    100  * The "Rd" node reads data from the surviving disk in the mirror pair
    101  *   Rpd - read of primary copy
    102  *   Rsd - read of secondary copy
    103  *
    104  * Parameters:  raidPtr   - description of the physical array
    105  *              asmap     - logical & physical addresses for this access
    106  *              bp        - buffer ptr (for holding read data)
    107  *              flags     - general flags (e.g. disk locking)
    108  *              allocList - list of memory allocated in DAG creation
    109  *****************************************************************************/
    110 
    111 void
    112 rf_CreateRaidOneDegradedReadDAG(
    113     RF_Raid_t * raidPtr,
    114     RF_AccessStripeMap_t * asmap,
    115     RF_DagHeader_t * dag_h,
    116     void *bp,
    117     RF_RaidAccessFlags_t flags,
    118     RF_AllocListElem_t * allocList)
    119 {
    120 	RF_DagNode_t *nodes, *rdNode, *blockNode, *commitNode, *termNode;
    121 	RF_StripeNum_t parityStripeID;
    122 	RF_ReconUnitNum_t which_ru;
    123 	RF_PhysDiskAddr_t *pda;
    124 	int     useMirror, i;
    125 
    126 	useMirror = 0;
    127 	parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
    128 	    asmap->raidAddress, &which_ru);
    129 	if (rf_dagDebug) {
    130 		printf("[Creating RAID level 1 degraded read DAG]\n");
    131 	}
    132 	dag_h->creator = "RaidOneDegradedReadDAG";
    133 	/* decide whether to read the primary or the mirror copy */
    134 	if (asmap->numDataFailed == 0)
    135 		useMirror = RF_FALSE;
    136 	else
    137 		useMirror = RF_TRUE;
    138 
    139 	/* total number of nodes = 1 + (block + commit + terminator) */
    140 	RF_CallocAndAdd(nodes, 4, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
    141 	i = 0;
    142 	rdNode = &nodes[i];
    143 	i++;
    144 	blockNode = &nodes[i];
    145 	i++;
    146 	commitNode = &nodes[i];
    147 	i++;
    148 	termNode = &nodes[i];
    149 	i++;
    150 
    151 	/* this dag cannot commit until the commit node is reached.  Errors
    152 	 * prior to the commit point imply the dag has failed and must be
    153 	 * retried */
    154 	dag_h->numCommitNodes = 1;
    155 	dag_h->numCommits = 0;
    156 	dag_h->numSuccedents = 1;
    157 
    158 	/* initialize the block, commit, and terminator nodes */
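        	/* (the numeric arguments to rf_InitNode are, in order, the number
        	 * of successors, antecedents, parameters, and results the node
        	 * will have) */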
    159 	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
    160 	    NULL, 1, 0, 0, 0, dag_h, "Nil", allocList);
    161 	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
    162 	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
    163 	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
    164 	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
    165 
    166 	pda = asmap->physInfo;
    167 	RF_ASSERT(pda != NULL);
    168 	/* parityInfo must describe entire parity unit */
    169 	RF_ASSERT(asmap->parityInfo->next == NULL);
    170 
    171 	/* initialize the data node */
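        	/* a disk read node takes four parameters: the physical disk
        	 * address (pda), the buffer to read into, the parity stripe ID,
        	 * and a packed priority/flags/reconstruction-unit word built by
        	 * RF_CREATE_PARAM3 */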
    172 	if (!useMirror) {
    173 		/* read primary copy of data */
    174 		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
    175 		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rpd", allocList);
    176 		rdNode->params[0].p = pda;
    177 		rdNode->params[1].p = pda->bufPtr;
    178 		rdNode->params[2].v = parityStripeID;
    179 		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    180 	} else {
    181 		/* read secondary copy of data */
    182 		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
    183 		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rsd", allocList);
    184 		rdNode->params[0].p = asmap->parityInfo;
    185 		rdNode->params[1].p = pda->bufPtr;
    186 		rdNode->params[2].v = parityStripeID;
    187 		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    188 	}
    189 
    190 	/* connect header to block node */
    191 	RF_ASSERT(dag_h->numSuccedents == 1);
    192 	RF_ASSERT(blockNode->numAntecedents == 0);
    193 	dag_h->succedents[0] = blockNode;
    194 
    195 	/* connect block node to rdnode */
    196 	RF_ASSERT(blockNode->numSuccedents == 1);
    197 	RF_ASSERT(rdNode->numAntecedents == 1);
    198 	blockNode->succedents[0] = rdNode;
    199 	rdNode->antecedents[0] = blockNode;
    200 	rdNode->antType[0] = rf_control;
    201 
    202 	/* connect rdnode to commit node */
    203 	RF_ASSERT(rdNode->numSuccedents == 1);
    204 	RF_ASSERT(commitNode->numAntecedents == 1);
    205 	rdNode->succedents[0] = commitNode;
    206 	commitNode->antecedents[0] = rdNode;
    207 	commitNode->antType[0] = rf_control;
    208 
    209 	/* connect commit node to terminator */
    210 	RF_ASSERT(commitNode->numSuccedents == 1);
    211 	RF_ASSERT(termNode->numAntecedents == 1);
    212 	RF_ASSERT(termNode->numSuccedents == 0);
    213 	commitNode->succedents[0] = termNode;
    214 	termNode->antecedents[0] = commitNode;
    215 	termNode->antType[0] = rf_control;
    216 }
    217 
    218 
    219 
    220 /******************************************************************************
    221  *
    222  * creates a DAG to perform a degraded-mode read of data within one stripe.
    223  * This DAG is as follows:
    224  *
    225  * Hdr -> Block -> Rud -> Xor -> Cmt -> T
    226  *              -> Rrd ->
    227  *              -> Rp -->
    228  *
    229  * Each R node is a successor of the Block node
    230  * Each R node has a single successor arc, which goes to X
    231  * There is one Rud for each chunk of surviving user data requested by the
    232  * user, and one Rrd for each chunk of surviving user data _not_ being read by
    233  * the user
    234  * R = read, ud = user data, rd = recovery (surviving) data, p = parity
    235  * X = XOR, C = Commit, T = terminate
    236  *
    237  * The block node guarantees a single source node.
    238  *
    239  * Note:  The target buffer for the XOR node is set to the actual user buffer
    240  * where the failed data is supposed to end up.  This buffer is zero'd by the
    241  * code here.  Thus, if you create a degraded read dag, use it, and then
    242  * re-use it, you have to be sure to zero the target buffer prior to the re-use.
    243  *
    244  * The recFunc argument at the end specifies the node name and the function
    245  * used to regenerate the lost data from the surviving data and the
    246  * redundancy information.
    247  *
    248  *****************************************************************************/
    249 
    250 void
    251 rf_CreateDegradedReadDAG(
    252     RF_Raid_t * raidPtr,
    253     RF_AccessStripeMap_t * asmap,
    254     RF_DagHeader_t * dag_h,
    255     void *bp,
    256     RF_RaidAccessFlags_t flags,
    257     RF_AllocListElem_t * allocList,
    258     RF_RedFuncs_t * recFunc)
    259 {
    260 	RF_DagNode_t *nodes, *rudNodes, *rrdNodes, *xorNode, *blockNode;
    261 	RF_DagNode_t *commitNode, *rpNode, *termNode;
    262 	int     nNodes, nRrdNodes, nRudNodes, nXorBufs, i;
    263 	int     j, paramNum;
    264 	RF_SectorCount_t sectorsPerSU;
    265 	RF_ReconUnitNum_t which_ru;
    266 	char   *overlappingPDAs;/* a temporary array of flags */
    267 	RF_AccessStripeMapHeader_t *new_asm_h[2];
    268 	RF_PhysDiskAddr_t *pda, *parityPDA;
    269 	RF_StripeNum_t parityStripeID;
    270 	RF_PhysDiskAddr_t *failedPDA;
    271 	RF_RaidLayout_t *layoutPtr;
    272 	char   *rpBuf;
    273 
    274 	layoutPtr = &(raidPtr->Layout);
    275 	/* failedPDA points to the pda within the asm that targets the failed
    276 	 * disk */
    277 	failedPDA = asmap->failedPDAs[0];
    278 	parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr,
    279 	    asmap->raidAddress, &which_ru);
    280 	sectorsPerSU = layoutPtr->sectorsPerStripeUnit;
    281 
    282 	if (rf_dagDebug) {
    283 		printf("[Creating degraded read DAG]\n");
    284 	}
    285 	RF_ASSERT(asmap->numDataFailed == 1);
    286 	dag_h->creator = "DegradedReadDAG";
    287 
    288 	/*
    289          * generate two ASMs identifying the surviving data we need
    290          * in order to recover the lost data
    291          */
    292 
    293 	/* overlappingPDAs array must be zero'd */
    294 	RF_Calloc(overlappingPDAs, asmap->numStripeUnitsAccessed, sizeof(char), (char *));
    295 	rf_GenerateFailedAccessASMs(raidPtr, asmap, failedPDA, dag_h, new_asm_h, &nXorBufs,
    296 	    &rpBuf, overlappingPDAs, allocList);
    297 
    298 	/*
    299          * create all the nodes at once
    300          *
    301          * -1 because no access is generated for the failed pda
    302          */
    303 	nRudNodes = asmap->numStripeUnitsAccessed - 1;
    304 	nRrdNodes = ((new_asm_h[0]) ? new_asm_h[0]->stripeMap->numStripeUnitsAccessed : 0) +
    305 	    ((new_asm_h[1]) ? new_asm_h[1]->stripeMap->numStripeUnitsAccessed : 0);
    306 	nNodes = 5 + nRudNodes + nRrdNodes;	/* block, commit, xor, Rp,
    307 						 * term, Rud, Rrd */
    308 	RF_CallocAndAdd(nodes, nNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *),
    309 	    allocList);
    310 	i = 0;
    311 	blockNode = &nodes[i];
    312 	i++;
    313 	commitNode = &nodes[i];
    314 	i++;
    315 	xorNode = &nodes[i];
    316 	i++;
    317 	rpNode = &nodes[i];
    318 	i++;
    319 	termNode = &nodes[i];
    320 	i++;
    321 	rudNodes = &nodes[i];
    322 	i += nRudNodes;
    323 	rrdNodes = &nodes[i];
    324 	i += nRrdNodes;
    325 	RF_ASSERT(i == nNodes);
    326 
    327 	/* initialize nodes */
    328 	dag_h->numCommitNodes = 1;
    329 	dag_h->numCommits = 0;
    330 	/* this dag cannot commit until the commit node is reached.  Errors
    331 	 * prior to the commit point imply the dag has failed */
    332 	dag_h->numSuccedents = 1;
    333 
    334 	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
    335 	    NULL, nRudNodes + nRrdNodes + 1, 0, 0, 0, dag_h, "Nil", allocList);
    336 	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
    337 	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
    338 	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
    339 	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
    340 	rf_InitNode(xorNode, rf_wait, RF_FALSE, recFunc->simple, rf_NullNodeUndoFunc,
    341 	    NULL, 1, nRudNodes + nRrdNodes + 1, 2 * nXorBufs + 2, 1, dag_h,
    342 	    recFunc->SimpleName, allocList);
    343 
    344 	/* fill in the Rud nodes */
    345 	for (pda = asmap->physInfo, i = 0; i < nRudNodes; i++, pda = pda->next) {
    346 		if (pda == failedPDA) {
    347 			i--;
    348 			continue;
    349 		}
    350 		rf_InitNode(&rudNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
    351 		    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
    352 		    "Rud", allocList);
    353 		RF_ASSERT(pda);
    354 		rudNodes[i].params[0].p = pda;
    355 		rudNodes[i].params[1].p = pda->bufPtr;
    356 		rudNodes[i].params[2].v = parityStripeID;
    357 		rudNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    358 	}
    359 
    360 	/* fill in the Rrd nodes */
    361 	i = 0;
    362 	if (new_asm_h[0]) {
    363 		for (pda = new_asm_h[0]->stripeMap->physInfo;
    364 		    i < new_asm_h[0]->stripeMap->numStripeUnitsAccessed;
    365 		    i++, pda = pda->next) {
    366 			rf_InitNode(&rrdNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
    367 			    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0,
    368 			    dag_h, "Rrd", allocList);
    369 			RF_ASSERT(pda);
    370 			rrdNodes[i].params[0].p = pda;
    371 			rrdNodes[i].params[1].p = pda->bufPtr;
    372 			rrdNodes[i].params[2].v = parityStripeID;
    373 			rrdNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    374 		}
    375 	}
    376 	if (new_asm_h[1]) {
    377 		for (j = 0, pda = new_asm_h[1]->stripeMap->physInfo;
    378 		    j < new_asm_h[1]->stripeMap->numStripeUnitsAccessed;
    379 		    j++, pda = pda->next) {
    380 			rf_InitNode(&rrdNodes[i + j], rf_wait, RF_FALSE, rf_DiskReadFunc,
    381 			    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0,
    382 			    dag_h, "Rrd", allocList);
    383 			RF_ASSERT(pda);
    384 			rrdNodes[i + j].params[0].p = pda;
    385 			rrdNodes[i + j].params[1].p = pda->bufPtr;
    386 			rrdNodes[i + j].params[2].v = parityStripeID;
    387 			rrdNodes[i + j].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    388 		}
    389 	}
    390 	/* make a PDA for the parity unit */
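        	/* the parity is read only for the region that overlaps the failed
        	 * data: same offset within the parity unit, same sector count */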
    391 	RF_MallocAndAdd(parityPDA, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
    392 	parityPDA->row = asmap->parityInfo->row;
    393 	parityPDA->col = asmap->parityInfo->col;
    394 	parityPDA->startSector = ((asmap->parityInfo->startSector / sectorsPerSU)
    395 	    * sectorsPerSU) + (failedPDA->startSector % sectorsPerSU);
    396 	parityPDA->numSector = failedPDA->numSector;
    397 
    398 	/* initialize the Rp node */
    399 	rf_InitNode(rpNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
    400 	    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rp ", allocList);
    401 	rpNode->params[0].p = parityPDA;
    402 	rpNode->params[1].p = rpBuf;
    403 	rpNode->params[2].v = parityStripeID;
    404 	rpNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    405 
    406 	/*
    407          * the last and nastiest step is to assign all
    408          * the parameters of the Xor node
    409          */
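        	/* the params are nXorBufs (pda, buffer) pairs: the Rrd reads
        	 * first, then any Rud reads that overlap the failed unit (range
        	 * restricted to the overlap), then the parity read; the final two
        	 * params are the failed PDA and the raidPtr */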
    410 	paramNum = 0;
    411 	for (i = 0; i < nRrdNodes; i++) {
    412 		/* all the Rrd nodes need to be xored together */
    413 		xorNode->params[paramNum++] = rrdNodes[i].params[0];
    414 		xorNode->params[paramNum++] = rrdNodes[i].params[1];
    415 	}
    416 	for (i = 0; i < nRudNodes; i++) {
    417 		/* any Rud nodes that overlap the failed access need to be
    418 		 * xored in */
    419 		if (overlappingPDAs[i]) {
    420 			RF_MallocAndAdd(pda, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
    421 			bcopy((char *) rudNodes[i].params[0].p, (char *) pda, sizeof(RF_PhysDiskAddr_t));
    422 			rf_RangeRestrictPDA(raidPtr, failedPDA, pda, RF_RESTRICT_DOBUFFER, 0);
    423 			xorNode->params[paramNum++].p = pda;
    424 			xorNode->params[paramNum++].p = pda->bufPtr;
    425 		}
    426 	}
    427 	RF_Free(overlappingPDAs, asmap->numStripeUnitsAccessed * sizeof(char));
    428 
    429 	/* install parity pda as last set of params to be xor'd */
    430 	xorNode->params[paramNum++].p = parityPDA;
    431 	xorNode->params[paramNum++].p = rpBuf;
    432 
    433 	/*
    434          * the last 2 params to the recovery xor node are
    435          * the failed PDA and the raidPtr
    436          */
    437 	xorNode->params[paramNum++].p = failedPDA;
    438 	xorNode->params[paramNum++].p = raidPtr;
    439 	RF_ASSERT(paramNum == 2 * nXorBufs + 2);
    440 
    441 	/*
    442          * The xor node uses results[0] as the target buffer.
    443          * Set pointer and zero the buffer. In the kernel, this
    444          * may be a user buffer in which case we have to remap it.
    445          */
    446 	xorNode->results[0] = failedPDA->bufPtr;
    447 	RF_BZERO(bp, failedPDA->bufPtr, rf_RaidAddressToByte(raidPtr,
    448 		failedPDA->numSector));
    449 
    450 	/* connect nodes to form graph */
    451 	/* connect the header to the block node */
    452 	RF_ASSERT(dag_h->numSuccedents == 1);
    453 	RF_ASSERT(blockNode->numAntecedents == 0);
    454 	dag_h->succedents[0] = blockNode;
    455 
    456 	/* connect the block node to the read nodes */
    457 	RF_ASSERT(blockNode->numSuccedents == (1 + nRrdNodes + nRudNodes));
    458 	RF_ASSERT(rpNode->numAntecedents == 1);
    459 	blockNode->succedents[0] = rpNode;
    460 	rpNode->antecedents[0] = blockNode;
    461 	rpNode->antType[0] = rf_control;
    462 	for (i = 0; i < nRrdNodes; i++) {
    463 		RF_ASSERT(rrdNodes[i].numSuccedents == 1);
    464 		blockNode->succedents[1 + i] = &rrdNodes[i];
    465 		rrdNodes[i].antecedents[0] = blockNode;
    466 		rrdNodes[i].antType[0] = rf_control;
    467 	}
    468 	for (i = 0; i < nRudNodes; i++) {
    469 		RF_ASSERT(rudNodes[i].numSuccedents == 1);
    470 		blockNode->succedents[1 + nRrdNodes + i] = &rudNodes[i];
    471 		rudNodes[i].antecedents[0] = blockNode;
    472 		rudNodes[i].antType[0] = rf_control;
    473 	}
    474 
    475 	/* connect the read nodes to the xor node */
    476 	RF_ASSERT(xorNode->numAntecedents == (1 + nRrdNodes + nRudNodes));
    477 	RF_ASSERT(rpNode->numSuccedents == 1);
    478 	rpNode->succedents[0] = xorNode;
    479 	xorNode->antecedents[0] = rpNode;
    480 	xorNode->antType[0] = rf_trueData;
    481 	for (i = 0; i < nRrdNodes; i++) {
    482 		RF_ASSERT(rrdNodes[i].numSuccedents == 1);
    483 		rrdNodes[i].succedents[0] = xorNode;
    484 		xorNode->antecedents[1 + i] = &rrdNodes[i];
    485 		xorNode->antType[1 + i] = rf_trueData;
    486 	}
    487 	for (i = 0; i < nRudNodes; i++) {
    488 		RF_ASSERT(rudNodes[i].numSuccedents == 1);
    489 		rudNodes[i].succedents[0] = xorNode;
    490 		xorNode->antecedents[1 + nRrdNodes + i] = &rudNodes[i];
    491 		xorNode->antType[1 + nRrdNodes + i] = rf_trueData;
    492 	}
    493 
    494 	/* connect the xor node to the commit node */
    495 	RF_ASSERT(xorNode->numSuccedents == 1);
    496 	RF_ASSERT(commitNode->numAntecedents == 1);
    497 	xorNode->succedents[0] = commitNode;
    498 	commitNode->antecedents[0] = xorNode;
    499 	commitNode->antType[0] = rf_control;
    500 
    501 	/* connect the termNode to the commit node */
    502 	RF_ASSERT(commitNode->numSuccedents == 1);
    503 	RF_ASSERT(termNode->numAntecedents == 1);
    504 	RF_ASSERT(termNode->numSuccedents == 0);
    505 	commitNode->succedents[0] = termNode;
    506 	termNode->antType[0] = rf_control;
    507 	termNode->antecedents[0] = commitNode;
    508 }
    509 
    510 #if (RF_INCLUDE_CHAINDECLUSTER > 0)
    511 /******************************************************************************
    512  * Create a degraded read DAG for Chained Declustering
    513  *
    514  * Hdr -> Nil -> R(p/s)d -> Cmt -> Trm
    515  *
    516  * The "Rd" node reads data from the surviving disk in the mirror pair
    517  *   Rpd - read of primary copy
    518  *   Rsd - read of secondary copy
    519  *
    520  * Parameters:  raidPtr   - description of the physical array
    521  *              asmap     - logical & physical addresses for this access
    522  *              bp        - buffer ptr (for holding read data)
    523  *              flags     - general flags (e.g. disk locking)
    524  *              allocList - list of memory allocated in DAG creation
    525  *****************************************************************************/
    526 
    527 void
    528 rf_CreateRaidCDegradedReadDAG(
    529     RF_Raid_t * raidPtr,
    530     RF_AccessStripeMap_t * asmap,
    531     RF_DagHeader_t * dag_h,
    532     void *bp,
    533     RF_RaidAccessFlags_t flags,
    534     RF_AllocListElem_t * allocList)
    535 {
    536 	RF_DagNode_t *nodes, *rdNode, *blockNode, *commitNode, *termNode;
    537 	RF_StripeNum_t parityStripeID;
    538 	int     useMirror, i, shiftable;
    539 	RF_ReconUnitNum_t which_ru;
    540 	RF_PhysDiskAddr_t *pda;
    541 
    542 	if ((asmap->numDataFailed + asmap->numParityFailed) == 0) {
    543 		shiftable = RF_TRUE;
    544 	} else {
    545 		shiftable = RF_FALSE;
    546 	}
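        	/* if nothing in this stripe has failed, the read is eligible to
        	 * be shifted to the next disk in line (the mirror copy);
        	 * rf_compute_workload_shift() below decides whether to shift so
        	 * that the extra load is spread across the surviving disks */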
    547 	useMirror = 0;
    548 	parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
    549 	    asmap->raidAddress, &which_ru);
    550 
    551 	if (rf_dagDebug) {
    552 		printf("[Creating RAID C degraded read DAG]\n");
    553 	}
    554 	dag_h->creator = "RaidCDegradedReadDAG";
    555 	/* decide whether to read the primary or the mirror copy */
    556 	if (asmap->numDataFailed == 0)
    557 		useMirror = RF_FALSE;
    558 	else
    559 		useMirror = RF_TRUE;
    560 
    561 	/* total number of nodes = 1 + (block + commit + terminator) */
    562 	RF_CallocAndAdd(nodes, 4, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
    563 	i = 0;
    564 	rdNode = &nodes[i];
    565 	i++;
    566 	blockNode = &nodes[i];
    567 	i++;
    568 	commitNode = &nodes[i];
    569 	i++;
    570 	termNode = &nodes[i];
    571 	i++;
    572 
    573 	/*
    574          * This dag can not commit until the commit node is reached.
    575          * Errors prior to the commit point imply the dag has failed
    576          * and must be retried.
    577          */
    578 	dag_h->numCommitNodes = 1;
    579 	dag_h->numCommits = 0;
    580 	dag_h->numSuccedents = 1;
    581 
    582 	/* initialize the block, commit, and terminator nodes */
    583 	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
    584 	    NULL, 1, 0, 0, 0, dag_h, "Nil", allocList);
    585 	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
    586 	    NULL, 1, 1, 0, 0, dag_h, "Cmt", allocList);
    587 	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
    588 	    NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
    589 
    590 	pda = asmap->physInfo;
    591 	RF_ASSERT(pda != NULL);
    592 	/* parityInfo must describe entire parity unit */
    593 	RF_ASSERT(asmap->parityInfo->next == NULL);
    594 
    595 	/* initialize the data node */
    596 	if (!useMirror) {
    597 		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
    598 		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rpd", allocList);
    599 		if (shiftable && rf_compute_workload_shift(raidPtr, pda)) {
    600 			/* shift this read to the next disk in line */
    601 			rdNode->params[0].p = asmap->parityInfo;
    602 			rdNode->params[1].p = pda->bufPtr;
    603 			rdNode->params[2].v = parityStripeID;
    604 			rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    605 		} else {
    606 			/* read primary copy */
    607 			rdNode->params[0].p = pda;
    608 			rdNode->params[1].p = pda->bufPtr;
    609 			rdNode->params[2].v = parityStripeID;
    610 			rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    611 		}
    612 	} else {
    613 		/* read secondary copy of data */
    614 		rf_InitNode(rdNode, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
    615 		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rsd", allocList);
    616 		rdNode->params[0].p = asmap->parityInfo;
    617 		rdNode->params[1].p = pda->bufPtr;
    618 		rdNode->params[2].v = parityStripeID;
    619 		rdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
    620 	}
    621 
    622 	/* connect header to block node */
    623 	RF_ASSERT(dag_h->numSuccedents == 1);
    624 	RF_ASSERT(blockNode->numAntecedents == 0);
    625 	dag_h->succedents[0] = blockNode;
    626 
    627 	/* connect block node to rdnode */
    628 	RF_ASSERT(blockNode->numSuccedents == 1);
    629 	RF_ASSERT(rdNode->numAntecedents == 1);
    630 	blockNode->succedents[0] = rdNode;
    631 	rdNode->antecedents[0] = blockNode;
    632 	rdNode->antType[0] = rf_control;
    633 
    634 	/* connect rdnode to commit node */
    635 	RF_ASSERT(rdNode->numSuccedents == 1);
    636 	RF_ASSERT(commitNode->numAntecedents == 1);
    637 	rdNode->succedents[0] = commitNode;
    638 	commitNode->antecedents[0] = rdNode;
    639 	commitNode->antType[0] = rf_control;
    640 
    641 	/* connect commit node to terminator */
    642 	RF_ASSERT(commitNode->numSuccedents == 1);
    643 	RF_ASSERT(termNode->numAntecedents == 1);
    644 	RF_ASSERT(termNode->numSuccedents == 0);
    645 	commitNode->succedents[0] = termNode;
    646 	termNode->antecedents[0] = commitNode;
    647 	termNode->antType[0] = rf_control;
    648 }
    649 #endif /* (RF_INCLUDE_CHAINDECLUSTER > 0) */
    650 
    651 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0) || (RF_INCLUDE_EVENODD > 0)
    652 /*
    653  * XXX move this elsewhere?
    654  */
    655 void
    656 rf_DD_GenerateFailedAccessASMs(
    657     RF_Raid_t * raidPtr,
    658     RF_AccessStripeMap_t * asmap,
    659     RF_PhysDiskAddr_t ** pdap,
    660     int *nNodep,
    661     RF_PhysDiskAddr_t ** pqpdap,
    662     int *nPQNodep,
    663     RF_AllocListElem_t * allocList)
    664 {
    665 	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
    666 	int     PDAPerDisk, i;
    667 	RF_SectorCount_t secPerSU = layoutPtr->sectorsPerStripeUnit;
    668 	int     numDataCol = layoutPtr->numDataCol;
    669 	int     state;
    670 	RF_SectorNum_t suoff, suend;
    671 	unsigned firstDataCol, napdas, count;
    672 	RF_SectorNum_t fone_start, fone_end, ftwo_start = 0, ftwo_end = 0;
    673 	RF_PhysDiskAddr_t *fone = asmap->failedPDAs[0], *ftwo = asmap->failedPDAs[1];
    674 	RF_PhysDiskAddr_t *pda_p;
    675 	RF_PhysDiskAddr_t *phys_p;
    676 	RF_RaidAddr_t sosAddr;
    677 
    678 	/* determine how many pda's we will have to generate per unaccessed
    679 	 * stripe unit.  If there is only one failed data unit, it is one; if
    680 	 * two, possibly two, depending on whether they overlap. */
    681 
    682 	fone_start = rf_StripeUnitOffset(layoutPtr, fone->startSector);
    683 	fone_end = fone_start + fone->numSector;
    684 
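        /* CONS_PDA(if, start, num) fills in *pda_p with a physical disk address
         * covering sectors [start, start+num) of the redundancy unit named by
         * the asmap field `if' (parityInfo or qInfo), and allocates a read
         * buffer for it */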
    685 #define CONS_PDA(if,start,num) \
    686   pda_p->row = asmap->if->row;    pda_p->col = asmap->if->col; \
    687   pda_p->startSector = ((asmap->if->startSector / secPerSU) * secPerSU) + start; \
    688   pda_p->numSector = num; \
    689   pda_p->next = NULL; \
    690   RF_MallocAndAdd(pda_p->bufPtr,rf_RaidAddressToByte(raidPtr,num),(char *), allocList)
    691 
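        	/* state 1: a single failed data unit; read the matching region of
        	 * the P and Q units.  state 2: two failed units whose combined
        	 * size exceeds a stripe unit; read the P and Q units in their
        	 * entirety.  state 3: two disjoint failed regions; read each
        	 * region of P and Q separately (four redundancy pda's). */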
    692 	if (asmap->numDataFailed == 1) {
    693 		PDAPerDisk = 1;
    694 		state = 1;
    695 		RF_MallocAndAdd(*pqpdap, 2 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
    696 		pda_p = *pqpdap;
    697 		/* build p */
    698 		CONS_PDA(parityInfo, fone_start, fone->numSector);
    699 		pda_p->type = RF_PDA_TYPE_PARITY;
    700 		pda_p++;
    701 		/* build q */
    702 		CONS_PDA(qInfo, fone_start, fone->numSector);
    703 		pda_p->type = RF_PDA_TYPE_Q;
    704 	} else {
    705 		ftwo_start = rf_StripeUnitOffset(layoutPtr, ftwo->startSector);
    706 		ftwo_end = ftwo_start + ftwo->numSector;
    707 		if (fone->numSector + ftwo->numSector > secPerSU) {
    708 			PDAPerDisk = 1;
    709 			state = 2;
    710 			RF_MallocAndAdd(*pqpdap, 2 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
    711 			pda_p = *pqpdap;
    712 			CONS_PDA(parityInfo, 0, secPerSU);
    713 			pda_p->type = RF_PDA_TYPE_PARITY;
    714 			pda_p++;
    715 			CONS_PDA(qInfo, 0, secPerSU);
    716 			pda_p->type = RF_PDA_TYPE_Q;
    717 		} else {
    718 			PDAPerDisk = 2;
    719 			state = 3;
    720 			/* four of them, fone, then ftwo */
    721 			RF_MallocAndAdd(*pqpdap, 4 * sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
    722 			pda_p = *pqpdap;
    723 			CONS_PDA(parityInfo, fone_start, fone->numSector);
    724 			pda_p->type = RF_PDA_TYPE_PARITY;
    725 			pda_p++;
    726 			CONS_PDA(qInfo, fone_start, fone->numSector);
    727 			pda_p->type = RF_PDA_TYPE_Q;
    728 			pda_p++;
    729 			CONS_PDA(parityInfo, ftwo_start, ftwo->numSector);
    730 			pda_p->type = RF_PDA_TYPE_PARITY;
    731 			pda_p++;
    732 			CONS_PDA(qInfo, ftwo_start, ftwo->numSector);
    733 			pda_p->type = RF_PDA_TYPE_Q;
    734 		}
    735 	}
    736 	/* figure out number of nonaccessed pda */
    737 	napdas = PDAPerDisk * (numDataCol - asmap->numStripeUnitsAccessed - (ftwo == NULL ? 1 : 0));
    738 	*nPQNodep = PDAPerDisk;
    739 
    740 	/* sweep over the accessed pda's, figuring out the number of
    741 	 * additional pda's to generate.  Of course, skip the failed ones. */
    742 
    743 	count = 0;
    744 	for (pda_p = asmap->physInfo; pda_p; pda_p = pda_p->next) {
    745 		if ((pda_p == fone) || (pda_p == ftwo))
    746 			continue;
    747 		suoff = rf_StripeUnitOffset(layoutPtr, pda_p->startSector);
    748 		suend = suoff + pda_p->numSector;
    749 		switch (state) {
    750 		case 1:	/* one failed PDA to overlap */
    751 			/* if a PDA doesn't contain the failed unit, it can
    752 			 * only miss the start or end, not both */
    753 			if ((suoff > fone_start) || (suend < fone_end))
    754 				count++;
    755 			break;
    756 		case 2:	/* whole stripe */
    757 		if (suoff)	/* leak at beginning */
    758 				count++;
    759 		if (suend < secPerSU)	/* leak at end */
    760 				count++;
    761 			break;
    762 		case 3:	/* two disjoint units */
    763 			if ((suoff > fone_start) || (suend < fone_end))
    764 				count++;
    765 			if ((suoff > ftwo_start) || (suend < ftwo_end))
    766 				count++;
    767 			break;
    768 		default:
    769 			RF_PANIC();
    770 		}
    771 	}
    772 
    773 	napdas += count;
    774 	*nNodep = napdas;
    775 	if (napdas == 0)
    776 		return;		/* short circuit */
    777 
    778 	/* allocate up our list of pda's */
    779 
    780 	RF_CallocAndAdd(pda_p, napdas, sizeof(RF_PhysDiskAddr_t), (RF_PhysDiskAddr_t *), allocList);
    781 	*pdap = pda_p;
    782 
    783 	/* linkem together */
    784 	for (i = 0; i < (napdas - 1); i++)
    785 		pda_p[i].next = pda_p + (i + 1);
    786 
    787 	/* march through the stripe units up to the first accessed disk */
    788 	firstDataCol = rf_RaidAddressToStripeUnitID(&(raidPtr->Layout), asmap->physInfo->raidAddress) % numDataCol;
    789 	sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, asmap->raidAddress);
    790 	for (i = 0; i < firstDataCol; i++) {
    791 		if ((pda_p - (*pdap)) == napdas)
    792 			continue;
    793 		pda_p->type = RF_PDA_TYPE_DATA;
    794 		pda_p->raidAddress = sosAddr + (i * secPerSU);
    795 		(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    796 		/* skip over dead disks */
    797 		if (RF_DEAD_DISK(raidPtr->Disks[pda_p->row][pda_p->col].status))
    798 			continue;
    799 		switch (state) {
    800 		case 1:	/* fone */
    801 			pda_p->numSector = fone->numSector;
    802 			pda_p->raidAddress += fone_start;
    803 			pda_p->startSector += fone_start;
    804 			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    805 			break;
    806 		case 2:	/* full stripe */
    807 			pda_p->numSector = secPerSU;
    808 			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, secPerSU), (char *), allocList);
    809 			break;
    810 		case 3:	/* two slabs */
    811 			pda_p->numSector = fone->numSector;
    812 			pda_p->raidAddress += fone_start;
    813 			pda_p->startSector += fone_start;
    814 			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    815 			pda_p++;
    816 			pda_p->type = RF_PDA_TYPE_DATA;
    817 			pda_p->raidAddress = sosAddr + (i * secPerSU);
    818 			(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    819 			pda_p->numSector = ftwo->numSector;
    820 			pda_p->raidAddress += ftwo_start;
    821 			pda_p->startSector += ftwo_start;
    822 			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    823 			break;
    824 		default:
    825 			RF_PANIC();
    826 		}
    827 		pda_p++;
    828 	}
    829 
    830 	/* march through the touched stripe units */
    831 	for (phys_p = asmap->physInfo; phys_p; phys_p = phys_p->next, i++) {
    832 		if ((phys_p == asmap->failedPDAs[0]) || (phys_p == asmap->failedPDAs[1]))
    833 			continue;
    834 		suoff = rf_StripeUnitOffset(layoutPtr, phys_p->startSector);
    835 		suend = suoff + phys_p->numSector;
    836 		switch (state) {
    837 		case 1:	/* single buffer */
    838 			if (suoff > fone_start) {
    839 				RF_ASSERT(suend >= fone_end);
    840 				/* The data read starts after the mapped
    841 				 * access, snip off the beginning */
    842 				pda_p->numSector = suoff - fone_start;
    843 				pda_p->raidAddress = sosAddr + (i * secPerSU) + fone_start;
    844 				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    845 				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    846 				pda_p++;
    847 			}
    848 			if (suend < fone_end) {
    849 				RF_ASSERT(suoff <= fone_start);
    850 				/* The data read stops before the end of the
    851 				 * failed access, extend */
    852 				pda_p->numSector = fone_end - suend;
    853 				pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
    854 				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    855 				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    856 				pda_p++;
    857 			}
    858 			break;
    859 		case 2:	/* whole stripe unit */
    860 			RF_ASSERT((suoff == 0) || (suend == secPerSU));
    861 			if (suend < secPerSU) {	/* short read, snip from end
    862 						 * on */
    863 				pda_p->numSector = secPerSU - suend;
    864 				pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
    865 				(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    866 				RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    867 				pda_p++;
    868 			} else
    869 				if (suoff > 0) {	/* short at front */
    870 					pda_p->numSector = suoff;
    871 					pda_p->raidAddress = sosAddr + (i * secPerSU);
    872 					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    873 					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    874 					pda_p++;
    875 				}
    876 			break;
    877 		case 3:	/* two nonoverlapping failures */
    878 			if ((suoff > fone_start) || (suend < fone_end)) {
    879 				if (suoff > fone_start) {
    880 					RF_ASSERT(suend >= fone_end);
    881 					/* The data read starts after the
    882 					 * mapped access, snip off the
    883 					 * beginning */
    884 					pda_p->numSector = suoff - fone_start;
    885 					pda_p->raidAddress = sosAddr + (i * secPerSU) + fone_start;
    886 					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    887 					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    888 					pda_p++;
    889 				}
    890 				if (suend < fone_end) {
    891 					RF_ASSERT(suoff <= fone_start);
    892 					/* The data read stops before the end
    893 					 * of the failed access, extend */
    894 					pda_p->numSector = fone_end - suend;
    895 					pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
    896 					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    897 					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    898 					pda_p++;
    899 				}
    900 			}
    901 			if ((suoff > ftwo_start) || (suend < ftwo_end)) {
    902 				if (suoff > ftwo_start) {
    903 					RF_ASSERT(suend >= ftwo_end);
    904 					/* The data read starts after the
    905 					 * mapped access, snip off the
    906 					 * beginning */
    907 					pda_p->numSector = suoff - ftwo_start;
    908 					pda_p->raidAddress = sosAddr + (i * secPerSU) + ftwo_start;
    909 					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    910 					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    911 					pda_p++;
    912 				}
    913 				if (suend < ftwo_end) {
    914 					RF_ASSERT(suoff <= ftwo_start);
    915 					/* The data read stops before the end
    916 					 * of the failed access, extend */
    917 					pda_p->numSector = ftwo_end - suend;
    918 					pda_p->raidAddress = sosAddr + (i * secPerSU) + suend;	/* off by one? */
    919 					(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    920 					RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    921 					pda_p++;
    922 				}
    923 			}
    924 			break;
    925 		default:
    926 			RF_PANIC();
    927 		}
    928 	}
    929 
    930 	/* after the last accessed disk */
    931 	for (; i < numDataCol; i++) {
    932 		if ((pda_p - (*pdap)) == napdas)
    933 			continue;
    934 		pda_p->type = RF_PDA_TYPE_DATA;
    935 		pda_p->raidAddress = sosAddr + (i * secPerSU);
    936 		(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    937 		/* skip over dead disks */
    938 		if (RF_DEAD_DISK(raidPtr->Disks[pda_p->row][pda_p->col].status))
    939 			continue;
    940 		switch (state) {
    941 		case 1:	/* fone */
    942 			pda_p->numSector = fone->numSector;
    943 			pda_p->raidAddress += fone_start;
    944 			pda_p->startSector += fone_start;
    945 			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    946 			break;
    947 		case 2:	/* full stripe */
    948 			pda_p->numSector = secPerSU;
    949 			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, secPerSU), (char *), allocList);
    950 			break;
    951 		case 3:	/* two slabs */
    952 			pda_p->numSector = fone->numSector;
    953 			pda_p->raidAddress += fone_start;
    954 			pda_p->startSector += fone_start;
    955 			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    956 			pda_p++;
    957 			pda_p->type = RF_PDA_TYPE_DATA;
    958 			pda_p->raidAddress = sosAddr + (i * secPerSU);
    959 			(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->row), &(pda_p->col), &(pda_p->startSector), 0);
    960 			pda_p->numSector = ftwo->numSector;
    961 			pda_p->raidAddress += ftwo_start;
    962 			pda_p->startSector += ftwo_start;
    963 			RF_MallocAndAdd(pda_p->bufPtr, rf_RaidAddressToByte(raidPtr, pda_p->numSector), (char *), allocList);
    964 			break;
    965 		default:
    966 			RF_PANIC();
    967 		}
    968 		pda_p++;
    969 	}
    970 
    971 	RF_ASSERT(pda_p - *pdap == napdas);
    972 	return;
    973 }
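        /* INIT_DISK_NODE sets up a disk read node whose successors are the
         * unblock and recovery nodes and whose antecedent is the block node;
         * DISK_NODE_PARAMS fills in the standard four disk read parameters
         * (pda, buffer, parity stripe ID, packed priority/flags/ru word) from
         * the given pda */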
    974 #define INIT_DISK_NODE(node,name) \
    975 rf_InitNode(node, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 2,1,4,0, dag_h, name, allocList); \
    976 (node)->succedents[0] = unblockNode; \
    977 (node)->succedents[1] = recoveryNode; \
    978 (node)->antecedents[0] = blockNode; \
    979 (node)->antType[0] = rf_control
    980 
    981 #define DISK_NODE_PARAMS(_node_,_p_) \
    982   (_node_).params[0].p = _p_ ; \
    983   (_node_).params[1].p = (_p_)->bufPtr; \
    984   (_node_).params[2].v = parityStripeID; \
    985   (_node_).params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru)
    986 
    987 void
    988 rf_DoubleDegRead(
    989     RF_Raid_t * raidPtr,
    990     RF_AccessStripeMap_t * asmap,
    991     RF_DagHeader_t * dag_h,
    992     void *bp,
    993     RF_RaidAccessFlags_t flags,
    994     RF_AllocListElem_t * allocList,
    995     char *redundantReadNodeName,
    996     char *recoveryNodeName,
    997     int (*recovFunc) (RF_DagNode_t *))
    998 {
    999 	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
   1000 	RF_DagNode_t *nodes, *rudNodes, *rrdNodes, *recoveryNode, *blockNode,
   1001 	       *unblockNode, *rpNodes, *rqNodes, *termNode;
   1002 	RF_PhysDiskAddr_t *pda, *pqPDAs;
   1003 	RF_PhysDiskAddr_t *npdas;
   1004 	int     nNodes, nRrdNodes, nRudNodes, i;
   1005 	RF_ReconUnitNum_t which_ru;
   1006 	int     nReadNodes, nPQNodes;
   1007 	RF_PhysDiskAddr_t *failedPDA = asmap->failedPDAs[0];
   1008 	RF_PhysDiskAddr_t *failedPDAtwo = asmap->failedPDAs[1];
   1009 	RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr, asmap->raidAddress, &which_ru);
   1010 
   1011 	if (rf_dagDebug)
   1012 		printf("[Creating Double Degraded Read DAG]\n");
   1013 	rf_DD_GenerateFailedAccessASMs(raidPtr, asmap, &npdas, &nRrdNodes, &pqPDAs, &nPQNodes, allocList);
   1014 
   1015 	nRudNodes = asmap->numStripeUnitsAccessed - (asmap->numDataFailed);
   1016 	nReadNodes = nRrdNodes + nRudNodes + 2 * nPQNodes;
   1017 	nNodes = 4 /* block, unblock, recovery, term */ + nReadNodes;
   1018 
   1019 	RF_CallocAndAdd(nodes, nNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
   1020 	i = 0;
   1021 	blockNode = &nodes[i];
   1022 	i += 1;
   1023 	unblockNode = &nodes[i];
   1024 	i += 1;
   1025 	recoveryNode = &nodes[i];
   1026 	i += 1;
   1027 	termNode = &nodes[i];
   1028 	i += 1;
   1029 	rudNodes = &nodes[i];
   1030 	i += nRudNodes;
   1031 	rrdNodes = &nodes[i];
   1032 	i += nRrdNodes;
   1033 	rpNodes = &nodes[i];
   1034 	i += nPQNodes;
   1035 	rqNodes = &nodes[i];
   1036 	i += nPQNodes;
   1037 	RF_ASSERT(i == nNodes);
   1038 
   1039 	dag_h->numSuccedents = 1;
   1040 	dag_h->succedents[0] = blockNode;
   1041 	dag_h->creator = "DoubleDegRead";
   1042 	dag_h->numCommits = 0;
   1043 	dag_h->numCommitNodes = 1;	/* unblock */
   1044 
   1045 	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 2, 0, 0, dag_h, "Trm", allocList);
   1046 	termNode->antecedents[0] = unblockNode;
   1047 	termNode->antType[0] = rf_control;
   1048 	termNode->antecedents[1] = recoveryNode;
   1049 	termNode->antType[1] = rf_control;
   1050 
   1051 	/* init the block and unblock nodes */
   1052 	/* The block node has all the read nodes as successors; similarly, the
   1053 	 * unblock node has all the read nodes as predecessors. */
   1054 	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nReadNodes, 0, 0, 0, dag_h, "Nil", allocList);
   1055 	rf_InitNode(unblockNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nReadNodes, 0, 0, dag_h, "Nil", allocList);
   1056 
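        	/* the rud, rrd, rp and rq nodes were carved from one contiguous
        	 * array, so indexing from rudNodes here (and in the recovery node
        	 * loops below) walks all nReadNodes read nodes */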
   1057 	for (i = 0; i < nReadNodes; i++) {
   1058 		blockNode->succedents[i] = rudNodes + i;
   1059 		unblockNode->antecedents[i] = rudNodes + i;
   1060 		unblockNode->antType[i] = rf_control;
   1061 	}
   1062 	unblockNode->succedents[0] = termNode;
   1063 
   1064 	/* The recovery node has all the reads as predecessors and the term
   1065 	 * node as its successor.  It gets a pda as a param from each read
   1066 	 * node, plus the raidPtr and asmap.  Each failed unit gets a result pda. */
   1067 	rf_InitNode(recoveryNode, rf_wait, RF_FALSE, recovFunc, rf_NullNodeUndoFunc, NULL,
   1068 	    1,			/* successors */
   1069 	    nReadNodes,		/* preds */
   1070 	    nReadNodes + 2,	/* params */
   1071 	    asmap->numDataFailed,	/* results */
   1072 	    dag_h, recoveryNodeName, allocList);
   1073 
   1074 	recoveryNode->succedents[0] = termNode;
   1075 	for (i = 0; i < nReadNodes; i++) {
   1076 		recoveryNode->antecedents[i] = rudNodes + i;
   1077 		recoveryNode->antType[i] = rf_trueData;
   1078 	}
   1079 
   1080 	/* build the read nodes, then come back and fill in recovery params
   1081 	 * and results */
   1082 	pda = asmap->physInfo;
   1083 	for (i = 0; i < nRudNodes; pda = pda->next) {
   1084 		if ((pda == failedPDA) || (pda == failedPDAtwo))
   1085 			continue;
   1086 		INIT_DISK_NODE(rudNodes + i, "Rud");
   1087 		RF_ASSERT(pda);
   1088 		DISK_NODE_PARAMS(rudNodes[i], pda);
   1089 		i++;
   1090 	}
   1091 
   1092 	pda = npdas;
   1093 	for (i = 0; i < nRrdNodes; i++, pda = pda->next) {
   1094 		INIT_DISK_NODE(rrdNodes + i, "Rrd");
   1095 		RF_ASSERT(pda);
   1096 		DISK_NODE_PARAMS(rrdNodes[i], pda);
   1097 	}
   1098 
   1099 	/* redundancy pdas */
   1100 	pda = pqPDAs;
   1101 	INIT_DISK_NODE(rpNodes, "Rp");
   1102 	RF_ASSERT(pda);
   1103 	DISK_NODE_PARAMS(rpNodes[0], pda);
   1104 	pda++;
   1105 	INIT_DISK_NODE(rqNodes, redundantReadNodeName);
   1106 	RF_ASSERT(pda);
   1107 	DISK_NODE_PARAMS(rqNodes[0], pda);
   1108 	if (nPQNodes == 2) {
   1109 		pda++;
   1110 		INIT_DISK_NODE(rpNodes + 1, "Rp");
   1111 		RF_ASSERT(pda);
   1112 		DISK_NODE_PARAMS(rpNodes[1], pda);
   1113 		pda++;
   1114 		INIT_DISK_NODE(rqNodes + 1, redundantReadNodeName);
   1115 		RF_ASSERT(pda);
   1116 		DISK_NODE_PARAMS(rqNodes[1], pda);
   1117 	}
   1118 	/* fill in recovery node params */
   1119 	for (i = 0; i < nReadNodes; i++)
   1120 		recoveryNode->params[i] = rudNodes[i].params[0];	/* pda */
   1121 	recoveryNode->params[i++].p = (void *) raidPtr;
   1122 	recoveryNode->params[i++].p = (void *) asmap;
   1123 	recoveryNode->results[0] = failedPDA;
   1124 	if (asmap->numDataFailed == 2)
   1125 		recoveryNode->results[1] = failedPDAtwo;
   1126 
   1127 	/* zero fill the target data buffers? */
   1128 }
   1129 
   1130 #endif /* (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0) || (RF_INCLUDE_EVENODD > 0) */
   1131