/*	$NetBSD: rf_dagdegwr.c,v 1.37 2023/10/15 18:15:19 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, Daniel Stodolsky, William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * rf_dagdegwr.c
 *
 * code for creating degraded write DAGs
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_dagdegwr.c,v 1.37 2023/10/15 18:15:19 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagutils.h"
#include "rf_dagfuncs.h"
#include "rf_debugMem.h"
#include "rf_general.h"
#include "rf_dagdegwr.h"
#include "rf_map.h"


/******************************************************************************
 *
 * General comments on DAG creation:
 *
 * All DAGs in this file use roll-away error recovery.  Each DAG has a
 * single commit node, usually called "Cmt."  If an error occurs before the
 * Cmt node is reached, the execution engine will halt forward execution
 * and work backward through the graph, executing the undo functions.
 * Assuming that each node in the graph prior to the Cmt node is either
 * undoable and atomic or makes no changes to permanent state, the graph
 * will fail atomically.  If an error occurs after the Cmt node executes,
 * the engine will roll forward through the graph, blindly executing nodes
 * until it reaches the end.  If a graph reaches the end, it is assumed to
 * have completed successfully.
 *
 * A graph has only 1 Cmt node.
 *
 */


/******************************************************************************
 *
 * The following wrappers map the standard DAG creation interface to the
 * DAG creation routines.  Additionally, these wrappers enable
 * experimentation with new DAG structures by providing an extra level of
 * indirection, allowing the DAG creation routines to be replaced at this
 * single point.
 */
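
/*
 * For example (hypothetical name), an experimental degraded-write creator
 * with the same signature could be dropped in at this point without
 * touching any callers:
 *
 *	static RF_CREATE_DAG_FUNC_DECL(rf_CreateMyDegradedWriteDAG);
 *
 * and substituted for rf_CreateSimpleDegradedWriteDAG in
 * rf_CreateDegradedWriteDAG below.
 */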
static
RF_CREATE_DAG_FUNC_DECL(rf_CreateSimpleDegradedWriteDAG)
{
	rf_CommonCreateSimpleDegradedWriteDAG(raidPtr, asmap, dag_h, bp,
	    flags, allocList, 1, rf_RecoveryXorFunc, RF_TRUE);
}

void
rf_CreateDegradedWriteDAG(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
			  RF_DagHeader_t *dag_h, void *bp,
			  RF_RaidAccessFlags_t flags,
			  RF_AllocListElem_t *allocList)
{

	RF_ASSERT(asmap->numDataFailed == 1);
	dag_h->creator = "DegradedWriteDAG";

	/*
	 * If the access writes only a portion of the failed unit, and also
	 * writes some portion of at least one surviving unit, we create two
	 * DAGs, one for the failed component and one for the non-failed
	 * component, and do them sequentially.  Note that the fact that
	 * we're accessing only a portion of the failed unit indicates that
	 * the access either starts or ends in the failed unit, and hence we
	 * need to create only two DAGs.  This is inefficient in that the
	 * same data or parity can get read and written twice using this
	 * structure.  I need to fix this to do the access all at once.
	 */
	RF_ASSERT(!(asmap->numStripeUnitsAccessed != 1 &&
		    asmap->failedPDAs[0]->numSector !=
		    raidPtr->Layout.sectorsPerStripeUnit));
	rf_CreateSimpleDegradedWriteDAG(raidPtr, asmap, dag_h, bp, flags,
	    allocList);
}



/******************************************************************************
 *
 * DAG creation code begins here
 */
#define BUF_ALLOC(num) \
	RF_MallocAndAdd(rf_RaidAddressToByte(raidPtr, num), allocList)



/******************************************************************************
 *
 * CommonCreateSimpleDegradedWriteDAG -- creates a DAG to do a degraded-mode
 * write, which is as follows
 *
 *                                        / {Wnq} --\
 * hdr -> blockNode -> Rod -> Xor -> Cmt -> Wnp ----> unblock -> term
 *                  \  {Rod} /            \  Wnd ---/
 *                                        \ {Wnd} -/
 *
 * commit nodes: Xor, Wnd
 *
 * IMPORTANT:
 * This DAG generator does not work for double-degraded archs since it does
 * not generate Q.
 *
 * This dag is essentially identical to the large-write dag, except that the
 * write to the failed data unit is suppressed.
 *
 * IMPORTANT:  this dag does not work in the case where the access writes
 * only a portion of the failed unit, and also writes some portion of at
 * least one surviving SU.  this case is handled in CreateDegradedWriteDAG
 * above.
 *
 * The block & unblock nodes are leftovers from a previous version.  They
 * do nothing, but I haven't deleted them because it would be a tremendous
 * effort to put them back in.
 *
 * This dag is used whenever one of the data units in a write has failed.
 * If it is the parity unit that failed, the nonredundant write dag (below)
 * is used.
 *****************************************************************************/
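
/*
 * Note on the parameters: the single-fault instantiation above passes
 * nfaults == 1 and redFunc == rf_RecoveryXorFunc, so only P is written.
 * With nfaults == 2 (declustered P+Q or RAID 6 builds), the same generator
 * additionally allocates a "Wnq" write node and a second XOR result buffer
 * for Q.
 */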
void
rf_CommonCreateSimpleDegradedWriteDAG(RF_Raid_t *raidPtr,
				      RF_AccessStripeMap_t *asmap,
				      RF_DagHeader_t *dag_h, void *bp,
				      RF_RaidAccessFlags_t flags,
				      RF_AllocListElem_t *allocList,
				      int nfaults,
				      void (*redFunc) (RF_DagNode_t *),
				      int allowBufferRecycle)
{
	int nRrdNodes, nWndNodes, nXorBufs, i, j, paramNum,
	    rdnodesFaked;
	RF_DagNode_t *blockNode, *unblockNode, *wnpNode, *termNode;
#if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
	RF_DagNode_t *wnqNode;
#endif
	RF_DagNode_t *wndNodes, *rrdNodes, *xorNode, *commitNode;
	RF_DagNode_t *tmpNode, *tmpwndNode, *tmprrdNode;
	RF_SectorCount_t sectorsPerSU;
	RF_ReconUnitNum_t which_ru;
	char *xorTargetBuf = NULL;	/* the target buffer for the XOR
					 * operation */
	char overlappingPDAs[RF_MAXCOL];/* a temporary array of flags */
	RF_AccessStripeMapHeader_t *new_asm_h[2];
	RF_PhysDiskAddr_t *pda, *parityPDA;
	RF_StripeNum_t parityStripeID;
	RF_PhysDiskAddr_t *failedPDA;
	RF_RaidLayout_t *layoutPtr;

	layoutPtr = &(raidPtr->Layout);
	parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr,
	    asmap->raidAddress, &which_ru);
	sectorsPerSU = layoutPtr->sectorsPerStripeUnit;
	/* failedPDA points to the pda within the asm that targets the
	 * failed disk */
	failedPDA = asmap->failedPDAs[0];

#if RF_DEBUG_DAG
	if (rf_dagDebug)
		printf("[Creating degraded-write DAG]\n");
#endif

	RF_ASSERT(asmap->numDataFailed == 1);
	dag_h->creator = "SimpleDegradedWriteDAG";

	/*
	 * Generate two ASMs identifying the surviving data
	 * we need in order to recover the lost data.
	 */
	/* overlappingPDAs array must be zero'd */
	memset(overlappingPDAs, 0, RF_MAXCOL);
	rf_GenerateFailedAccessASMs(raidPtr, asmap, failedPDA, dag_h,
	    new_asm_h, &nXorBufs, NULL, overlappingPDAs, allocList);

	/* create all the nodes at once */
	nWndNodes = asmap->numStripeUnitsAccessed - 1;	/* no access is
							 * generated for the
							 * failed pda */

	nRrdNodes = ((new_asm_h[0]) ? new_asm_h[0]->stripeMap->numStripeUnitsAccessed : 0) +
	    ((new_asm_h[1]) ? new_asm_h[1]->stripeMap->numStripeUnitsAccessed : 0);
	/*
	 * XXX
	 *
	 * There's a bug with a complete stripe overwrite: that means 0
	 * reads of old data, and the rest of the DAG generation code
	 * doesn't like that.  A release is coming, and I don't wanna risk
	 * breaking a critical DAG generator, so here's what I'm gonna do:
	 * if there are no read nodes, I'm gonna fake there being a read
	 * node, and I'm gonna swap in a no-op node in its place (to make
	 * all the link-up code happy).  This should be fixed at some point.
	 *   --jimz
	 */
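	/*
	 * Illustration of the case above (hypothetical access): a write
	 * covering every data unit in the stripe needs no surviving old
	 * data for the XOR, so rf_GenerateFailedAccessASMs() produces no
	 * read ASMs and nRrdNodes comes out 0.  The workaround substitutes
	 * a single no-op "RrN" node so the block -> read -> xor link-up
	 * code still has exactly one read-side node to wire.
	 */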
	if (nRrdNodes == 0) {
		nRrdNodes = 1;
		rdnodesFaked = 1;
	} else {
		rdnodesFaked = 0;
	}

	blockNode = rf_AllocDAGNode(raidPtr);
	blockNode->list_next = dag_h->nodes;
	dag_h->nodes = blockNode;

	commitNode = rf_AllocDAGNode(raidPtr);
	commitNode->list_next = dag_h->nodes;
	dag_h->nodes = commitNode;

	unblockNode = rf_AllocDAGNode(raidPtr);
	unblockNode->list_next = dag_h->nodes;
	dag_h->nodes = unblockNode;

	termNode = rf_AllocDAGNode(raidPtr);
	termNode->list_next = dag_h->nodes;
	dag_h->nodes = termNode;

	xorNode = rf_AllocDAGNode(raidPtr);
	xorNode->list_next = dag_h->nodes;
	dag_h->nodes = xorNode;

	wnpNode = rf_AllocDAGNode(raidPtr);
	wnpNode->list_next = dag_h->nodes;
	dag_h->nodes = wnpNode;

	for (i = 0; i < nWndNodes; i++) {
		tmpNode = rf_AllocDAGNode(raidPtr);
		tmpNode->list_next = dag_h->nodes;
		dag_h->nodes = tmpNode;
	}
	wndNodes = dag_h->nodes;

	for (i = 0; i < nRrdNodes; i++) {
		tmpNode = rf_AllocDAGNode(raidPtr);
		tmpNode->list_next = dag_h->nodes;
		dag_h->nodes = tmpNode;
	}
	rrdNodes = dag_h->nodes;

#if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
	if (nfaults == 2) {
		wnqNode = rf_AllocDAGNode(raidPtr);
		wnqNode->list_next = dag_h->nodes;
		dag_h->nodes = wnqNode;
	} else {
		wnqNode = NULL;
	}
#endif

	/* this dag cannot commit until all rrd and xor nodes have
	 * completed */
	dag_h->numCommitNodes = 1;
	dag_h->numCommits = 0;
	dag_h->numSuccedents = 1;

	RF_ASSERT(nRrdNodes > 0);
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc,
	    rf_NullNodeUndoFunc, NULL, nRrdNodes, 0, 0, 0, dag_h, "Nil",
	    allocList);
	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc,
	    rf_NullNodeUndoFunc, NULL, nWndNodes + nfaults, 1, 0, 0, dag_h,
	    "Cmt", allocList);
	rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc,
	    rf_NullNodeUndoFunc, NULL, 1, nWndNodes + nfaults, 0, 0, dag_h,
	    "Nil", allocList);
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc,
	    rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
	rf_InitNode(xorNode, rf_wait, RF_FALSE, redFunc, rf_NullNodeUndoFunc,
	    NULL, 1, nRrdNodes, 2 * nXorBufs + 2, nfaults, dag_h, "Xrc",
	    allocList);

	/*
	 * Fill in the Rrd nodes.  If any of the rrd buffers are the same
	 * size as the failed buffer, save a pointer to it so we can use it
	 * as the target of the XOR.  The pdas in the rrd nodes have been
	 * range-restricted, so if a buffer is the same size as the failed
	 * buffer, it must also be at the same alignment within the SU.
	 */
	i = 0;
	tmprrdNode = rrdNodes;
	if (new_asm_h[0]) {
		for (i = 0, pda = new_asm_h[0]->stripeMap->physInfo;
		     i < new_asm_h[0]->stripeMap->numStripeUnitsAccessed;
		     i++, pda = pda->next) {
			rf_InitNode(tmprrdNode, rf_wait, RF_FALSE,
			    rf_DiskReadFunc, rf_DiskReadUndoFunc,
			    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rrd",
			    allocList);
			RF_ASSERT(pda);
			tmprrdNode->params[0].p = pda;
			tmprrdNode->params[1].p = pda->bufPtr;
			tmprrdNode->params[2].v = parityStripeID;
			tmprrdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);
			tmprrdNode = tmprrdNode->list_next;
		}
	}
	/* i now equals the number of stripe units accessed in new_asm_h[0] */
	/* Note that tmprrdNode simply continues from the loop above, so
	 * there is no need to reassign it here. */
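	/*
	 * The second ASM is walked the same way.  In addition, when
	 * allowBufferRecycle is set and a surviving read covers exactly as
	 * many sectors as the failed PDA, its buffer is remembered in
	 * xorTargetBuf and recycled as the XOR destination, saving one
	 * buffer allocation below.
	 */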
	if (new_asm_h[1]) {
		for (j = 0, pda = new_asm_h[1]->stripeMap->physInfo;
		     j < new_asm_h[1]->stripeMap->numStripeUnitsAccessed;
		     j++, pda = pda->next) {
			rf_InitNode(tmprrdNode, rf_wait, RF_FALSE,
			    rf_DiskReadFunc, rf_DiskReadUndoFunc,
			    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rrd",
			    allocList);
			RF_ASSERT(pda);
			tmprrdNode->params[0].p = pda;
			tmprrdNode->params[1].p = pda->bufPtr;
			tmprrdNode->params[2].v = parityStripeID;
			tmprrdNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);
			if (allowBufferRecycle && (pda->numSector == failedPDA->numSector))
				xorTargetBuf = pda->bufPtr;
			tmprrdNode = tmprrdNode->list_next;
		}
	}
	if (rdnodesFaked) {
		/*
		 * This is where we init that fake no-op read node
		 * (XXX should the wakeup func be different?)
		 */
		/* note that rrdNodes will just be a single node... */
		rf_InitNode(rrdNodes, rf_wait, RF_FALSE, rf_NullNodeFunc,
		    rf_NullNodeUndoFunc, NULL, 1, 1, 0, 0, dag_h, "RrN",
		    allocList);
	}
	/*
	 * Make a PDA for the parity unit.  The parity PDA should start at
	 * the same offset into the SU as the failed PDA.
	 */
	/* Danner comment: I don't think this copy is really necessary.  We
	 * are in one of two cases here.  (1) The entire failed unit is
	 * written.  Then asmap->parityInfo will describe the entire parity.
	 * (2) We are only writing a subset of the failed unit and nothing
	 * else.  Then asmap->parityInfo describes the failed unit and the
	 * copy can also be avoided. */

	parityPDA = rf_AllocPhysDiskAddr(raidPtr);
	parityPDA->next = dag_h->pda_cleanup_list;
	dag_h->pda_cleanup_list = parityPDA;
	parityPDA->col = asmap->parityInfo->col;
	parityPDA->startSector = ((asmap->parityInfo->startSector / sectorsPerSU)
	    * sectorsPerSU) + (failedPDA->startSector % sectorsPerSU);
	parityPDA->numSector = failedPDA->numSector;

	if (!xorTargetBuf) {
		xorTargetBuf = rf_AllocBuffer(raidPtr, dag_h,
		    rf_RaidAddressToByte(raidPtr, failedPDA->numSector));
	}
	/* init the Wnp node */
	rf_InitNode(wnpNode, rf_wait, RF_FALSE, rf_DiskWriteFunc,
	    rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
	    "Wnp", allocList);
	wnpNode->params[0].p = parityPDA;
	wnpNode->params[1].p = xorTargetBuf;
	wnpNode->params[2].v = parityStripeID;
	wnpNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);

#if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
	/* fill in the Wnq node */
	if (nfaults == 2) {
		parityPDA = RF_MallocAndAdd(sizeof(*parityPDA), allocList);
		parityPDA->col = asmap->qInfo->col;
		parityPDA->startSector = ((asmap->qInfo->startSector / sectorsPerSU)
		    * sectorsPerSU) + (failedPDA->startSector % sectorsPerSU);
		parityPDA->numSector = failedPDA->numSector;

		rf_InitNode(wnqNode, rf_wait, RF_FALSE, rf_DiskWriteFunc,
		    rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0,
		    dag_h, "Wnq", allocList);
		wnqNode->params[0].p = parityPDA;
		xorNode->results[1] = BUF_ALLOC(failedPDA->numSector);
		wnqNode->params[1].p = xorNode->results[1];
		wnqNode->params[2].v = parityStripeID;
		wnqNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);
	}
#endif
	/* fill in the Wnd nodes */
	tmpwndNode = wndNodes;
	for (pda = asmap->physInfo, i = 0; i < nWndNodes; i++, pda = pda->next) {
		if (pda == failedPDA) {
			i--;
			continue;
		}
		rf_InitNode(tmpwndNode, rf_wait, RF_FALSE,
		    rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnd", allocList);
		RF_ASSERT(pda);
		tmpwndNode->params[0].p = pda;
		tmpwndNode->params[1].p = pda->bufPtr;
		tmpwndNode->params[2].v = parityStripeID;
		tmpwndNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);
		tmpwndNode = tmpwndNode->list_next;
	}
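
	/*
	 * Sketch of the Xrc parameter array that the code below constructs
	 * (the total must satisfy the RF_ASSERT further down:
	 * paramNum == 2 * nXorBufs + 2):
	 *
	 *	(pda, buf) pair for each Rrd node (unless faked)
	 *	(pda, buf) pair for each Wnd node overlapping the failed
	 *	           access, range-restricted to the failed region
	 *	(failedPDA, failedPDA->bufPtr) -- the new data
	 *	failedPDA, raidPtr             -- always the last two params
	 */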
	/* fill in the results of the xor node */
	xorNode->results[0] = xorTargetBuf;

	/* fill in the params of the xor node */

	paramNum = 0;
	if (rdnodesFaked == 0) {
		tmprrdNode = rrdNodes;
		for (i = 0; i < nRrdNodes; i++) {
			/* all the Rrd nodes need to be xored together */
			xorNode->params[paramNum++] = tmprrdNode->params[0];
			xorNode->params[paramNum++] = tmprrdNode->params[1];
			tmprrdNode = tmprrdNode->list_next;
		}
	}
	tmpwndNode = wndNodes;
	for (i = 0; i < nWndNodes; i++) {
		/* any Wnd nodes that overlap the failed access need to be
		 * xored in */
		if (overlappingPDAs[i]) {
			pda = rf_AllocPhysDiskAddr(raidPtr);
			memcpy((char *) pda, (char *) tmpwndNode->params[0].p, sizeof(RF_PhysDiskAddr_t));
			/* add it into the pda_cleanup_list *after* the copy, TYVM */
			pda->next = dag_h->pda_cleanup_list;
			dag_h->pda_cleanup_list = pda;
			rf_RangeRestrictPDA(raidPtr, failedPDA, pda, RF_RESTRICT_DOBUFFER, 0);
			xorNode->params[paramNum++].p = pda;
			xorNode->params[paramNum++].p = pda->bufPtr;
		}
		tmpwndNode = tmpwndNode->list_next;
	}

	/*
	 * Install the failed PDA into the xor param list so that the
	 * new data gets xor'd in.
	 */
	xorNode->params[paramNum++].p = failedPDA;
	xorNode->params[paramNum++].p = failedPDA->bufPtr;

	/*
	 * The last 2 params to the recovery xor node are always the failed
	 * PDA and the raidPtr.  Install the failedPDA even though we have
	 * just done so above.  This allows us to use the same XOR function
	 * for both degraded reads and degraded writes.
	 */
	xorNode->params[paramNum++].p = failedPDA;
	xorNode->params[paramNum++].p = raidPtr;
	RF_ASSERT(paramNum == 2 * nXorBufs + 2);

	/*
	 * Code to link nodes begins here
	 */

	/* link header to block node */
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* link block node to rd nodes */
	RF_ASSERT(blockNode->numSuccedents == nRrdNodes);
	tmprrdNode = rrdNodes;
	for (i = 0; i < nRrdNodes; i++) {
		RF_ASSERT(tmprrdNode->numAntecedents == 1);
		blockNode->succedents[i] = tmprrdNode;
		tmprrdNode->antecedents[0] = blockNode;
		tmprrdNode->antType[0] = rf_control;
		tmprrdNode = tmprrdNode->list_next;
	}

	/* link read nodes to xor node */
	RF_ASSERT(xorNode->numAntecedents == nRrdNodes);
	tmprrdNode = rrdNodes;
	for (i = 0; i < nRrdNodes; i++) {
		RF_ASSERT(tmprrdNode->numSuccedents == 1);
		tmprrdNode->succedents[0] = xorNode;
		xorNode->antecedents[i] = tmprrdNode;
		xorNode->antType[i] = rf_trueData;
		tmprrdNode = tmprrdNode->list_next;
	}

	/* link xor node to commit node */
	RF_ASSERT(xorNode->numSuccedents == 1);
	RF_ASSERT(commitNode->numAntecedents == 1);
	xorNode->succedents[0] = commitNode;
	commitNode->antecedents[0] = xorNode;
	commitNode->antType[0] = rf_control;

	/* link commit node to wnd nodes */
	RF_ASSERT(commitNode->numSuccedents == nfaults + nWndNodes);
	tmpwndNode = wndNodes;
	for (i = 0; i < nWndNodes; i++) {
		RF_ASSERT(tmpwndNode->numAntecedents == 1);
		commitNode->succedents[i] = tmpwndNode;
		tmpwndNode->antecedents[0] = commitNode;
		tmpwndNode->antType[0] = rf_control;
		tmpwndNode = tmpwndNode->list_next;
	}

	/* link the commit node to wnp, wnq nodes */
	RF_ASSERT(wnpNode->numAntecedents == 1);
	commitNode->succedents[nWndNodes] = wnpNode;
	wnpNode->antecedents[0] = commitNode;
	wnpNode->antType[0] = rf_control;
#if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
	if (nfaults == 2) {
		RF_ASSERT(wnqNode->numAntecedents == 1);
		commitNode->succedents[nWndNodes + 1] = wnqNode;
		wnqNode->antecedents[0] = commitNode;
		wnqNode->antType[0] = rf_control;
	}
#endif
	/* link write new data nodes to unblock node */
	RF_ASSERT(unblockNode->numAntecedents == (nWndNodes + nfaults));
	tmpwndNode = wndNodes;
	for (i = 0; i < nWndNodes; i++) {
		RF_ASSERT(tmpwndNode->numSuccedents == 1);
		tmpwndNode->succedents[0] = unblockNode;
		unblockNode->antecedents[i] = tmpwndNode;
		unblockNode->antType[i] = rf_control;
		tmpwndNode = tmpwndNode->list_next;
	}

	/* link write new parity node to unblock node */
	RF_ASSERT(wnpNode->numSuccedents == 1);
	wnpNode->succedents[0] = unblockNode;
	unblockNode->antecedents[nWndNodes] = wnpNode;
	unblockNode->antType[nWndNodes] = rf_control;

#if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
	/* link write new q node to unblock node */
	if (nfaults == 2) {
		RF_ASSERT(wnqNode->numSuccedents == 1);
		wnqNode->succedents[0] = unblockNode;
		unblockNode->antecedents[nWndNodes + 1] = wnqNode;
		unblockNode->antType[nWndNodes + 1] = rf_control;
	}
#endif
	/* link unblock node to term node */
	RF_ASSERT(unblockNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	unblockNode->succedents[0] = termNode;
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;
}
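
/*
 * CONS_PDA(unit, start, num) fills in *pda_p to address sectors
 * [start, start + num) of the given redundancy unit, relative to the
 * start of that unit's SU.  Worked example with hypothetical numbers:
 * for secPerSU == 32, asmap->parityInfo->startSector == 100 and
 * start == 4, the PDA begins at (100 / 32) * 32 + 4 == 100, i.e. offset
 * 4 into the SU that starts at sector 96.
 */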
#define CONS_PDA(if,start,num) \
	pda_p->col = asmap->if->col; \
	pda_p->startSector = ((asmap->if->startSector / secPerSU) * secPerSU) + start; \
	pda_p->numSector = num; \
	pda_p->next = NULL; \
	pda_p->bufPtr = BUF_ALLOC(num)
#if (RF_INCLUDE_RAID6 > 0) || (RF_INCLUDE_PQ > 0) || (RF_INCLUDE_EVENODD > 0)
void
rf_WriteGenerateFailedAccessASMs(
    RF_Raid_t *raidPtr,
    RF_AccessStripeMap_t *asmap,
    RF_PhysDiskAddr_t **pdap,
    int *nNodep,
    RF_PhysDiskAddr_t **pqpdap,
    int *nPQNodep,
    RF_AllocListElem_t *allocList)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	int PDAPerDisk, i;
	RF_SectorCount_t secPerSU = layoutPtr->sectorsPerStripeUnit;
	int numDataCol = layoutPtr->numDataCol;
	int state;
	unsigned napdas;
	RF_SectorNum_t fone_start, ftwo_start = 0;
	RF_PhysDiskAddr_t *fone = asmap->failedPDAs[0], *ftwo = asmap->failedPDAs[1];
	RF_PhysDiskAddr_t *pda_p;
	RF_RaidAddr_t sosAddr;

	/* determine how many pda's we will have to generate per unaccessed
	 * stripe unit.  If there is only one failed data unit, it is one;
	 * if two, possibly two, depending on whether they overlap. */

	fone_start = rf_StripeUnitOffset(layoutPtr, fone->startSector);

	if (asmap->numDataFailed == 1) {
		PDAPerDisk = 1;
		state = 1;
		*pqpdap = RF_MallocAndAdd(2 * sizeof(**pqpdap), allocList);
		pda_p = *pqpdap;
		/* build p */
		CONS_PDA(parityInfo, fone_start, fone->numSector);
		pda_p->type = RF_PDA_TYPE_PARITY;
		pda_p++;
		/* build q */
		CONS_PDA(qInfo, fone_start, fone->numSector);
		pda_p->type = RF_PDA_TYPE_Q;
	} else {
		ftwo_start = rf_StripeUnitOffset(layoutPtr, ftwo->startSector);
		if (fone->numSector + ftwo->numSector > secPerSU) {
			PDAPerDisk = 1;
			state = 2;
			*pqpdap = RF_MallocAndAdd(2 * sizeof(**pqpdap),
			    allocList);
			pda_p = *pqpdap;
			CONS_PDA(parityInfo, 0, secPerSU);
			pda_p->type = RF_PDA_TYPE_PARITY;
			pda_p++;
			CONS_PDA(qInfo, 0, secPerSU);
			pda_p->type = RF_PDA_TYPE_Q;
		} else {
			PDAPerDisk = 2;
			state = 3;
			/* four of them, fone, then ftwo */
			*pqpdap = RF_MallocAndAdd(4 * sizeof(**pqpdap),
			    allocList);
			pda_p = *pqpdap;
			CONS_PDA(parityInfo, fone_start, fone->numSector);
			pda_p->type = RF_PDA_TYPE_PARITY;
			pda_p++;
			CONS_PDA(qInfo, fone_start, fone->numSector);
			pda_p->type = RF_PDA_TYPE_Q;
			pda_p++;
			CONS_PDA(parityInfo, ftwo_start, ftwo->numSector);
			pda_p->type = RF_PDA_TYPE_PARITY;
			pda_p++;
			CONS_PDA(qInfo, ftwo_start, ftwo->numSector);
			pda_p->type = RF_PDA_TYPE_Q;
		}
	}
	/* figure out the number of non-accessed pdas */
	napdas = PDAPerDisk * (numDataCol - 2);
	*nPQNodep = PDAPerDisk;

	*nNodep = napdas;
	if (napdas == 0)
		return;		/* short circuit */

	/* allocate up our list of pda's */

	pda_p = RF_MallocAndAdd(napdas * sizeof(*pda_p), allocList);
	*pdap = pda_p;

	/* link them together */
	for (i = 0; i < (napdas - 1); i++)
		pda_p[i].next = pda_p + (i + 1);

	sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, asmap->raidAddress);
	for (i = 0; i < numDataCol; i++) {
		if ((pda_p - (*pdap)) == napdas)
			continue;
		pda_p->type = RF_PDA_TYPE_DATA;
		pda_p->raidAddress = sosAddr + (i * secPerSU);
		(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->col), &(pda_p->startSector), 0);
		/* skip over dead disks */
		if (RF_DEAD_DISK(raidPtr->Disks[pda_p->col].status))
			continue;
		switch (state) {
		case 1:	/* fone */
			pda_p->numSector = fone->numSector;
			pda_p->raidAddress += fone_start;
			pda_p->startSector += fone_start;
			pda_p->bufPtr = BUF_ALLOC(pda_p->numSector);
			break;
		case 2:	/* full stripe */
			pda_p->numSector = secPerSU;
			pda_p->bufPtr = BUF_ALLOC(secPerSU);
			break;
		case 3:	/* two slabs */
			pda_p->numSector = fone->numSector;
			pda_p->raidAddress += fone_start;
			pda_p->startSector += fone_start;
			pda_p->bufPtr = BUF_ALLOC(pda_p->numSector);
			pda_p++;
			pda_p->type = RF_PDA_TYPE_DATA;
			pda_p->raidAddress = sosAddr + (i * secPerSU);
			(raidPtr->Layout.map->MapSector) (raidPtr, pda_p->raidAddress, &(pda_p->col), &(pda_p->startSector), 0);
			pda_p->numSector = ftwo->numSector;
			pda_p->raidAddress += ftwo_start;
			pda_p->startSector += ftwo_start;
			pda_p->bufPtr = BUF_ALLOC(pda_p->numSector);
			break;
		default:
			RF_PANIC();
		}
		pda_p++;
	}

	RF_ASSERT(pda_p - *pdap == napdas);
	return;
}
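
/*
 * Every disk I/O node in these DAGs takes the same four parameters, which
 * the helper below fills in from a PDA:
 *
 *	params[0].p = pda                  physical disk address
 *	params[1].p = pda->bufPtr          data buffer
 *	params[2].v = parityStripeID
 *	params[3].v = priority and which_ru, packed by RF_CREATE_PARAM3
 */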
#define DISK_NODE_PDA(node) ((node)->params[0].p)

#define DISK_NODE_PARAMS(_node_,_p_) \
	(_node_).params[0].p = _p_ ; \
	(_node_).params[1].p = (_p_)->bufPtr; \
	(_node_).params[2].v = parityStripeID; \
	(_node_).params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru)

void
rf_DoubleDegSmallWrite(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
		       RF_DagHeader_t *dag_h, void *bp,
		       RF_RaidAccessFlags_t flags,
		       RF_AllocListElem_t *allocList,
		       const char *redundantReadNodeName,
		       const char *redundantWriteNodeName,
		       const char *recoveryNodeName,
		       void (*recovFunc) (RF_DagNode_t *))
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DagNode_t *nodes, *wudNodes, *rrdNodes, *recoveryNode, *blockNode,
	    *unblockNode, *rpNodes, *rqNodes, *wpNodes, *wqNodes, *termNode;
	RF_PhysDiskAddr_t *pda, *pqPDAs;
	RF_PhysDiskAddr_t *npdas;
	int nWriteNodes, nNodes, nReadNodes, nRrdNodes, nWudNodes, i;
	RF_ReconUnitNum_t which_ru;
	int nPQNodes;
	RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr, asmap->raidAddress, &which_ru);

	/* simple small write case: the first part looks like a
	 * reconstruct-read of the failed data units, followed by a write of
	 * all data units not failed. */


	/*
	 *            Hdr
	 *             |
	 *           Block
	 *        /   /  \    \   \
	 *      Rrd Rrd ... Rrd Rp Rq
	 *        \   \   /   /   /
	 *         ------PQ------
	 *        /     |     \
	 *      Wud     Wp     Wq
	 *        \     |     /
	 *         --Unblock--
	 *             |
	 *             T
	 *
	 * Rrd = read recovery data (potentially none)
	 * Wud = write user data (not incl. failed disks)
	 * Wp  = write P (could be two)
	 * Wq  = write Q (could be two)
	 */
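	/*
	 * Worked example with hypothetical numbers: for a layout with
	 * numDataCol == 4, one failed data unit, and no overlap splitting
	 * (PDAPerDisk == 1), rf_WriteGenerateFailedAccessASMs() returns
	 * nRrdNodes == 2 and nPQNodes == 1, so nReadNodes == 2 + 2 * 1 == 4
	 * (Rrd, Rrd, Rp, Rq) and nWriteNodes == nWudNodes + 2
	 * (Wud..., Wp, Wq).
	 */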
	rf_WriteGenerateFailedAccessASMs(raidPtr, asmap, &npdas, &nRrdNodes,
	    &pqPDAs, &nPQNodes, allocList);

	RF_ASSERT(asmap->numDataFailed == 1);

	nWudNodes = asmap->numStripeUnitsAccessed - (asmap->numDataFailed);
	nReadNodes = nRrdNodes + 2 * nPQNodes;
	nWriteNodes = nWudNodes + 2 * nPQNodes;
	nNodes = 4 + nReadNodes + nWriteNodes;

	nodes = RF_MallocAndAdd(nNodes * sizeof(*nodes), allocList);
	blockNode = nodes;
	unblockNode = blockNode + 1;
	termNode = unblockNode + 1;
	recoveryNode = termNode + 1;
	rrdNodes = recoveryNode + 1;
	rpNodes = rrdNodes + nRrdNodes;
	rqNodes = rpNodes + nPQNodes;
	wudNodes = rqNodes + nPQNodes;
	wpNodes = wudNodes + nWudNodes;
	wqNodes = wpNodes + nPQNodes;

	dag_h->creator = "PQ_DDSimpleSmallWrite";
	dag_h->numSuccedents = 1;
	dag_h->succedents[0] = blockNode;
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc,
	    rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;

	/* init the block and unblock nodes */
	/* The block node has all the read nodes as successors */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc,
	    rf_NullNodeUndoFunc, NULL, nReadNodes, 0, 0, 0, dag_h, "Nil",
	    allocList);
	for (i = 0; i < nReadNodes; i++)
		blockNode->succedents[i] = rrdNodes + i;

	/* The unblock node has all the writes as successors */
	rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc,
	    rf_NullNodeUndoFunc, NULL, 1, nWriteNodes, 0, 0, dag_h, "Nil",
	    allocList);
	for (i = 0; i < nWriteNodes; i++) {
		unblockNode->antecedents[i] = wudNodes + i;
		unblockNode->antType[i] = rf_control;
	}
	unblockNode->succedents[0] = termNode;

#define INIT_READ_NODE(node,name) \
	rf_InitNode(node, rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, name, allocList); \
	(node)->succedents[0] = recoveryNode; \
	(node)->antecedents[0] = blockNode; \
	(node)->antType[0] = rf_control;

	/* build the read nodes */
	pda = npdas;
	for (i = 0; i < nRrdNodes; i++, pda = pda->next) {
		INIT_READ_NODE(rrdNodes + i, "rrd");
		DISK_NODE_PARAMS(rrdNodes[i], pda);
	}

	/* read redundancy pdas */
	pda = pqPDAs;
	INIT_READ_NODE(rpNodes, "Rp");
	RF_ASSERT(pda);
	DISK_NODE_PARAMS(rpNodes[0], pda);
	pda++;
	INIT_READ_NODE(rqNodes, redundantReadNodeName);
	RF_ASSERT(pda);
	DISK_NODE_PARAMS(rqNodes[0], pda);
	if (nPQNodes == 2) {
		pda++;
		INIT_READ_NODE(rpNodes + 1, "Rp");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rpNodes[1], pda);
		pda++;
		INIT_READ_NODE(rqNodes + 1, redundantReadNodeName);
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(rqNodes[1], pda);
	}
	/* The recovery node has all reads as predecessors and all writes as
	 * successors.  It generates a result for every write P or write Q
	 * node.  As parameters, it takes a pda per read and a pda per
	 * stripe of user data written.  It also takes as the last params
	 * the raidPtr and asm.  For results, it takes PDA for P & Q. */
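
	/*
	 * Parameter/result layout of the recovery node initialized below
	 * (counts match the rf_InitNode() call):
	 *
	 *	params[0 .. nReadNodes-1]           PDAs of the reads
	 *	params[nReadNodes .. +nWudNodes-1]  PDAs of the user-data
	 *	                                    writes
	 *	params[nReadNodes+nWudNodes]        failedPDAs[0]
	 *	params[nReadNodes+nWudNodes+1]      raidPtr
	 *	params[nReadNodes+nWudNodes+2]      asmap
	 *	results[0 .. 2*nPQNodes-1]          P and Q PDAs to write
	 */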
	rf_InitNode(recoveryNode, rf_wait, RF_FALSE, recovFunc, rf_NullNodeUndoFunc, NULL,
	    nWriteNodes,		/* successors */
	    nReadNodes,			/* preds */
	    nReadNodes + nWudNodes + 3,	/* params */
	    2 * nPQNodes,		/* results */
	    dag_h, recoveryNodeName, allocList);


	for (i = 0; i < nReadNodes; i++) {
		recoveryNode->antecedents[i] = rrdNodes + i;
		recoveryNode->antType[i] = rf_control;
		recoveryNode->params[i].p = DISK_NODE_PDA(rrdNodes + i);
	}
	for (i = 0; i < nWudNodes; i++) {
		recoveryNode->succedents[i] = wudNodes + i;
	}
	recoveryNode->params[nReadNodes + nWudNodes].p = asmap->failedPDAs[0];
	recoveryNode->params[nReadNodes + nWudNodes + 1].p = raidPtr;
	recoveryNode->params[nReadNodes + nWudNodes + 2].p = asmap;

	for (; i < nWriteNodes; i++)
		recoveryNode->succedents[i] = wudNodes + i;

	pda = pqPDAs;
	recoveryNode->results[0] = pda;
	pda++;
	recoveryNode->results[1] = pda;
	if (nPQNodes == 2) {
		pda++;
		recoveryNode->results[2] = pda;
		pda++;
		recoveryNode->results[3] = pda;
	}
	/* fill writes */
#define INIT_WRITE_NODE(node,name) \
	rf_InitNode(node, rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, name, allocList); \
	(node)->succedents[0] = unblockNode; \
	(node)->antecedents[0] = recoveryNode; \
	(node)->antType[0] = rf_control;

	pda = asmap->physInfo;
	for (i = 0; i < nWudNodes; i++) {
		INIT_WRITE_NODE(wudNodes + i, "Wd");
		DISK_NODE_PARAMS(wudNodes[i], pda);
		recoveryNode->params[nReadNodes + i].p = DISK_NODE_PDA(wudNodes + i);
		pda = pda->next;
	}
	/* write redundancy pdas */
	pda = pqPDAs;
	INIT_WRITE_NODE(wpNodes, "Wp");
	RF_ASSERT(pda);
	DISK_NODE_PARAMS(wpNodes[0], pda);
	pda++;
	INIT_WRITE_NODE(wqNodes, "Wq");
	RF_ASSERT(pda);
	DISK_NODE_PARAMS(wqNodes[0], pda);
	if (nPQNodes == 2) {
		pda++;
		INIT_WRITE_NODE(wpNodes + 1, "Wp");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(wpNodes[1], pda);
		pda++;
		INIT_WRITE_NODE(wqNodes + 1, "Wq");
		RF_ASSERT(pda);
		DISK_NODE_PARAMS(wqNodes[1], pda);
	}
}
#endif /* (RF_INCLUDE_RAID6 > 0) || (RF_INCLUDE_PQ > 0) || (RF_INCLUDE_EVENODD > 0) */