/*	$NetBSD: rf_dagffwr.c,v 1.5.6.1 2001/10/22 20:41:33 nathanw Exp $	*/
2 1.1 oster /*
3 1.1 oster * Copyright (c) 1995 Carnegie-Mellon University.
4 1.1 oster * All rights reserved.
5 1.1 oster *
6 1.1 oster * Author: Mark Holland, Daniel Stodolsky, William V. Courtright II
7 1.1 oster *
8 1.1 oster * Permission to use, copy, modify and distribute this software and
9 1.1 oster * its documentation is hereby granted, provided that both the copyright
10 1.1 oster * notice and this permission notice appear in all copies of the
11 1.1 oster * software, derivative works or modified versions, and any portions
12 1.1 oster * thereof, and that both notices appear in supporting documentation.
13 1.1 oster *
14 1.1 oster * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 1.1 oster * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 1.1 oster * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 1.1 oster *
18 1.1 oster * Carnegie Mellon requests users of this software to return to
19 1.1 oster *
20 1.1 oster * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
21 1.1 oster * School of Computer Science
22 1.1 oster * Carnegie Mellon University
23 1.1 oster * Pittsburgh PA 15213-3890
24 1.1 oster *
25 1.1 oster * any improvements or extensions that they make and grant Carnegie the
26 1.1 oster * rights to redistribute these changes.
27 1.1 oster */
28 1.1 oster
/*
 * rf_dagffwr.c
 *
 * code for creating fault-free write DAGs
 *
 */
35 1.1 oster
36 1.5.6.1 nathanw #include <dev/raidframe/raidframevar.h>
37 1.5.6.1 nathanw
38 1.1 oster #include "rf_raid.h"
39 1.1 oster #include "rf_dag.h"
40 1.1 oster #include "rf_dagutils.h"
41 1.1 oster #include "rf_dagfuncs.h"
42 1.1 oster #include "rf_debugMem.h"
43 1.1 oster #include "rf_dagffrd.h"
44 1.1 oster #include "rf_memchunk.h"
45 1.1 oster #include "rf_general.h"
46 1.1 oster #include "rf_dagffwr.h"
47 1.1 oster
48 1.1 oster /******************************************************************************
49 1.1 oster *
50 1.1 oster * General comments on DAG creation:
51 1.3 oster *
52 1.1 oster * All DAGs in this file use roll-away error recovery. Each DAG has a single
53 1.1 oster * commit node, usually called "Cmt." If an error occurs before the Cmt node
54 1.1 oster * is reached, the execution engine will halt forward execution and work
55 1.1 oster * backward through the graph, executing the undo functions. Assuming that
56 1.1 oster * each node in the graph prior to the Cmt node are undoable and atomic - or -
57 1.1 oster * does not make changes to permanent state, the graph will fail atomically.
58 1.1 oster * If an error occurs after the Cmt node executes, the engine will roll-forward
59 1.1 oster * through the graph, blindly executing nodes until it reaches the end.
60 1.1 oster * If a graph reaches the end, it is assumed to have completed successfully.
61 1.1 oster *
62 1.1 oster * A graph has only 1 Cmt node.
63 1.1 oster *
64 1.1 oster */
65 1.1 oster
66 1.1 oster
67 1.1 oster /******************************************************************************
68 1.1 oster *
69 1.1 oster * The following wrappers map the standard DAG creation interface to the
70 1.1 oster * DAG creation routines. Additionally, these wrappers enable experimentation
71 1.1 oster * with new DAG structures by providing an extra level of indirection, allowing
72 1.1 oster * the DAG creation routines to be replaced at this single point.
73 1.1 oster */
74 1.1 oster
75 1.1 oster
76 1.3 oster void
77 1.3 oster rf_CreateNonRedundantWriteDAG(
78 1.3 oster RF_Raid_t * raidPtr,
79 1.3 oster RF_AccessStripeMap_t * asmap,
80 1.3 oster RF_DagHeader_t * dag_h,
81 1.3 oster void *bp,
82 1.3 oster RF_RaidAccessFlags_t flags,
83 1.3 oster RF_AllocListElem_t * allocList,
84 1.3 oster RF_IoType_t type)
85 1.1 oster {
86 1.3 oster rf_CreateNonredundantDAG(raidPtr, asmap, dag_h, bp, flags, allocList,
87 1.3 oster RF_IO_TYPE_WRITE);
88 1.1 oster }
89 1.1 oster
90 1.3 oster void
91 1.3 oster rf_CreateRAID0WriteDAG(
92 1.3 oster RF_Raid_t * raidPtr,
93 1.3 oster RF_AccessStripeMap_t * asmap,
94 1.3 oster RF_DagHeader_t * dag_h,
95 1.3 oster void *bp,
96 1.3 oster RF_RaidAccessFlags_t flags,
97 1.3 oster RF_AllocListElem_t * allocList,
98 1.3 oster RF_IoType_t type)
99 1.1 oster {
100 1.3 oster rf_CreateNonredundantDAG(raidPtr, asmap, dag_h, bp, flags, allocList,
101 1.3 oster RF_IO_TYPE_WRITE);
102 1.1 oster }
103 1.1 oster
104 1.3 oster void
105 1.3 oster rf_CreateSmallWriteDAG(
106 1.3 oster RF_Raid_t * raidPtr,
107 1.3 oster RF_AccessStripeMap_t * asmap,
108 1.3 oster RF_DagHeader_t * dag_h,
109 1.3 oster void *bp,
110 1.3 oster RF_RaidAccessFlags_t flags,
111 1.3 oster RF_AllocListElem_t * allocList)
112 1.1 oster {
113 1.3 oster /* "normal" rollaway */
114 1.3 oster rf_CommonCreateSmallWriteDAG(raidPtr, asmap, dag_h, bp, flags, allocList,
115 1.3 oster &rf_xorFuncs, NULL);
116 1.1 oster }
117 1.1 oster
118 1.3 oster void
119 1.3 oster rf_CreateLargeWriteDAG(
120 1.3 oster RF_Raid_t * raidPtr,
121 1.3 oster RF_AccessStripeMap_t * asmap,
122 1.3 oster RF_DagHeader_t * dag_h,
123 1.3 oster void *bp,
124 1.3 oster RF_RaidAccessFlags_t flags,
125 1.3 oster RF_AllocListElem_t * allocList)
126 1.1 oster {
127 1.3 oster /* "normal" rollaway */
128 1.3 oster rf_CommonCreateLargeWriteDAG(raidPtr, asmap, dag_h, bp, flags, allocList,
129 1.3 oster 1, rf_RegularXorFunc, RF_TRUE);
130 1.1 oster }
131 1.1 oster
132 1.1 oster
133 1.1 oster /******************************************************************************
134 1.1 oster *
135 1.1 oster * DAG creation code begins here
136 1.1 oster */
137 1.1 oster
138 1.1 oster
139 1.1 oster /******************************************************************************
140 1.1 oster *
141 1.1 oster * creates a DAG to perform a large-write operation:
142 1.1 oster *
143 1.1 oster * / Rod \ / Wnd \
144 1.1 oster * H -- block- Rod - Xor - Cmt - Wnd --- T
145 1.1 oster * \ Rod / \ Wnp /
146 1.1 oster * \[Wnq]/
147 1.1 oster *
148 1.1 oster * The XOR node also does the Q calculation in the P+Q architecture.
149 1.1 oster * All nodes are before the commit node (Cmt) are assumed to be atomic and
150 1.1 oster * undoable - or - they make no changes to permanent state.
151 1.1 oster *
152 1.1 oster * Rod = read old data
153 1.1 oster * Cmt = commit node
154 1.1 oster * Wnp = write new parity
155 1.1 oster * Wnd = write new data
156 1.1 oster * Wnq = write new "q"
157 1.1 oster * [] denotes optional segments in the graph
158 1.1 oster *
159 1.1 oster * Parameters: raidPtr - description of the physical array
160 1.1 oster * asmap - logical & physical addresses for this access
161 1.1 oster * bp - buffer ptr (holds write data)
162 1.3 oster * flags - general flags (e.g. disk locking)
163 1.1 oster * allocList - list of memory allocated in DAG creation
164 1.1 oster * nfaults - number of faults array can tolerate
165 1.1 oster * (equal to # redundancy units in stripe)
166 1.1 oster * redfuncs - list of redundancy generating functions
167 1.1 oster *
168 1.1 oster *****************************************************************************/
169 1.1 oster
/*
 * Build a large-write (reconstruct-write) DAG:
 *
 *           / Rod \           / Wnd \
 * H -- block- Rod - Xor - Cmt - Wnd --- T
 *           \ Rod /          \ Wnp /
 *                            \[Wnq]/
 *
 * Reads the untouched portions of the stripe (Rod), XORs them with the
 * new data to compute new parity (and Q when nfaults == 2), commits,
 * then writes new data (Wnd), new parity (Wnp), and optionally new Q
 * (Wnq).  All nodes before Cmt are assumed undoable/atomic, giving
 * roll-away recovery.  "bp" and "flags" are part of the standard DAG
 * creation interface but are not referenced in this function body.
 */
void
rf_CommonCreateLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *),
    int allowBufferRecycle)
{
	RF_DagNode_t *nodes, *wndNodes, *rodNodes, *xorNode, *wnpNode;
	RF_DagNode_t *wnqNode, *blockNode, *commitNode, *termNode;
	int nWndNodes, nRodNodes, i, nodeNum, asmNum;
	RF_AccessStripeMapHeader_t *new_asm_h[2];
	RF_StripeNum_t parityStripeID;
	char *sosBuffer, *eosBuffer;
	RF_ReconUnitNum_t which_ru;
	RF_RaidLayout_t *layoutPtr;
	RF_PhysDiskAddr_t *pda;

	layoutPtr = &(raidPtr->Layout);
	parityStripeID = rf_RaidAddressToParityStripeID(layoutPtr, asmap->raidAddress,
	    &which_ru);

	if (rf_dagDebug) {
		printf("[Creating large-write DAG]\n");
	}
	dag_h->creator = "LargeWriteDAG";

	dag_h->numCommitNodes = 1;
	dag_h->numCommits = 0;
	dag_h->numSuccedents = 1;

	/* alloc the nodes: Wnd, xor, commit, block, term, and Wnp */
	/* one Wnd per stripe unit written, plus xor/Wnp/block/commit/term
	 * (= 4 + one write node per redundancy unit; nfaults == 2 adds Wnq) */
	nWndNodes = asmap->numStripeUnitsAccessed;
	RF_CallocAndAdd(nodes, nWndNodes + 4 + nfaults, sizeof(RF_DagNode_t),
	    (RF_DagNode_t *), allocList);
	i = 0;
	wndNodes = &nodes[i];
	i += nWndNodes;
	xorNode = &nodes[i];
	i += 1;
	wnpNode = &nodes[i];
	i += 1;
	blockNode = &nodes[i];
	i += 1;
	commitNode = &nodes[i];
	i += 1;
	termNode = &nodes[i];
	i += 1;
	if (nfaults == 2) {
		wnqNode = &nodes[i];
		i += 1;
	} else {
		wnqNode = NULL;
	}
	/* map the stripe units NOT covered by this access; these become the
	 * Rod (read-old-data) nodes feeding the XOR */
	rf_MapUnaccessedPortionOfStripe(raidPtr, layoutPtr, asmap, dag_h, new_asm_h,
	    &nRodNodes, &sosBuffer, &eosBuffer, allocList);
	if (nRodNodes > 0) {
		RF_CallocAndAdd(rodNodes, nRodNodes, sizeof(RF_DagNode_t),
		    (RF_DagNode_t *), allocList);
	} else {
		rodNodes = NULL;
	}

	/* begin node initialization */
	/* block node fans out to the Rods, or straight to the XOR when the
	 * access covers the whole stripe (no Rods needed) */
	if (nRodNodes > 0) {
		rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
		    NULL, nRodNodes, 0, 0, 0, dag_h, "Nil", allocList);
	} else {
		rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
		    NULL, 1, 0, 0, 0, dag_h, "Nil", allocList);
	}

	/* Cmt is the single commit point: one antecedent (the XOR), and a
	 * successor for every write node (data + parity [+ q]) */
	rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL,
	    nWndNodes + nfaults, 1, 0, 0, dag_h, "Cmt", allocList);
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL,
	    0, nWndNodes + nfaults, 0, 0, dag_h, "Trm", allocList);

	/* initialize the Rod nodes */
	/* walk both unaccessed-region maps (before and after the accessed
	 * range); params are {pda, buf, parityStripeID, priority/ru word} */
	for (nodeNum = asmNum = 0; asmNum < 2; asmNum++) {
		if (new_asm_h[asmNum]) {
			pda = new_asm_h[asmNum]->stripeMap->physInfo;
			while (pda) {
				rf_InitNode(&rodNodes[nodeNum], rf_wait, RF_FALSE, rf_DiskReadFunc,
				    rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
				    "Rod", allocList);
				rodNodes[nodeNum].params[0].p = pda;
				rodNodes[nodeNum].params[1].p = pda->bufPtr;
				rodNodes[nodeNum].params[2].v = parityStripeID;
				rodNodes[nodeNum].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
				    0, 0, which_ru);
				nodeNum++;
				pda = pda->next;
			}
		}
	}
	RF_ASSERT(nodeNum == nRodNodes);

	/* initialize the wnd nodes */
	/* one write-new-data node per accessed stripe unit, writing straight
	 * from the caller's buffers (pda->bufPtr) */
	pda = asmap->physInfo;
	for (i = 0; i < nWndNodes; i++) {
		rf_InitNode(&wndNodes[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnd", allocList);
		RF_ASSERT(pda != NULL);
		wndNodes[i].params[0].p = pda;
		wndNodes[i].params[1].p = pda->bufPtr;
		wndNodes[i].params[2].v = parityStripeID;
		wndNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		pda = pda->next;
	}

	/* initialize the redundancy node */
	/* XOR params: {pda,buf} pair per Wnd and per Rod, plus raidPtr;
	 * results holds nfaults output buffers (parity, and Q if present) */
	if (nRodNodes > 0) {
		rf_InitNode(xorNode, rf_wait, RF_FALSE, redFunc, rf_NullNodeUndoFunc, NULL, 1,
		    nRodNodes, 2 * (nWndNodes + nRodNodes) + 1, nfaults, dag_h,
		    "Xr ", allocList);
	} else {
		rf_InitNode(xorNode, rf_wait, RF_FALSE, redFunc, rf_NullNodeUndoFunc, NULL, 1,
		    1, 2 * (nWndNodes + nRodNodes) + 1, nfaults, dag_h, "Xr ", allocList);
	}
	xorNode->flags |= RF_DAGNODE_FLAG_YIELD;
	for (i = 0; i < nWndNodes; i++) {
		xorNode->params[2 * i + 0] = wndNodes[i].params[0];	/* pda */
		xorNode->params[2 * i + 1] = wndNodes[i].params[1];	/* buf ptr */
	}
	for (i = 0; i < nRodNodes; i++) {
		xorNode->params[2 * (nWndNodes + i) + 0] = rodNodes[i].params[0];	/* pda */
		xorNode->params[2 * (nWndNodes + i) + 1] = rodNodes[i].params[1];	/* buf ptr */
	}
	/* xor node needs to get at RAID information */
	xorNode->params[2 * (nWndNodes + nRodNodes)].p = raidPtr;

	/*
	 * Look for an Rod node that reads a complete SU. If none, alloc a buffer
	 * to receive the parity info. Note that we can't use a new data buffer
	 * because it will not have gotten written when the xor occurs.
	 * (If allowBufferRecycle and no full-SU Rod is found, the loop leaves
	 * i == nRodNodes, which selects the allocate branch below.)
	 */
	if (allowBufferRecycle) {
		for (i = 0; i < nRodNodes; i++) {
			if (((RF_PhysDiskAddr_t *) rodNodes[i].params[0].p)->numSector == raidPtr->Layout.sectorsPerStripeUnit)
				break;
		}
	}
	if ((!allowBufferRecycle) || (i == nRodNodes)) {
		RF_CallocAndAdd(xorNode->results[0], 1,
		    rf_RaidAddressToByte(raidPtr, raidPtr->Layout.sectorsPerStripeUnit),
		    (void *), allocList);
	} else {
		xorNode->results[0] = rodNodes[i].params[1].p;
	}

	/* initialize the Wnp node */
	/* writes the XOR result (results[0]) to the parity unit */
	rf_InitNode(wnpNode, rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
	    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnp", allocList);
	wnpNode->params[0].p = asmap->parityInfo;
	wnpNode->params[1].p = xorNode->results[0];
	wnpNode->params[2].v = parityStripeID;
	wnpNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
	/* parityInfo must describe entire parity unit */
	RF_ASSERT(asmap->parityInfo->next == NULL);

	if (nfaults == 2) {
		/*
		 * We never try to recycle a buffer for the Q calcuation
		 * in addition to the parity. This would cause two buffers
		 * to get smashed during the P and Q calculation, guaranteeing
		 * one would be wrong.
		 */
		RF_CallocAndAdd(xorNode->results[1], 1,
		    rf_RaidAddressToByte(raidPtr, raidPtr->Layout.sectorsPerStripeUnit),
		    (void *), allocList);
		rf_InitNode(wnqNode, rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
		    rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnq", allocList);
		wnqNode->params[0].p = asmap->qInfo;
		wnqNode->params[1].p = xorNode->results[1];
		wnqNode->params[2].v = parityStripeID;
		wnqNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		/* parityInfo must describe entire parity unit */
		RF_ASSERT(asmap->parityInfo->next == NULL);
	}
	/*
	 * Connect nodes to form graph.
	 */

	/* connect dag header to block node */
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	if (nRodNodes > 0) {
		/* connect the block node to the Rod nodes */
		RF_ASSERT(blockNode->numSuccedents == nRodNodes);
		RF_ASSERT(xorNode->numAntecedents == nRodNodes);
		for (i = 0; i < nRodNodes; i++) {
			RF_ASSERT(rodNodes[i].numAntecedents == 1);
			blockNode->succedents[i] = &rodNodes[i];
			rodNodes[i].antecedents[0] = blockNode;
			rodNodes[i].antType[0] = rf_control;

			/* connect the Rod nodes to the Xor node */
			RF_ASSERT(rodNodes[i].numSuccedents == 1);
			rodNodes[i].succedents[0] = xorNode;
			xorNode->antecedents[i] = &rodNodes[i];
			xorNode->antType[i] = rf_trueData;
		}
	} else {
		/* connect the block node to the Xor node */
		RF_ASSERT(blockNode->numSuccedents == 1);
		RF_ASSERT(xorNode->numAntecedents == 1);
		blockNode->succedents[0] = xorNode;
		xorNode->antecedents[0] = blockNode;
		xorNode->antType[0] = rf_control;
	}

	/* connect the xor node to the commit node */
	RF_ASSERT(xorNode->numSuccedents == 1);
	RF_ASSERT(commitNode->numAntecedents == 1);
	xorNode->succedents[0] = commitNode;
	commitNode->antecedents[0] = xorNode;
	commitNode->antType[0] = rf_control;

	/* connect the commit node to the write nodes */
	RF_ASSERT(commitNode->numSuccedents == nWndNodes + nfaults);
	for (i = 0; i < nWndNodes; i++) {
		/* NOTE(review): asserts element 0 (wndNodes->) rather than
		 * wndNodes[i] -- presumably all elements match; confirm */
		RF_ASSERT(wndNodes->numAntecedents == 1);
		commitNode->succedents[i] = &wndNodes[i];
		wndNodes[i].antecedents[0] = commitNode;
		wndNodes[i].antType[0] = rf_control;
	}
	RF_ASSERT(wnpNode->numAntecedents == 1);
	commitNode->succedents[nWndNodes] = wnpNode;
	wnpNode->antecedents[0] = commitNode;
	/* NOTE(review): Cmt->Wnp/Wnq arcs are tagged rf_trueData while the
	 * Cmt->Wnd arcs are rf_control -- confirm the asymmetry is intended */
	wnpNode->antType[0] = rf_trueData;
	if (nfaults == 2) {
		RF_ASSERT(wnqNode->numAntecedents == 1);
		commitNode->succedents[nWndNodes + 1] = wnqNode;
		wnqNode->antecedents[0] = commitNode;
		wnqNode->antType[0] = rf_trueData;
	}
	/* connect the write nodes to the term node */
	RF_ASSERT(termNode->numAntecedents == nWndNodes + nfaults);
	RF_ASSERT(termNode->numSuccedents == 0);
	for (i = 0; i < nWndNodes; i++) {
		/* NOTE(review): checks element 0 only, as above */
		RF_ASSERT(wndNodes->numSuccedents == 1);
		wndNodes[i].succedents[0] = termNode;
		termNode->antecedents[i] = &wndNodes[i];
		termNode->antType[i] = rf_control;
	}
	RF_ASSERT(wnpNode->numSuccedents == 1);
	wnpNode->succedents[0] = termNode;
	termNode->antecedents[nWndNodes] = wnpNode;
	termNode->antType[nWndNodes] = rf_control;
	if (nfaults == 2) {
		RF_ASSERT(wnqNode->numSuccedents == 1);
		wnqNode->succedents[0] = termNode;
		termNode->antecedents[nWndNodes + 1] = wnqNode;
		termNode->antType[nWndNodes + 1] = rf_control;
	}
}
431 1.1 oster /******************************************************************************
432 1.1 oster *
433 1.1 oster * creates a DAG to perform a small-write operation (either raid 5 or pq),
434 1.1 oster * which is as follows:
435 1.1 oster *
436 1.1 oster * Hdr -> Nil -> Rop -> Xor -> Cmt ----> Wnp [Unp] --> Trm
437 1.1 oster * \- Rod X / \----> Wnd [Und]-/
438 1.1 oster * [\- Rod X / \---> Wnd [Und]-/]
439 1.1 oster * [\- Roq -> Q / \--> Wnq [Unq]-/]
440 1.1 oster *
441 1.1 oster * Rop = read old parity
442 1.1 oster * Rod = read old data
443 1.1 oster * Roq = read old "q"
444 1.1 oster * Cmt = commit node
445 1.1 oster * Und = unlock data disk
446 1.1 oster * Unp = unlock parity disk
447 1.1 oster * Unq = unlock q disk
448 1.1 oster * Wnp = write new parity
449 1.1 oster * Wnd = write new data
450 1.1 oster * Wnq = write new "q"
451 1.1 oster * [ ] denotes optional segments in the graph
452 1.1 oster *
453 1.1 oster * Parameters: raidPtr - description of the physical array
454 1.1 oster * asmap - logical & physical addresses for this access
455 1.1 oster * bp - buffer ptr (holds write data)
456 1.3 oster * flags - general flags (e.g. disk locking)
457 1.1 oster * allocList - list of memory allocated in DAG creation
458 1.1 oster * pfuncs - list of parity generating functions
459 1.1 oster * qfuncs - list of q generating functions
460 1.1 oster *
461 1.1 oster * A null qfuncs indicates single fault tolerant
462 1.1 oster *****************************************************************************/
463 1.1 oster
464 1.3 oster void
465 1.3 oster rf_CommonCreateSmallWriteDAG(
466 1.3 oster RF_Raid_t * raidPtr,
467 1.3 oster RF_AccessStripeMap_t * asmap,
468 1.3 oster RF_DagHeader_t * dag_h,
469 1.3 oster void *bp,
470 1.3 oster RF_RaidAccessFlags_t flags,
471 1.3 oster RF_AllocListElem_t * allocList,
472 1.3 oster RF_RedFuncs_t * pfuncs,
473 1.3 oster RF_RedFuncs_t * qfuncs)
474 1.1 oster {
475 1.3 oster RF_DagNode_t *readDataNodes, *readParityNodes, *readQNodes, *termNode;
476 1.3 oster RF_DagNode_t *unlockDataNodes, *unlockParityNodes, *unlockQNodes;
477 1.3 oster RF_DagNode_t *xorNodes, *qNodes, *blockNode, *commitNode, *nodes;
478 1.3 oster RF_DagNode_t *writeDataNodes, *writeParityNodes, *writeQNodes;
479 1.3 oster int i, j, nNodes, totalNumNodes, lu_flag;
480 1.3 oster RF_ReconUnitNum_t which_ru;
481 1.3 oster int (*func) (RF_DagNode_t *), (*undoFunc) (RF_DagNode_t *);
482 1.3 oster int (*qfunc) (RF_DagNode_t *);
483 1.3 oster int numDataNodes, numParityNodes;
484 1.3 oster RF_StripeNum_t parityStripeID;
485 1.3 oster RF_PhysDiskAddr_t *pda;
486 1.3 oster char *name, *qname;
487 1.3 oster long nfaults;
488 1.3 oster
489 1.3 oster nfaults = qfuncs ? 2 : 1;
490 1.3 oster lu_flag = (rf_enableAtomicRMW) ? 1 : 0; /* lock/unlock flag */
491 1.3 oster
492 1.3 oster parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
493 1.3 oster asmap->raidAddress, &which_ru);
494 1.3 oster pda = asmap->physInfo;
495 1.3 oster numDataNodes = asmap->numStripeUnitsAccessed;
496 1.3 oster numParityNodes = (asmap->parityInfo->next) ? 2 : 1;
497 1.3 oster
498 1.3 oster if (rf_dagDebug) {
499 1.3 oster printf("[Creating small-write DAG]\n");
500 1.3 oster }
501 1.3 oster RF_ASSERT(numDataNodes > 0);
502 1.3 oster dag_h->creator = "SmallWriteDAG";
503 1.3 oster
504 1.3 oster dag_h->numCommitNodes = 1;
505 1.3 oster dag_h->numCommits = 0;
506 1.3 oster dag_h->numSuccedents = 1;
507 1.3 oster
508 1.3 oster /*
509 1.3 oster * DAG creation occurs in four steps:
510 1.3 oster * 1. count the number of nodes in the DAG
511 1.3 oster * 2. create the nodes
512 1.3 oster * 3. initialize the nodes
513 1.3 oster * 4. connect the nodes
514 1.3 oster */
515 1.3 oster
516 1.3 oster /*
517 1.3 oster * Step 1. compute number of nodes in the graph
518 1.3 oster */
519 1.3 oster
520 1.3 oster /* number of nodes: a read and write for each data unit a redundancy
521 1.3 oster * computation node for each parity node (nfaults * nparity) a read
522 1.3 oster * and write for each parity unit a block and commit node (2) a
523 1.3 oster * terminate node if atomic RMW an unlock node for each data unit,
524 1.3 oster * redundancy unit */
525 1.3 oster totalNumNodes = (2 * numDataNodes) + (nfaults * numParityNodes)
526 1.3 oster + (nfaults * 2 * numParityNodes) + 3;
527 1.3 oster if (lu_flag) {
528 1.3 oster totalNumNodes += (numDataNodes + (nfaults * numParityNodes));
529 1.3 oster }
530 1.3 oster /*
531 1.3 oster * Step 2. create the nodes
532 1.3 oster */
533 1.3 oster RF_CallocAndAdd(nodes, totalNumNodes, sizeof(RF_DagNode_t),
534 1.3 oster (RF_DagNode_t *), allocList);
535 1.3 oster i = 0;
536 1.3 oster blockNode = &nodes[i];
537 1.3 oster i += 1;
538 1.3 oster commitNode = &nodes[i];
539 1.3 oster i += 1;
540 1.3 oster readDataNodes = &nodes[i];
541 1.3 oster i += numDataNodes;
542 1.3 oster readParityNodes = &nodes[i];
543 1.3 oster i += numParityNodes;
544 1.3 oster writeDataNodes = &nodes[i];
545 1.3 oster i += numDataNodes;
546 1.3 oster writeParityNodes = &nodes[i];
547 1.3 oster i += numParityNodes;
548 1.3 oster xorNodes = &nodes[i];
549 1.3 oster i += numParityNodes;
550 1.3 oster termNode = &nodes[i];
551 1.3 oster i += 1;
552 1.3 oster if (lu_flag) {
553 1.3 oster unlockDataNodes = &nodes[i];
554 1.3 oster i += numDataNodes;
555 1.3 oster unlockParityNodes = &nodes[i];
556 1.3 oster i += numParityNodes;
557 1.3 oster } else {
558 1.3 oster unlockDataNodes = unlockParityNodes = NULL;
559 1.3 oster }
560 1.3 oster if (nfaults == 2) {
561 1.3 oster readQNodes = &nodes[i];
562 1.3 oster i += numParityNodes;
563 1.3 oster writeQNodes = &nodes[i];
564 1.3 oster i += numParityNodes;
565 1.3 oster qNodes = &nodes[i];
566 1.3 oster i += numParityNodes;
567 1.3 oster if (lu_flag) {
568 1.3 oster unlockQNodes = &nodes[i];
569 1.3 oster i += numParityNodes;
570 1.3 oster } else {
571 1.3 oster unlockQNodes = NULL;
572 1.3 oster }
573 1.3 oster } else {
574 1.3 oster readQNodes = writeQNodes = qNodes = unlockQNodes = NULL;
575 1.3 oster }
576 1.3 oster RF_ASSERT(i == totalNumNodes);
577 1.3 oster
578 1.3 oster /*
579 1.3 oster * Step 3. initialize the nodes
580 1.3 oster */
581 1.3 oster /* initialize block node (Nil) */
582 1.3 oster nNodes = numDataNodes + (nfaults * numParityNodes);
583 1.3 oster rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
584 1.3 oster NULL, nNodes, 0, 0, 0, dag_h, "Nil", allocList);
585 1.3 oster
586 1.3 oster /* initialize commit node (Cmt) */
587 1.3 oster rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
588 1.3 oster NULL, nNodes, (nfaults * numParityNodes), 0, 0, dag_h, "Cmt", allocList);
589 1.3 oster
590 1.3 oster /* initialize terminate node (Trm) */
591 1.3 oster rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
592 1.3 oster NULL, 0, nNodes, 0, 0, dag_h, "Trm", allocList);
593 1.3 oster
594 1.3 oster /* initialize nodes which read old data (Rod) */
595 1.3 oster for (i = 0; i < numDataNodes; i++) {
596 1.3 oster rf_InitNode(&readDataNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
597 1.3 oster rf_GenericWakeupFunc, (nfaults * numParityNodes), 1, 4, 0, dag_h,
598 1.3 oster "Rod", allocList);
599 1.3 oster RF_ASSERT(pda != NULL);
600 1.3 oster /* physical disk addr desc */
601 1.3 oster readDataNodes[i].params[0].p = pda;
602 1.3 oster /* buffer to hold old data */
603 1.3 oster readDataNodes[i].params[1].p = rf_AllocBuffer(raidPtr,
604 1.3 oster dag_h, pda, allocList);
605 1.3 oster readDataNodes[i].params[2].v = parityStripeID;
606 1.3 oster readDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
607 1.3 oster lu_flag, 0, which_ru);
608 1.3 oster pda = pda->next;
609 1.3 oster for (j = 0; j < readDataNodes[i].numSuccedents; j++) {
610 1.3 oster readDataNodes[i].propList[j] = NULL;
611 1.3 oster }
612 1.3 oster }
613 1.3 oster
614 1.3 oster /* initialize nodes which read old parity (Rop) */
615 1.3 oster pda = asmap->parityInfo;
616 1.3 oster i = 0;
617 1.3 oster for (i = 0; i < numParityNodes; i++) {
618 1.3 oster RF_ASSERT(pda != NULL);
619 1.3 oster rf_InitNode(&readParityNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc,
620 1.3 oster rf_DiskReadUndoFunc, rf_GenericWakeupFunc, numParityNodes, 1, 4,
621 1.3 oster 0, dag_h, "Rop", allocList);
622 1.3 oster readParityNodes[i].params[0].p = pda;
623 1.3 oster /* buffer to hold old parity */
624 1.3 oster readParityNodes[i].params[1].p = rf_AllocBuffer(raidPtr,
625 1.3 oster dag_h, pda, allocList);
626 1.3 oster readParityNodes[i].params[2].v = parityStripeID;
627 1.3 oster readParityNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
628 1.3 oster lu_flag, 0, which_ru);
629 1.3 oster pda = pda->next;
630 1.3 oster for (j = 0; j < readParityNodes[i].numSuccedents; j++) {
631 1.3 oster readParityNodes[i].propList[0] = NULL;
632 1.3 oster }
633 1.3 oster }
634 1.3 oster
635 1.3 oster /* initialize nodes which read old Q (Roq) */
636 1.3 oster if (nfaults == 2) {
637 1.3 oster pda = asmap->qInfo;
638 1.3 oster for (i = 0; i < numParityNodes; i++) {
639 1.3 oster RF_ASSERT(pda != NULL);
640 1.3 oster rf_InitNode(&readQNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc,
641 1.3 oster rf_GenericWakeupFunc, numParityNodes, 1, 4, 0, dag_h, "Roq", allocList);
642 1.3 oster readQNodes[i].params[0].p = pda;
643 1.3 oster /* buffer to hold old Q */
644 1.3 oster readQNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda,
645 1.3 oster allocList);
646 1.3 oster readQNodes[i].params[2].v = parityStripeID;
647 1.3 oster readQNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
648 1.3 oster lu_flag, 0, which_ru);
649 1.3 oster pda = pda->next;
650 1.3 oster for (j = 0; j < readQNodes[i].numSuccedents; j++) {
651 1.3 oster readQNodes[i].propList[0] = NULL;
652 1.3 oster }
653 1.3 oster }
654 1.3 oster }
655 1.3 oster /* initialize nodes which write new data (Wnd) */
656 1.3 oster pda = asmap->physInfo;
657 1.3 oster for (i = 0; i < numDataNodes; i++) {
658 1.3 oster RF_ASSERT(pda != NULL);
659 1.3 oster rf_InitNode(&writeDataNodes[i], rf_wait, RF_FALSE, rf_DiskWriteFunc,
660 1.3 oster rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
661 1.3 oster "Wnd", allocList);
662 1.3 oster /* physical disk addr desc */
663 1.3 oster writeDataNodes[i].params[0].p = pda;
664 1.3 oster /* buffer holding new data to be written */
665 1.3 oster writeDataNodes[i].params[1].p = pda->bufPtr;
666 1.3 oster writeDataNodes[i].params[2].v = parityStripeID;
667 1.3 oster writeDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
668 1.3 oster 0, 0, which_ru);
669 1.3 oster if (lu_flag) {
670 1.3 oster /* initialize node to unlock the disk queue */
671 1.3 oster rf_InitNode(&unlockDataNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc,
672 1.3 oster rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h,
673 1.3 oster "Und", allocList);
674 1.3 oster /* physical disk addr desc */
675 1.3 oster unlockDataNodes[i].params[0].p = pda;
676 1.3 oster unlockDataNodes[i].params[1].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
677 1.3 oster 0, lu_flag, which_ru);
678 1.3 oster }
679 1.3 oster pda = pda->next;
680 1.3 oster }
681 1.3 oster
682 1.3 oster /*
683 1.3 oster * Initialize nodes which compute new parity and Q.
684 1.3 oster */
685 1.3 oster /*
686 1.3 oster * We use the simple XOR func in the double-XOR case, and when
687 1.3 oster * we're accessing only a portion of one stripe unit. The distinction
688 1.3 oster * between the two is that the regular XOR func assumes that the targbuf
689 1.3 oster * is a full SU in size, and examines the pda associated with the buffer
690 1.3 oster * to decide where within the buffer to XOR the data, whereas
691 1.3 oster * the simple XOR func just XORs the data into the start of the buffer.
692 1.3 oster */
693 1.3 oster if ((numParityNodes == 2) || ((numDataNodes == 1)
694 1.3 oster && (asmap->totalSectorsAccessed < raidPtr->Layout.sectorsPerStripeUnit))) {
695 1.3 oster func = pfuncs->simple;
696 1.3 oster undoFunc = rf_NullNodeUndoFunc;
697 1.3 oster name = pfuncs->SimpleName;
698 1.3 oster if (qfuncs) {
699 1.3 oster qfunc = qfuncs->simple;
700 1.3 oster qname = qfuncs->SimpleName;
701 1.3 oster } else {
702 1.3 oster qfunc = NULL;
703 1.3 oster qname = NULL;
704 1.3 oster }
705 1.3 oster } else {
706 1.3 oster func = pfuncs->regular;
707 1.3 oster undoFunc = rf_NullNodeUndoFunc;
708 1.3 oster name = pfuncs->RegularName;
709 1.3 oster if (qfuncs) {
710 1.3 oster qfunc = qfuncs->regular;
711 1.3 oster qname = qfuncs->RegularName;
712 1.3 oster } else {
713 1.3 oster qfunc = NULL;
714 1.3 oster qname = NULL;
715 1.3 oster }
716 1.3 oster }
717 1.3 oster /*
718 1.3 oster * Initialize the xor nodes: params are {pda,buf}
719 1.3 oster * from {Rod,Wnd,Rop} nodes, and raidPtr
720 1.3 oster */
721 1.3 oster if (numParityNodes == 2) {
722 1.3 oster /* double-xor case */
723 1.3 oster for (i = 0; i < numParityNodes; i++) {
724 1.3 oster /* note: no wakeup func for xor */
725 1.3 oster rf_InitNode(&xorNodes[i], rf_wait, RF_FALSE, func, undoFunc, NULL,
726 1.3 oster 1, (numDataNodes + numParityNodes), 7, 1, dag_h, name, allocList);
727 1.3 oster xorNodes[i].flags |= RF_DAGNODE_FLAG_YIELD;
728 1.3 oster xorNodes[i].params[0] = readDataNodes[i].params[0];
729 1.3 oster xorNodes[i].params[1] = readDataNodes[i].params[1];
730 1.3 oster xorNodes[i].params[2] = readParityNodes[i].params[0];
731 1.3 oster xorNodes[i].params[3] = readParityNodes[i].params[1];
732 1.3 oster xorNodes[i].params[4] = writeDataNodes[i].params[0];
733 1.3 oster xorNodes[i].params[5] = writeDataNodes[i].params[1];
734 1.3 oster xorNodes[i].params[6].p = raidPtr;
735 1.3 oster /* use old parity buf as target buf */
736 1.3 oster xorNodes[i].results[0] = readParityNodes[i].params[1].p;
737 1.3 oster if (nfaults == 2) {
738 1.3 oster /* note: no wakeup func for qor */
739 1.3 oster rf_InitNode(&qNodes[i], rf_wait, RF_FALSE, qfunc, undoFunc, NULL, 1,
740 1.3 oster (numDataNodes + numParityNodes), 7, 1, dag_h, qname, allocList);
741 1.3 oster qNodes[i].params[0] = readDataNodes[i].params[0];
742 1.3 oster qNodes[i].params[1] = readDataNodes[i].params[1];
743 1.3 oster qNodes[i].params[2] = readQNodes[i].params[0];
744 1.3 oster qNodes[i].params[3] = readQNodes[i].params[1];
745 1.3 oster qNodes[i].params[4] = writeDataNodes[i].params[0];
746 1.3 oster qNodes[i].params[5] = writeDataNodes[i].params[1];
747 1.3 oster qNodes[i].params[6].p = raidPtr;
748 1.3 oster /* use old Q buf as target buf */
749 1.3 oster qNodes[i].results[0] = readQNodes[i].params[1].p;
750 1.3 oster }
751 1.3 oster }
752 1.3 oster } else {
753 1.3 oster /* there is only one xor node in this case */
754 1.3 oster rf_InitNode(&xorNodes[0], rf_wait, RF_FALSE, func, undoFunc, NULL, 1,
755 1.3 oster (numDataNodes + numParityNodes),
756 1.3 oster (2 * (numDataNodes + numDataNodes + 1) + 1), 1, dag_h, name, allocList);
757 1.3 oster xorNodes[0].flags |= RF_DAGNODE_FLAG_YIELD;
758 1.3 oster for (i = 0; i < numDataNodes + 1; i++) {
759 1.3 oster /* set up params related to Rod and Rop nodes */
760 1.3 oster xorNodes[0].params[2 * i + 0] = readDataNodes[i].params[0]; /* pda */
761 1.3 oster xorNodes[0].params[2 * i + 1] = readDataNodes[i].params[1]; /* buffer ptr */
762 1.3 oster }
763 1.3 oster for (i = 0; i < numDataNodes; i++) {
764 1.3 oster /* set up params related to Wnd and Wnp nodes */
765 1.3 oster xorNodes[0].params[2 * (numDataNodes + 1 + i) + 0] = /* pda */
766 1.3 oster writeDataNodes[i].params[0];
767 1.3 oster xorNodes[0].params[2 * (numDataNodes + 1 + i) + 1] = /* buffer ptr */
768 1.3 oster writeDataNodes[i].params[1];
769 1.3 oster }
770 1.3 oster /* xor node needs to get at RAID information */
771 1.3 oster xorNodes[0].params[2 * (numDataNodes + numDataNodes + 1)].p = raidPtr;
772 1.3 oster xorNodes[0].results[0] = readParityNodes[0].params[1].p;
773 1.3 oster if (nfaults == 2) {
774 1.3 oster rf_InitNode(&qNodes[0], rf_wait, RF_FALSE, qfunc, undoFunc, NULL, 1,
775 1.3 oster (numDataNodes + numParityNodes),
776 1.3 oster (2 * (numDataNodes + numDataNodes + 1) + 1), 1, dag_h,
777 1.3 oster qname, allocList);
778 1.3 oster for (i = 0; i < numDataNodes; i++) {
779 1.3 oster /* set up params related to Rod */
780 1.3 oster qNodes[0].params[2 * i + 0] = readDataNodes[i].params[0]; /* pda */
781 1.3 oster qNodes[0].params[2 * i + 1] = readDataNodes[i].params[1]; /* buffer ptr */
782 1.3 oster }
783 1.3 oster /* and read old q */
784 1.3 oster qNodes[0].params[2 * numDataNodes + 0] = /* pda */
785 1.3 oster readQNodes[0].params[0];
786 1.3 oster qNodes[0].params[2 * numDataNodes + 1] = /* buffer ptr */
787 1.3 oster readQNodes[0].params[1];
788 1.3 oster for (i = 0; i < numDataNodes; i++) {
789 1.3 oster /* set up params related to Wnd nodes */
790 1.3 oster qNodes[0].params[2 * (numDataNodes + 1 + i) + 0] = /* pda */
791 1.3 oster writeDataNodes[i].params[0];
792 1.3 oster qNodes[0].params[2 * (numDataNodes + 1 + i) + 1] = /* buffer ptr */
793 1.3 oster writeDataNodes[i].params[1];
794 1.3 oster }
795 1.3 oster /* xor node needs to get at RAID information */
796 1.3 oster qNodes[0].params[2 * (numDataNodes + numDataNodes + 1)].p = raidPtr;
797 1.3 oster qNodes[0].results[0] = readQNodes[0].params[1].p;
798 1.3 oster }
799 1.3 oster }
800 1.3 oster
801 1.3 oster /* initialize nodes which write new parity (Wnp) */
802 1.3 oster pda = asmap->parityInfo;
803 1.3 oster for (i = 0; i < numParityNodes; i++) {
804 1.3 oster rf_InitNode(&writeParityNodes[i], rf_wait, RF_FALSE, rf_DiskWriteFunc,
805 1.3 oster rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
806 1.3 oster "Wnp", allocList);
807 1.3 oster RF_ASSERT(pda != NULL);
808 1.3 oster writeParityNodes[i].params[0].p = pda; /* param 1 (bufPtr)
809 1.3 oster * filled in by xor node */
810 1.3 oster writeParityNodes[i].params[1].p = xorNodes[i].results[0]; /* buffer pointer for
811 1.3 oster * parity write
812 1.3 oster * operation */
813 1.3 oster writeParityNodes[i].params[2].v = parityStripeID;
814 1.3 oster writeParityNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
815 1.3 oster 0, 0, which_ru);
816 1.3 oster if (lu_flag) {
817 1.3 oster /* initialize node to unlock the disk queue */
818 1.3 oster rf_InitNode(&unlockParityNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc,
819 1.3 oster rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h,
820 1.3 oster "Unp", allocList);
821 1.3 oster unlockParityNodes[i].params[0].p = pda; /* physical disk addr
822 1.3 oster * desc */
823 1.3 oster unlockParityNodes[i].params[1].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
824 1.3 oster 0, lu_flag, which_ru);
825 1.3 oster }
826 1.3 oster pda = pda->next;
827 1.3 oster }
828 1.3 oster
829 1.3 oster /* initialize nodes which write new Q (Wnq) */
830 1.3 oster if (nfaults == 2) {
831 1.3 oster pda = asmap->qInfo;
832 1.3 oster for (i = 0; i < numParityNodes; i++) {
833 1.3 oster rf_InitNode(&writeQNodes[i], rf_wait, RF_FALSE, rf_DiskWriteFunc,
834 1.3 oster rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h,
835 1.3 oster "Wnq", allocList);
836 1.3 oster RF_ASSERT(pda != NULL);
837 1.3 oster writeQNodes[i].params[0].p = pda; /* param 1 (bufPtr)
838 1.3 oster * filled in by xor node */
839 1.3 oster writeQNodes[i].params[1].p = qNodes[i].results[0]; /* buffer pointer for
840 1.3 oster * parity write
841 1.3 oster * operation */
842 1.3 oster writeQNodes[i].params[2].v = parityStripeID;
843 1.3 oster writeQNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
844 1.3 oster 0, 0, which_ru);
845 1.3 oster if (lu_flag) {
846 1.3 oster /* initialize node to unlock the disk queue */
847 1.3 oster rf_InitNode(&unlockQNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc,
848 1.3 oster rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h,
849 1.3 oster "Unq", allocList);
850 1.3 oster unlockQNodes[i].params[0].p = pda; /* physical disk addr
851 1.3 oster * desc */
852 1.3 oster unlockQNodes[i].params[1].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY,
853 1.3 oster 0, lu_flag, which_ru);
854 1.3 oster }
855 1.3 oster pda = pda->next;
856 1.3 oster }
857 1.3 oster }
858 1.3 oster /*
859 1.3 oster * Step 4. connect the nodes.
860 1.3 oster */
861 1.3 oster
862 1.3 oster /* connect header to block node */
863 1.3 oster dag_h->succedents[0] = blockNode;
864 1.3 oster
865 1.3 oster /* connect block node to read old data nodes */
866 1.3 oster RF_ASSERT(blockNode->numSuccedents == (numDataNodes + (numParityNodes * nfaults)));
867 1.3 oster for (i = 0; i < numDataNodes; i++) {
868 1.3 oster blockNode->succedents[i] = &readDataNodes[i];
869 1.3 oster RF_ASSERT(readDataNodes[i].numAntecedents == 1);
870 1.3 oster readDataNodes[i].antecedents[0] = blockNode;
871 1.3 oster readDataNodes[i].antType[0] = rf_control;
872 1.3 oster }
873 1.3 oster
874 1.3 oster /* connect block node to read old parity nodes */
875 1.3 oster for (i = 0; i < numParityNodes; i++) {
876 1.3 oster blockNode->succedents[numDataNodes + i] = &readParityNodes[i];
877 1.3 oster RF_ASSERT(readParityNodes[i].numAntecedents == 1);
878 1.3 oster readParityNodes[i].antecedents[0] = blockNode;
879 1.3 oster readParityNodes[i].antType[0] = rf_control;
880 1.3 oster }
881 1.3 oster
882 1.3 oster /* connect block node to read old Q nodes */
883 1.3 oster if (nfaults == 2) {
884 1.3 oster for (i = 0; i < numParityNodes; i++) {
885 1.3 oster blockNode->succedents[numDataNodes + numParityNodes + i] = &readQNodes[i];
886 1.3 oster RF_ASSERT(readQNodes[i].numAntecedents == 1);
887 1.3 oster readQNodes[i].antecedents[0] = blockNode;
888 1.3 oster readQNodes[i].antType[0] = rf_control;
889 1.3 oster }
890 1.3 oster }
891 1.3 oster /* connect read old data nodes to xor nodes */
892 1.3 oster for (i = 0; i < numDataNodes; i++) {
893 1.3 oster RF_ASSERT(readDataNodes[i].numSuccedents == (nfaults * numParityNodes));
894 1.3 oster for (j = 0; j < numParityNodes; j++) {
895 1.3 oster RF_ASSERT(xorNodes[j].numAntecedents == numDataNodes + numParityNodes);
896 1.3 oster readDataNodes[i].succedents[j] = &xorNodes[j];
897 1.3 oster xorNodes[j].antecedents[i] = &readDataNodes[i];
898 1.3 oster xorNodes[j].antType[i] = rf_trueData;
899 1.3 oster }
900 1.3 oster }
901 1.3 oster
902 1.3 oster /* connect read old data nodes to q nodes */
903 1.3 oster if (nfaults == 2) {
904 1.3 oster for (i = 0; i < numDataNodes; i++) {
905 1.3 oster for (j = 0; j < numParityNodes; j++) {
906 1.3 oster RF_ASSERT(qNodes[j].numAntecedents == numDataNodes + numParityNodes);
907 1.3 oster readDataNodes[i].succedents[numParityNodes + j] = &qNodes[j];
908 1.3 oster qNodes[j].antecedents[i] = &readDataNodes[i];
909 1.3 oster qNodes[j].antType[i] = rf_trueData;
910 1.3 oster }
911 1.3 oster }
912 1.3 oster }
913 1.3 oster /* connect read old parity nodes to xor nodes */
914 1.3 oster for (i = 0; i < numParityNodes; i++) {
915 1.3 oster RF_ASSERT(readParityNodes[i].numSuccedents == numParityNodes);
916 1.3 oster for (j = 0; j < numParityNodes; j++) {
917 1.3 oster readParityNodes[i].succedents[j] = &xorNodes[j];
918 1.3 oster xorNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
919 1.3 oster xorNodes[j].antType[numDataNodes + i] = rf_trueData;
920 1.3 oster }
921 1.3 oster }
922 1.3 oster
923 1.3 oster /* connect read old q nodes to q nodes */
924 1.3 oster if (nfaults == 2) {
925 1.3 oster for (i = 0; i < numParityNodes; i++) {
926 1.3 oster RF_ASSERT(readParityNodes[i].numSuccedents == numParityNodes);
927 1.3 oster for (j = 0; j < numParityNodes; j++) {
928 1.3 oster readQNodes[i].succedents[j] = &qNodes[j];
929 1.3 oster qNodes[j].antecedents[numDataNodes + i] = &readQNodes[i];
930 1.3 oster qNodes[j].antType[numDataNodes + i] = rf_trueData;
931 1.3 oster }
932 1.3 oster }
933 1.3 oster }
934 1.3 oster /* connect xor nodes to commit node */
935 1.3 oster RF_ASSERT(commitNode->numAntecedents == (nfaults * numParityNodes));
936 1.3 oster for (i = 0; i < numParityNodes; i++) {
937 1.3 oster RF_ASSERT(xorNodes[i].numSuccedents == 1);
938 1.3 oster xorNodes[i].succedents[0] = commitNode;
939 1.3 oster commitNode->antecedents[i] = &xorNodes[i];
940 1.3 oster commitNode->antType[i] = rf_control;
941 1.3 oster }
942 1.3 oster
943 1.3 oster /* connect q nodes to commit node */
944 1.3 oster if (nfaults == 2) {
945 1.3 oster for (i = 0; i < numParityNodes; i++) {
946 1.3 oster RF_ASSERT(qNodes[i].numSuccedents == 1);
947 1.3 oster qNodes[i].succedents[0] = commitNode;
948 1.3 oster commitNode->antecedents[i + numParityNodes] = &qNodes[i];
949 1.3 oster commitNode->antType[i + numParityNodes] = rf_control;
950 1.3 oster }
951 1.3 oster }
952 1.3 oster /* connect commit node to write nodes */
953 1.3 oster RF_ASSERT(commitNode->numSuccedents == (numDataNodes + (nfaults * numParityNodes)));
954 1.3 oster for (i = 0; i < numDataNodes; i++) {
955 1.3 oster RF_ASSERT(writeDataNodes[i].numAntecedents == 1);
956 1.3 oster commitNode->succedents[i] = &writeDataNodes[i];
957 1.3 oster writeDataNodes[i].antecedents[0] = commitNode;
958 1.3 oster writeDataNodes[i].antType[0] = rf_trueData;
959 1.3 oster }
960 1.3 oster for (i = 0; i < numParityNodes; i++) {
961 1.3 oster RF_ASSERT(writeParityNodes[i].numAntecedents == 1);
962 1.3 oster commitNode->succedents[i + numDataNodes] = &writeParityNodes[i];
963 1.3 oster writeParityNodes[i].antecedents[0] = commitNode;
964 1.3 oster writeParityNodes[i].antType[0] = rf_trueData;
965 1.3 oster }
966 1.3 oster if (nfaults == 2) {
967 1.3 oster for (i = 0; i < numParityNodes; i++) {
968 1.3 oster RF_ASSERT(writeQNodes[i].numAntecedents == 1);
969 1.3 oster commitNode->succedents[i + numDataNodes + numParityNodes] = &writeQNodes[i];
970 1.3 oster writeQNodes[i].antecedents[0] = commitNode;
971 1.3 oster writeQNodes[i].antType[0] = rf_trueData;
972 1.3 oster }
973 1.3 oster }
974 1.3 oster RF_ASSERT(termNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
975 1.3 oster RF_ASSERT(termNode->numSuccedents == 0);
976 1.3 oster for (i = 0; i < numDataNodes; i++) {
977 1.3 oster if (lu_flag) {
978 1.3 oster /* connect write new data nodes to unlock nodes */
979 1.3 oster RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
980 1.3 oster RF_ASSERT(unlockDataNodes[i].numAntecedents == 1);
981 1.3 oster writeDataNodes[i].succedents[0] = &unlockDataNodes[i];
982 1.3 oster unlockDataNodes[i].antecedents[0] = &writeDataNodes[i];
983 1.3 oster unlockDataNodes[i].antType[0] = rf_control;
984 1.3 oster
985 1.3 oster /* connect unlock nodes to term node */
986 1.3 oster RF_ASSERT(unlockDataNodes[i].numSuccedents == 1);
987 1.3 oster unlockDataNodes[i].succedents[0] = termNode;
988 1.3 oster termNode->antecedents[i] = &unlockDataNodes[i];
989 1.3 oster termNode->antType[i] = rf_control;
990 1.3 oster } else {
991 1.3 oster /* connect write new data nodes to term node */
992 1.3 oster RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
993 1.3 oster RF_ASSERT(termNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
994 1.3 oster writeDataNodes[i].succedents[0] = termNode;
995 1.3 oster termNode->antecedents[i] = &writeDataNodes[i];
996 1.3 oster termNode->antType[i] = rf_control;
997 1.3 oster }
998 1.3 oster }
999 1.3 oster
1000 1.3 oster for (i = 0; i < numParityNodes; i++) {
1001 1.3 oster if (lu_flag) {
1002 1.3 oster /* connect write new parity nodes to unlock nodes */
1003 1.3 oster RF_ASSERT(writeParityNodes[i].numSuccedents == 1);
1004 1.3 oster RF_ASSERT(unlockParityNodes[i].numAntecedents == 1);
1005 1.3 oster writeParityNodes[i].succedents[0] = &unlockParityNodes[i];
1006 1.3 oster unlockParityNodes[i].antecedents[0] = &writeParityNodes[i];
1007 1.3 oster unlockParityNodes[i].antType[0] = rf_control;
1008 1.3 oster
1009 1.3 oster /* connect unlock nodes to term node */
1010 1.3 oster RF_ASSERT(unlockParityNodes[i].numSuccedents == 1);
1011 1.3 oster unlockParityNodes[i].succedents[0] = termNode;
1012 1.3 oster termNode->antecedents[numDataNodes + i] = &unlockParityNodes[i];
1013 1.3 oster termNode->antType[numDataNodes + i] = rf_control;
1014 1.3 oster } else {
1015 1.3 oster RF_ASSERT(writeParityNodes[i].numSuccedents == 1);
1016 1.3 oster writeParityNodes[i].succedents[0] = termNode;
1017 1.3 oster termNode->antecedents[numDataNodes + i] = &writeParityNodes[i];
1018 1.3 oster termNode->antType[numDataNodes + i] = rf_control;
1019 1.3 oster }
1020 1.3 oster }
1021 1.3 oster
1022 1.3 oster if (nfaults == 2) {
1023 1.3 oster for (i = 0; i < numParityNodes; i++) {
1024 1.3 oster if (lu_flag) {
1025 1.3 oster /* connect write new Q nodes to unlock nodes */
1026 1.3 oster RF_ASSERT(writeQNodes[i].numSuccedents == 1);
1027 1.3 oster RF_ASSERT(unlockQNodes[i].numAntecedents == 1);
1028 1.3 oster writeQNodes[i].succedents[0] = &unlockQNodes[i];
1029 1.3 oster unlockQNodes[i].antecedents[0] = &writeQNodes[i];
1030 1.3 oster unlockQNodes[i].antType[0] = rf_control;
1031 1.3 oster
1032 1.3 oster /* connect unlock nodes to unblock node */
1033 1.3 oster RF_ASSERT(unlockQNodes[i].numSuccedents == 1);
1034 1.3 oster unlockQNodes[i].succedents[0] = termNode;
1035 1.3 oster termNode->antecedents[numDataNodes + numParityNodes + i] = &unlockQNodes[i];
1036 1.3 oster termNode->antType[numDataNodes + numParityNodes + i] = rf_control;
1037 1.3 oster } else {
1038 1.3 oster RF_ASSERT(writeQNodes[i].numSuccedents == 1);
1039 1.3 oster writeQNodes[i].succedents[0] = termNode;
1040 1.3 oster termNode->antecedents[numDataNodes + numParityNodes + i] = &writeQNodes[i];
1041 1.3 oster termNode->antType[numDataNodes + numParityNodes + i] = rf_control;
1042 1.3 oster }
1043 1.3 oster }
1044 1.3 oster }
1045 1.1 oster }
1046 1.1 oster
1047 1.1 oster
1048 1.1 oster /******************************************************************************
1049 1.1 oster * create a write graph (fault-free or degraded) for RAID level 1
1050 1.1 oster *
1051 1.1 oster * Hdr -> Commit -> Wpd -> Nil -> Trm
1052 1.1 oster * -> Wsd ->
1053 1.1 oster *
1054 1.1 oster * The "Wpd" node writes data to the primary copy in the mirror pair
1055 1.1 oster * The "Wsd" node writes data to the secondary copy in the mirror pair
1056 1.1 oster *
1057 1.1 oster * Parameters: raidPtr - description of the physical array
1058 1.1 oster * asmap - logical & physical addresses for this access
1059 1.1 oster * bp - buffer ptr (holds write data)
1060 1.3 oster * flags - general flags (e.g. disk locking)
1061 1.1 oster * allocList - list of memory allocated in DAG creation
1062 1.1 oster *****************************************************************************/
1063 1.1 oster
1064 1.3 oster void
1065 1.3 oster rf_CreateRaidOneWriteDAG(
1066 1.3 oster RF_Raid_t * raidPtr,
1067 1.3 oster RF_AccessStripeMap_t * asmap,
1068 1.3 oster RF_DagHeader_t * dag_h,
1069 1.3 oster void *bp,
1070 1.3 oster RF_RaidAccessFlags_t flags,
1071 1.3 oster RF_AllocListElem_t * allocList)
1072 1.1 oster {
1073 1.3 oster RF_DagNode_t *unblockNode, *termNode, *commitNode;
1074 1.3 oster RF_DagNode_t *nodes, *wndNode, *wmirNode;
1075 1.3 oster int nWndNodes, nWmirNodes, i;
1076 1.3 oster RF_ReconUnitNum_t which_ru;
1077 1.3 oster RF_PhysDiskAddr_t *pda, *pdaP;
1078 1.3 oster RF_StripeNum_t parityStripeID;
1079 1.3 oster
1080 1.3 oster parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
1081 1.3 oster asmap->raidAddress, &which_ru);
1082 1.3 oster if (rf_dagDebug) {
1083 1.3 oster printf("[Creating RAID level 1 write DAG]\n");
1084 1.3 oster }
1085 1.3 oster dag_h->creator = "RaidOneWriteDAG";
1086 1.3 oster
1087 1.3 oster /* 2 implies access not SU aligned */
1088 1.3 oster nWmirNodes = (asmap->parityInfo->next) ? 2 : 1;
1089 1.3 oster nWndNodes = (asmap->physInfo->next) ? 2 : 1;
1090 1.3 oster
1091 1.3 oster /* alloc the Wnd nodes and the Wmir node */
1092 1.3 oster if (asmap->numDataFailed == 1)
1093 1.3 oster nWndNodes--;
1094 1.3 oster if (asmap->numParityFailed == 1)
1095 1.3 oster nWmirNodes--;
1096 1.3 oster
1097 1.3 oster /* total number of nodes = nWndNodes + nWmirNodes + (commit + unblock
1098 1.3 oster * + terminator) */
1099 1.3 oster RF_CallocAndAdd(nodes, nWndNodes + nWmirNodes + 3, sizeof(RF_DagNode_t),
1100 1.3 oster (RF_DagNode_t *), allocList);
1101 1.3 oster i = 0;
1102 1.3 oster wndNode = &nodes[i];
1103 1.3 oster i += nWndNodes;
1104 1.3 oster wmirNode = &nodes[i];
1105 1.3 oster i += nWmirNodes;
1106 1.3 oster commitNode = &nodes[i];
1107 1.3 oster i += 1;
1108 1.3 oster unblockNode = &nodes[i];
1109 1.3 oster i += 1;
1110 1.3 oster termNode = &nodes[i];
1111 1.3 oster i += 1;
1112 1.3 oster RF_ASSERT(i == (nWndNodes + nWmirNodes + 3));
1113 1.3 oster
1114 1.3 oster /* this dag can commit immediately */
1115 1.3 oster dag_h->numCommitNodes = 1;
1116 1.3 oster dag_h->numCommits = 0;
1117 1.3 oster dag_h->numSuccedents = 1;
1118 1.3 oster
1119 1.3 oster /* initialize the commit, unblock, and term nodes */
1120 1.3 oster rf_InitNode(commitNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
1121 1.3 oster NULL, (nWndNodes + nWmirNodes), 0, 0, 0, dag_h, "Cmt", allocList);
1122 1.3 oster rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc,
1123 1.3 oster NULL, 1, (nWndNodes + nWmirNodes), 0, 0, dag_h, "Nil", allocList);
1124 1.3 oster rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc,
1125 1.3 oster NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
1126 1.3 oster
1127 1.3 oster /* initialize the wnd nodes */
1128 1.3 oster if (nWndNodes > 0) {
1129 1.3 oster pda = asmap->physInfo;
1130 1.3 oster for (i = 0; i < nWndNodes; i++) {
1131 1.3 oster rf_InitNode(&wndNode[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
1132 1.3 oster rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wpd", allocList);
1133 1.3 oster RF_ASSERT(pda != NULL);
1134 1.3 oster wndNode[i].params[0].p = pda;
1135 1.3 oster wndNode[i].params[1].p = pda->bufPtr;
1136 1.3 oster wndNode[i].params[2].v = parityStripeID;
1137 1.3 oster wndNode[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1138 1.3 oster pda = pda->next;
1139 1.3 oster }
1140 1.3 oster RF_ASSERT(pda == NULL);
1141 1.3 oster }
1142 1.3 oster /* initialize the mirror nodes */
1143 1.3 oster if (nWmirNodes > 0) {
1144 1.3 oster pda = asmap->physInfo;
1145 1.3 oster pdaP = asmap->parityInfo;
1146 1.3 oster for (i = 0; i < nWmirNodes; i++) {
1147 1.3 oster rf_InitNode(&wmirNode[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
1148 1.3 oster rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wsd", allocList);
1149 1.3 oster RF_ASSERT(pda != NULL);
1150 1.3 oster wmirNode[i].params[0].p = pdaP;
1151 1.3 oster wmirNode[i].params[1].p = pda->bufPtr;
1152 1.3 oster wmirNode[i].params[2].v = parityStripeID;
1153 1.3 oster wmirNode[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1154 1.3 oster pda = pda->next;
1155 1.3 oster pdaP = pdaP->next;
1156 1.3 oster }
1157 1.3 oster RF_ASSERT(pda == NULL);
1158 1.3 oster RF_ASSERT(pdaP == NULL);
1159 1.3 oster }
1160 1.3 oster /* link the header node to the commit node */
1161 1.3 oster RF_ASSERT(dag_h->numSuccedents == 1);
1162 1.3 oster RF_ASSERT(commitNode->numAntecedents == 0);
1163 1.3 oster dag_h->succedents[0] = commitNode;
1164 1.3 oster
1165 1.3 oster /* link the commit node to the write nodes */
1166 1.3 oster RF_ASSERT(commitNode->numSuccedents == (nWndNodes + nWmirNodes));
1167 1.3 oster for (i = 0; i < nWndNodes; i++) {
1168 1.3 oster RF_ASSERT(wndNode[i].numAntecedents == 1);
1169 1.3 oster commitNode->succedents[i] = &wndNode[i];
1170 1.3 oster wndNode[i].antecedents[0] = commitNode;
1171 1.3 oster wndNode[i].antType[0] = rf_control;
1172 1.3 oster }
1173 1.3 oster for (i = 0; i < nWmirNodes; i++) {
1174 1.3 oster RF_ASSERT(wmirNode[i].numAntecedents == 1);
1175 1.3 oster commitNode->succedents[i + nWndNodes] = &wmirNode[i];
1176 1.3 oster wmirNode[i].antecedents[0] = commitNode;
1177 1.3 oster wmirNode[i].antType[0] = rf_control;
1178 1.3 oster }
1179 1.3 oster
1180 1.3 oster /* link the write nodes to the unblock node */
1181 1.3 oster RF_ASSERT(unblockNode->numAntecedents == (nWndNodes + nWmirNodes));
1182 1.3 oster for (i = 0; i < nWndNodes; i++) {
1183 1.3 oster RF_ASSERT(wndNode[i].numSuccedents == 1);
1184 1.3 oster wndNode[i].succedents[0] = unblockNode;
1185 1.3 oster unblockNode->antecedents[i] = &wndNode[i];
1186 1.3 oster unblockNode->antType[i] = rf_control;
1187 1.3 oster }
1188 1.3 oster for (i = 0; i < nWmirNodes; i++) {
1189 1.3 oster RF_ASSERT(wmirNode[i].numSuccedents == 1);
1190 1.3 oster wmirNode[i].succedents[0] = unblockNode;
1191 1.3 oster unblockNode->antecedents[i + nWndNodes] = &wmirNode[i];
1192 1.3 oster unblockNode->antType[i + nWndNodes] = rf_control;
1193 1.3 oster }
1194 1.3 oster
1195 1.3 oster /* link the unblock node to the term node */
1196 1.3 oster RF_ASSERT(unblockNode->numSuccedents == 1);
1197 1.3 oster RF_ASSERT(termNode->numAntecedents == 1);
1198 1.3 oster RF_ASSERT(termNode->numSuccedents == 0);
1199 1.3 oster unblockNode->succedents[0] = termNode;
1200 1.3 oster termNode->antecedents[0] = unblockNode;
1201 1.3 oster termNode->antType[0] = rf_control;
1202 1.1 oster }
1203 1.1 oster
1204 1.1 oster
1205 1.1 oster
1206 1.1 oster /* DAGs which have no commit points.
1207 1.1 oster *
1208 1.1 oster * The following DAGs are used in forward and backward error recovery experiments.
1209 1.1 oster  * They are identical to the DAGs above this comment with the exception that
1210 1.1 oster  * the commit points have been removed.
1211 1.1 oster */
1212 1.1 oster
1213 1.1 oster
1214 1.1 oster
1215 1.3 oster void
1216 1.3 oster rf_CommonCreateLargeWriteDAGFwd(
1217 1.3 oster RF_Raid_t * raidPtr,
1218 1.3 oster RF_AccessStripeMap_t * asmap,
1219 1.3 oster RF_DagHeader_t * dag_h,
1220 1.3 oster void *bp,
1221 1.3 oster RF_RaidAccessFlags_t flags,
1222 1.3 oster RF_AllocListElem_t * allocList,
1223 1.3 oster int nfaults,
1224 1.3 oster int (*redFunc) (RF_DagNode_t *),
1225 1.3 oster int allowBufferRecycle)
1226 1.1 oster {
1227 1.3 oster RF_DagNode_t *nodes, *wndNodes, *rodNodes, *xorNode, *wnpNode;
1228 1.3 oster RF_DagNode_t *wnqNode, *blockNode, *syncNode, *termNode;
1229 1.3 oster int nWndNodes, nRodNodes, i, nodeNum, asmNum;
1230 1.3 oster RF_AccessStripeMapHeader_t *new_asm_h[2];
1231 1.3 oster RF_StripeNum_t parityStripeID;
1232 1.3 oster char *sosBuffer, *eosBuffer;
1233 1.3 oster RF_ReconUnitNum_t which_ru;
1234 1.3 oster RF_RaidLayout_t *layoutPtr;
1235 1.3 oster RF_PhysDiskAddr_t *pda;
1236 1.3 oster
1237 1.3 oster layoutPtr = &(raidPtr->Layout);
1238 1.3 oster parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout), asmap->raidAddress, &which_ru);
1239 1.3 oster
1240 1.3 oster if (rf_dagDebug)
1241 1.3 oster printf("[Creating large-write DAG]\n");
1242 1.3 oster dag_h->creator = "LargeWriteDAGFwd";
1243 1.3 oster
1244 1.3 oster dag_h->numCommitNodes = 0;
1245 1.3 oster dag_h->numCommits = 0;
1246 1.3 oster dag_h->numSuccedents = 1;
1247 1.3 oster
1248 1.3 oster /* alloc the nodes: Wnd, xor, commit, block, term, and Wnp */
1249 1.3 oster nWndNodes = asmap->numStripeUnitsAccessed;
1250 1.3 oster RF_CallocAndAdd(nodes, nWndNodes + 4 + nfaults, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
1251 1.3 oster i = 0;
1252 1.3 oster wndNodes = &nodes[i];
1253 1.3 oster i += nWndNodes;
1254 1.3 oster xorNode = &nodes[i];
1255 1.3 oster i += 1;
1256 1.3 oster wnpNode = &nodes[i];
1257 1.3 oster i += 1;
1258 1.3 oster blockNode = &nodes[i];
1259 1.3 oster i += 1;
1260 1.3 oster syncNode = &nodes[i];
1261 1.3 oster i += 1;
1262 1.3 oster termNode = &nodes[i];
1263 1.3 oster i += 1;
1264 1.3 oster if (nfaults == 2) {
1265 1.3 oster wnqNode = &nodes[i];
1266 1.3 oster i += 1;
1267 1.3 oster } else {
1268 1.3 oster wnqNode = NULL;
1269 1.3 oster }
1270 1.3 oster rf_MapUnaccessedPortionOfStripe(raidPtr, layoutPtr, asmap, dag_h, new_asm_h, &nRodNodes, &sosBuffer, &eosBuffer, allocList);
1271 1.3 oster if (nRodNodes > 0) {
1272 1.3 oster RF_CallocAndAdd(rodNodes, nRodNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
1273 1.3 oster } else {
1274 1.3 oster rodNodes = NULL;
1275 1.3 oster }
1276 1.3 oster
1277 1.3 oster /* begin node initialization */
1278 1.3 oster if (nRodNodes > 0) {
1279 1.3 oster rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nRodNodes, 0, 0, 0, dag_h, "Nil", allocList);
1280 1.3 oster rf_InitNode(syncNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nWndNodes + 1, nRodNodes, 0, 0, dag_h, "Nil", allocList);
1281 1.3 oster } else {
1282 1.3 oster rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, 0, 0, 0, dag_h, "Nil", allocList);
1283 1.3 oster rf_InitNode(syncNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nWndNodes + 1, 1, 0, 0, dag_h, "Nil", allocList);
1284 1.3 oster }
1285 1.3 oster
1286 1.3 oster rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, nWndNodes + nfaults, 0, 0, dag_h, "Trm", allocList);
1287 1.3 oster
1288 1.3 oster /* initialize the Rod nodes */
1289 1.3 oster for (nodeNum = asmNum = 0; asmNum < 2; asmNum++) {
1290 1.3 oster if (new_asm_h[asmNum]) {
1291 1.3 oster pda = new_asm_h[asmNum]->stripeMap->physInfo;
1292 1.3 oster while (pda) {
1293 1.3 oster rf_InitNode(&rodNodes[nodeNum], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rod", allocList);
1294 1.3 oster rodNodes[nodeNum].params[0].p = pda;
1295 1.3 oster rodNodes[nodeNum].params[1].p = pda->bufPtr;
1296 1.3 oster rodNodes[nodeNum].params[2].v = parityStripeID;
1297 1.3 oster rodNodes[nodeNum].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1298 1.3 oster nodeNum++;
1299 1.3 oster pda = pda->next;
1300 1.3 oster }
1301 1.3 oster }
1302 1.3 oster }
1303 1.3 oster RF_ASSERT(nodeNum == nRodNodes);
1304 1.3 oster
1305 1.3 oster /* initialize the wnd nodes */
1306 1.3 oster pda = asmap->physInfo;
1307 1.3 oster for (i = 0; i < nWndNodes; i++) {
1308 1.3 oster rf_InitNode(&wndNodes[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnd", allocList);
1309 1.3 oster RF_ASSERT(pda != NULL);
1310 1.3 oster wndNodes[i].params[0].p = pda;
1311 1.3 oster wndNodes[i].params[1].p = pda->bufPtr;
1312 1.3 oster wndNodes[i].params[2].v = parityStripeID;
1313 1.3 oster wndNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1314 1.3 oster pda = pda->next;
1315 1.3 oster }
1316 1.3 oster
1317 1.3 oster /* initialize the redundancy node */
1318 1.3 oster rf_InitNode(xorNode, rf_wait, RF_FALSE, redFunc, rf_NullNodeUndoFunc, NULL, 1, nfaults, 2 * (nWndNodes + nRodNodes) + 1, nfaults, dag_h, "Xr ", allocList);
1319 1.3 oster xorNode->flags |= RF_DAGNODE_FLAG_YIELD;
1320 1.3 oster for (i = 0; i < nWndNodes; i++) {
1321 1.3 oster xorNode->params[2 * i + 0] = wndNodes[i].params[0]; /* pda */
1322 1.3 oster xorNode->params[2 * i + 1] = wndNodes[i].params[1]; /* buf ptr */
1323 1.3 oster }
1324 1.3 oster for (i = 0; i < nRodNodes; i++) {
1325 1.3 oster xorNode->params[2 * (nWndNodes + i) + 0] = rodNodes[i].params[0]; /* pda */
1326 1.3 oster xorNode->params[2 * (nWndNodes + i) + 1] = rodNodes[i].params[1]; /* buf ptr */
1327 1.3 oster }
1328 1.3 oster xorNode->params[2 * (nWndNodes + nRodNodes)].p = raidPtr; /* xor node needs to get
1329 1.3 oster * at RAID information */
1330 1.3 oster
1331 1.3 oster /* look for an Rod node that reads a complete SU. If none, alloc a
1332 1.3 oster * buffer to receive the parity info. Note that we can't use a new
1333 1.3 oster * data buffer because it will not have gotten written when the xor
1334 1.3 oster * occurs. */
1335 1.3 oster if (allowBufferRecycle) {
1336 1.3 oster for (i = 0; i < nRodNodes; i++)
1337 1.3 oster if (((RF_PhysDiskAddr_t *) rodNodes[i].params[0].p)->numSector == raidPtr->Layout.sectorsPerStripeUnit)
1338 1.3 oster break;
1339 1.3 oster }
1340 1.3 oster if ((!allowBufferRecycle) || (i == nRodNodes)) {
1341 1.3 oster RF_CallocAndAdd(xorNode->results[0], 1, rf_RaidAddressToByte(raidPtr, raidPtr->Layout.sectorsPerStripeUnit), (void *), allocList);
1342 1.3 oster } else
1343 1.3 oster xorNode->results[0] = rodNodes[i].params[1].p;
1344 1.3 oster
1345 1.3 oster /* initialize the Wnp node */
1346 1.3 oster rf_InitNode(wnpNode, rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnp", allocList);
1347 1.3 oster wnpNode->params[0].p = asmap->parityInfo;
1348 1.3 oster wnpNode->params[1].p = xorNode->results[0];
1349 1.3 oster wnpNode->params[2].v = parityStripeID;
1350 1.3 oster wnpNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1351 1.3 oster RF_ASSERT(asmap->parityInfo->next == NULL); /* parityInfo must
1352 1.3 oster * describe entire
1353 1.3 oster * parity unit */
1354 1.3 oster
1355 1.3 oster if (nfaults == 2) {
1356 1.3 oster 		/* we never try to recycle a buffer for the Q calculation in
1357 1.3 oster * addition to the parity. This would cause two buffers to get
1358 1.3 oster * smashed during the P and Q calculation, guaranteeing one
1359 1.3 oster * would be wrong. */
1360 1.3 oster RF_CallocAndAdd(xorNode->results[1], 1, rf_RaidAddressToByte(raidPtr, raidPtr->Layout.sectorsPerStripeUnit), (void *), allocList);
1361 1.3 oster rf_InitNode(wnqNode, rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnq", allocList);
1362 1.3 oster wnqNode->params[0].p = asmap->qInfo;
1363 1.3 oster wnqNode->params[1].p = xorNode->results[1];
1364 1.3 oster wnqNode->params[2].v = parityStripeID;
1365 1.3 oster wnqNode->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1366 1.3 oster RF_ASSERT(asmap->parityInfo->next == NULL); /* parityInfo must
1367 1.3 oster * describe entire
1368 1.3 oster * parity unit */
1369 1.3 oster }
1370 1.3 oster /* connect nodes to form graph */
1371 1.3 oster
1372 1.3 oster /* connect dag header to block node */
1373 1.3 oster RF_ASSERT(blockNode->numAntecedents == 0);
1374 1.3 oster dag_h->succedents[0] = blockNode;
1375 1.3 oster
1376 1.3 oster if (nRodNodes > 0) {
1377 1.3 oster /* connect the block node to the Rod nodes */
1378 1.3 oster RF_ASSERT(blockNode->numSuccedents == nRodNodes);
1379 1.3 oster RF_ASSERT(syncNode->numAntecedents == nRodNodes);
1380 1.3 oster for (i = 0; i < nRodNodes; i++) {
1381 1.3 oster RF_ASSERT(rodNodes[i].numAntecedents == 1);
1382 1.3 oster blockNode->succedents[i] = &rodNodes[i];
1383 1.3 oster rodNodes[i].antecedents[0] = blockNode;
1384 1.3 oster rodNodes[i].antType[0] = rf_control;
1385 1.3 oster
1386 1.3 oster /* connect the Rod nodes to the Nil node */
1387 1.3 oster RF_ASSERT(rodNodes[i].numSuccedents == 1);
1388 1.3 oster rodNodes[i].succedents[0] = syncNode;
1389 1.3 oster syncNode->antecedents[i] = &rodNodes[i];
1390 1.3 oster syncNode->antType[i] = rf_trueData;
1391 1.3 oster }
1392 1.3 oster } else {
1393 1.3 oster /* connect the block node to the Nil node */
1394 1.3 oster RF_ASSERT(blockNode->numSuccedents == 1);
1395 1.3 oster RF_ASSERT(syncNode->numAntecedents == 1);
1396 1.3 oster blockNode->succedents[0] = syncNode;
1397 1.3 oster syncNode->antecedents[0] = blockNode;
1398 1.3 oster syncNode->antType[0] = rf_control;
1399 1.3 oster }
1400 1.3 oster
1401 1.3 oster /* connect the sync node to the Wnd nodes */
1402 1.3 oster RF_ASSERT(syncNode->numSuccedents == (1 + nWndNodes));
1403 1.3 oster for (i = 0; i < nWndNodes; i++) {
1404 1.3 oster RF_ASSERT(wndNodes->numAntecedents == 1);
1405 1.3 oster syncNode->succedents[i] = &wndNodes[i];
1406 1.3 oster wndNodes[i].antecedents[0] = syncNode;
1407 1.3 oster wndNodes[i].antType[0] = rf_control;
1408 1.3 oster }
1409 1.3 oster
1410 1.3 oster /* connect the sync node to the Xor node */
1411 1.3 oster RF_ASSERT(xorNode->numAntecedents == 1);
1412 1.3 oster syncNode->succedents[nWndNodes] = xorNode;
1413 1.3 oster xorNode->antecedents[0] = syncNode;
1414 1.3 oster xorNode->antType[0] = rf_control;
1415 1.3 oster
1416 1.3 oster /* connect the xor node to the write parity node */
1417 1.3 oster RF_ASSERT(xorNode->numSuccedents == nfaults);
1418 1.3 oster RF_ASSERT(wnpNode->numAntecedents == 1);
1419 1.3 oster xorNode->succedents[0] = wnpNode;
1420 1.3 oster wnpNode->antecedents[0] = xorNode;
1421 1.3 oster wnpNode->antType[0] = rf_trueData;
1422 1.3 oster if (nfaults == 2) {
1423 1.3 oster RF_ASSERT(wnqNode->numAntecedents == 1);
1424 1.3 oster xorNode->succedents[1] = wnqNode;
1425 1.3 oster wnqNode->antecedents[0] = xorNode;
1426 1.3 oster wnqNode->antType[0] = rf_trueData;
1427 1.3 oster }
1428 1.3 oster /* connect the write nodes to the term node */
1429 1.3 oster RF_ASSERT(termNode->numAntecedents == nWndNodes + nfaults);
1430 1.3 oster RF_ASSERT(termNode->numSuccedents == 0);
1431 1.3 oster for (i = 0; i < nWndNodes; i++) {
1432 1.3 oster RF_ASSERT(wndNodes->numSuccedents == 1);
1433 1.3 oster wndNodes[i].succedents[0] = termNode;
1434 1.3 oster termNode->antecedents[i] = &wndNodes[i];
1435 1.3 oster termNode->antType[i] = rf_control;
1436 1.3 oster }
1437 1.3 oster RF_ASSERT(wnpNode->numSuccedents == 1);
1438 1.3 oster wnpNode->succedents[0] = termNode;
1439 1.3 oster termNode->antecedents[nWndNodes] = wnpNode;
1440 1.3 oster termNode->antType[nWndNodes] = rf_control;
1441 1.3 oster if (nfaults == 2) {
1442 1.3 oster RF_ASSERT(wnqNode->numSuccedents == 1);
1443 1.3 oster wnqNode->succedents[0] = termNode;
1444 1.3 oster termNode->antecedents[nWndNodes + 1] = wnqNode;
1445 1.3 oster termNode->antType[nWndNodes + 1] = rf_control;
1446 1.3 oster }
1447 1.1 oster }
1448 1.1 oster
1449 1.1 oster
1450 1.1 oster /******************************************************************************
1451 1.1 oster *
1452 1.1 oster * creates a DAG to perform a small-write operation (either raid 5 or pq),
1453 1.1 oster * which is as follows:
1454 1.1 oster *
1455 1.1 oster * Hdr -> Nil -> Rop - Xor - Wnp [Unp] -- Trm
1456 1.1 oster * \- Rod X- Wnd [Und] -------/
1457 1.1 oster * [\- Rod X- Wnd [Und] ------/]
1458 1.1 oster * [\- Roq - Q --> Wnq [Unq]-/]
1459 1.1 oster *
1460 1.1 oster * Rop = read old parity
1461 1.1 oster * Rod = read old data
1462 1.1 oster * Roq = read old "q"
1463 1.1 oster * Cmt = commit node
1464 1.1 oster * Und = unlock data disk
1465 1.1 oster * Unp = unlock parity disk
1466 1.1 oster * Unq = unlock q disk
1467 1.1 oster * Wnp = write new parity
1468 1.1 oster * Wnd = write new data
1469 1.1 oster * Wnq = write new "q"
1470 1.1 oster * [ ] denotes optional segments in the graph
1471 1.1 oster *
1472 1.1 oster * Parameters: raidPtr - description of the physical array
1473 1.1 oster * asmap - logical & physical addresses for this access
1474 1.1 oster * bp - buffer ptr (holds write data)
1475 1.3 oster * flags - general flags (e.g. disk locking)
1476 1.1 oster * allocList - list of memory allocated in DAG creation
1477 1.1 oster * pfuncs - list of parity generating functions
1478 1.1 oster * qfuncs - list of q generating functions
1479 1.1 oster *
1480 1.1 oster * A null qfuncs indicates single fault tolerant
1481 1.1 oster *****************************************************************************/
1482 1.1 oster
1483 1.3 oster void
1484 1.3 oster rf_CommonCreateSmallWriteDAGFwd(
1485 1.3 oster RF_Raid_t * raidPtr,
1486 1.3 oster RF_AccessStripeMap_t * asmap,
1487 1.3 oster RF_DagHeader_t * dag_h,
1488 1.3 oster void *bp,
1489 1.3 oster RF_RaidAccessFlags_t flags,
1490 1.3 oster RF_AllocListElem_t * allocList,
1491 1.3 oster RF_RedFuncs_t * pfuncs,
1492 1.3 oster RF_RedFuncs_t * qfuncs)
1493 1.1 oster {
1494 1.3 oster RF_DagNode_t *readDataNodes, *readParityNodes, *readQNodes, *termNode;
1495 1.3 oster RF_DagNode_t *unlockDataNodes, *unlockParityNodes, *unlockQNodes;
1496 1.3 oster RF_DagNode_t *xorNodes, *qNodes, *blockNode, *nodes;
1497 1.3 oster RF_DagNode_t *writeDataNodes, *writeParityNodes, *writeQNodes;
1498 1.3 oster int i, j, nNodes, totalNumNodes, lu_flag;
1499 1.3 oster RF_ReconUnitNum_t which_ru;
1500 1.3 oster int (*func) (RF_DagNode_t *), (*undoFunc) (RF_DagNode_t *);
1501 1.3 oster int (*qfunc) (RF_DagNode_t *);
1502 1.3 oster int numDataNodes, numParityNodes;
1503 1.3 oster RF_StripeNum_t parityStripeID;
1504 1.3 oster RF_PhysDiskAddr_t *pda;
1505 1.3 oster char *name, *qname;
1506 1.3 oster long nfaults;
1507 1.3 oster
1508 1.3 oster nfaults = qfuncs ? 2 : 1;
1509 1.3 oster lu_flag = (rf_enableAtomicRMW) ? 1 : 0; /* lock/unlock flag */
1510 1.3 oster
1511 1.3 oster parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout), asmap->raidAddress, &which_ru);
1512 1.3 oster pda = asmap->physInfo;
1513 1.3 oster numDataNodes = asmap->numStripeUnitsAccessed;
1514 1.3 oster numParityNodes = (asmap->parityInfo->next) ? 2 : 1;
1515 1.3 oster
1516 1.3 oster if (rf_dagDebug)
1517 1.3 oster printf("[Creating small-write DAG]\n");
1518 1.3 oster RF_ASSERT(numDataNodes > 0);
1519 1.3 oster dag_h->creator = "SmallWriteDAGFwd";
1520 1.3 oster
1521 1.3 oster dag_h->numCommitNodes = 0;
1522 1.3 oster dag_h->numCommits = 0;
1523 1.3 oster dag_h->numSuccedents = 1;
1524 1.3 oster
1525 1.3 oster qfunc = NULL;
1526 1.3 oster qname = NULL;
1527 1.3 oster
1528 1.3 oster /* DAG creation occurs in four steps: 1. count the number of nodes in
1529 1.3 oster * the DAG 2. create the nodes 3. initialize the nodes 4. connect the
1530 1.3 oster * nodes */
1531 1.3 oster
1532 1.3 oster /* Step 1. compute number of nodes in the graph */
1533 1.3 oster
1534 1.3 oster /* number of nodes: a read and write for each data unit a redundancy
1535 1.3 oster * computation node for each parity node (nfaults * nparity) a read
1536 1.3 oster * and write for each parity unit a block node a terminate node if
1537 1.3 oster * atomic RMW an unlock node for each data unit, redundancy unit */
1538 1.3 oster totalNumNodes = (2 * numDataNodes) + (nfaults * numParityNodes) + (nfaults * 2 * numParityNodes) + 2;
1539 1.3 oster if (lu_flag)
1540 1.3 oster totalNumNodes += (numDataNodes + (nfaults * numParityNodes));
1541 1.3 oster
1542 1.3 oster
1543 1.3 oster /* Step 2. create the nodes */
1544 1.3 oster RF_CallocAndAdd(nodes, totalNumNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
1545 1.3 oster i = 0;
1546 1.3 oster blockNode = &nodes[i];
1547 1.3 oster i += 1;
1548 1.3 oster readDataNodes = &nodes[i];
1549 1.3 oster i += numDataNodes;
1550 1.3 oster readParityNodes = &nodes[i];
1551 1.3 oster i += numParityNodes;
1552 1.3 oster writeDataNodes = &nodes[i];
1553 1.3 oster i += numDataNodes;
1554 1.3 oster writeParityNodes = &nodes[i];
1555 1.3 oster i += numParityNodes;
1556 1.3 oster xorNodes = &nodes[i];
1557 1.3 oster i += numParityNodes;
1558 1.3 oster termNode = &nodes[i];
1559 1.3 oster i += 1;
1560 1.3 oster if (lu_flag) {
1561 1.3 oster unlockDataNodes = &nodes[i];
1562 1.3 oster i += numDataNodes;
1563 1.3 oster unlockParityNodes = &nodes[i];
1564 1.3 oster i += numParityNodes;
1565 1.3 oster } else {
1566 1.3 oster unlockDataNodes = unlockParityNodes = NULL;
1567 1.3 oster }
1568 1.3 oster if (nfaults == 2) {
1569 1.3 oster readQNodes = &nodes[i];
1570 1.3 oster i += numParityNodes;
1571 1.3 oster writeQNodes = &nodes[i];
1572 1.3 oster i += numParityNodes;
1573 1.3 oster qNodes = &nodes[i];
1574 1.3 oster i += numParityNodes;
1575 1.3 oster if (lu_flag) {
1576 1.3 oster unlockQNodes = &nodes[i];
1577 1.3 oster i += numParityNodes;
1578 1.3 oster } else {
1579 1.3 oster unlockQNodes = NULL;
1580 1.3 oster }
1581 1.3 oster } else {
1582 1.3 oster readQNodes = writeQNodes = qNodes = unlockQNodes = NULL;
1583 1.3 oster }
1584 1.3 oster RF_ASSERT(i == totalNumNodes);
1585 1.1 oster
1586 1.3 oster /* Step 3. initialize the nodes */
1587 1.3 oster /* initialize block node (Nil) */
1588 1.3 oster nNodes = numDataNodes + (nfaults * numParityNodes);
1589 1.3 oster rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nNodes, 0, 0, 0, dag_h, "Nil", allocList);
1590 1.3 oster
1591 1.3 oster /* initialize terminate node (Trm) */
1592 1.3 oster rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, nNodes, 0, 0, dag_h, "Trm", allocList);
1593 1.3 oster
1594 1.3 oster /* initialize nodes which read old data (Rod) */
1595 1.3 oster for (i = 0; i < numDataNodes; i++) {
1596 1.3 oster rf_InitNode(&readDataNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, (numParityNodes * nfaults) + 1, 1, 4, 0, dag_h, "Rod", allocList);
1597 1.3 oster RF_ASSERT(pda != NULL);
1598 1.3 oster readDataNodes[i].params[0].p = pda; /* physical disk addr
1599 1.3 oster * desc */
1600 1.3 oster readDataNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda, allocList); /* buffer to hold old
1601 1.3 oster * data */
1602 1.3 oster readDataNodes[i].params[2].v = parityStripeID;
1603 1.3 oster readDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, lu_flag, 0, which_ru);
1604 1.3 oster pda = pda->next;
1605 1.3 oster for (j = 0; j < readDataNodes[i].numSuccedents; j++)
1606 1.3 oster readDataNodes[i].propList[j] = NULL;
1607 1.3 oster }
1608 1.3 oster
1609 1.3 oster /* initialize nodes which read old parity (Rop) */
1610 1.3 oster pda = asmap->parityInfo;
1611 1.3 oster i = 0;
1612 1.3 oster for (i = 0; i < numParityNodes; i++) {
1613 1.3 oster RF_ASSERT(pda != NULL);
1614 1.3 oster rf_InitNode(&readParityNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, numParityNodes, 1, 4, 0, dag_h, "Rop", allocList);
1615 1.3 oster readParityNodes[i].params[0].p = pda;
1616 1.3 oster readParityNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda, allocList); /* buffer to hold old
1617 1.3 oster * parity */
1618 1.3 oster readParityNodes[i].params[2].v = parityStripeID;
1619 1.3 oster readParityNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, lu_flag, 0, which_ru);
1620 1.3 oster for (j = 0; j < readParityNodes[i].numSuccedents; j++)
1621 1.3 oster readParityNodes[i].propList[0] = NULL;
1622 1.3 oster pda = pda->next;
1623 1.3 oster }
1624 1.3 oster
1625 1.3 oster /* initialize nodes which read old Q (Roq) */
1626 1.3 oster if (nfaults == 2) {
1627 1.3 oster pda = asmap->qInfo;
1628 1.3 oster for (i = 0; i < numParityNodes; i++) {
1629 1.3 oster RF_ASSERT(pda != NULL);
1630 1.3 oster rf_InitNode(&readQNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, numParityNodes, 1, 4, 0, dag_h, "Roq", allocList);
1631 1.3 oster readQNodes[i].params[0].p = pda;
1632 1.3 oster readQNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda, allocList); /* buffer to hold old Q */
1633 1.3 oster readQNodes[i].params[2].v = parityStripeID;
1634 1.3 oster readQNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, lu_flag, 0, which_ru);
1635 1.3 oster for (j = 0; j < readQNodes[i].numSuccedents; j++)
1636 1.3 oster readQNodes[i].propList[0] = NULL;
1637 1.3 oster pda = pda->next;
1638 1.3 oster }
1639 1.3 oster }
1640 1.3 oster /* initialize nodes which write new data (Wnd) */
1641 1.3 oster pda = asmap->physInfo;
1642 1.3 oster for (i = 0; i < numDataNodes; i++) {
1643 1.3 oster RF_ASSERT(pda != NULL);
1644 1.3 oster rf_InitNode(&writeDataNodes[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnd", allocList);
1645 1.3 oster writeDataNodes[i].params[0].p = pda; /* physical disk addr
1646 1.3 oster * desc */
1647 1.3 oster writeDataNodes[i].params[1].p = pda->bufPtr; /* buffer holding new
1648 1.3 oster * data to be written */
1649 1.3 oster writeDataNodes[i].params[2].v = parityStripeID;
1650 1.3 oster writeDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1651 1.3 oster
1652 1.3 oster if (lu_flag) {
1653 1.3 oster /* initialize node to unlock the disk queue */
1654 1.3 oster rf_InitNode(&unlockDataNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc, rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Und", allocList);
1655 1.3 oster unlockDataNodes[i].params[0].p = pda; /* physical disk addr
1656 1.3 oster * desc */
1657 1.3 oster unlockDataNodes[i].params[1].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, lu_flag, which_ru);
1658 1.3 oster }
1659 1.3 oster pda = pda->next;
1660 1.3 oster }
1661 1.3 oster
1662 1.3 oster
1663 1.3 oster /* initialize nodes which compute new parity and Q */
1664 1.3 oster /* we use the simple XOR func in the double-XOR case, and when we're
1665 1.3 oster * accessing only a portion of one stripe unit. the distinction
1666 1.3 oster * between the two is that the regular XOR func assumes that the
1667 1.3 oster * targbuf is a full SU in size, and examines the pda associated with
1668 1.3 oster * the buffer to decide where within the buffer to XOR the data,
1669 1.3 oster * whereas the simple XOR func just XORs the data into the start of
1670 1.3 oster * the buffer. */
1671 1.3 oster if ((numParityNodes == 2) || ((numDataNodes == 1) && (asmap->totalSectorsAccessed < raidPtr->Layout.sectorsPerStripeUnit))) {
1672 1.3 oster func = pfuncs->simple;
1673 1.3 oster undoFunc = rf_NullNodeUndoFunc;
1674 1.3 oster name = pfuncs->SimpleName;
1675 1.3 oster if (qfuncs) {
1676 1.3 oster qfunc = qfuncs->simple;
1677 1.3 oster qname = qfuncs->SimpleName;
1678 1.3 oster }
1679 1.3 oster } else {
1680 1.3 oster func = pfuncs->regular;
1681 1.3 oster undoFunc = rf_NullNodeUndoFunc;
1682 1.3 oster name = pfuncs->RegularName;
1683 1.3 oster if (qfuncs) {
1684 1.3 oster qfunc = qfuncs->regular;
1685 1.3 oster qname = qfuncs->RegularName;
1686 1.3 oster }
1687 1.3 oster }
1688 1.3 oster /* initialize the xor nodes: params are {pda,buf} from {Rod,Wnd,Rop}
1689 1.3 oster * nodes, and raidPtr */
1690 1.3 oster if (numParityNodes == 2) { /* double-xor case */
1691 1.3 oster for (i = 0; i < numParityNodes; i++) {
1692 1.3 oster rf_InitNode(&xorNodes[i], rf_wait, RF_FALSE, func, undoFunc, NULL, numParityNodes, numParityNodes + numDataNodes, 7, 1, dag_h, name, allocList); /* no wakeup func for
1693 1.3 oster * xor */
1694 1.3 oster xorNodes[i].flags |= RF_DAGNODE_FLAG_YIELD;
1695 1.3 oster xorNodes[i].params[0] = readDataNodes[i].params[0];
1696 1.3 oster xorNodes[i].params[1] = readDataNodes[i].params[1];
1697 1.3 oster xorNodes[i].params[2] = readParityNodes[i].params[0];
1698 1.3 oster xorNodes[i].params[3] = readParityNodes[i].params[1];
1699 1.3 oster xorNodes[i].params[4] = writeDataNodes[i].params[0];
1700 1.3 oster xorNodes[i].params[5] = writeDataNodes[i].params[1];
1701 1.3 oster xorNodes[i].params[6].p = raidPtr;
1702 1.3 oster xorNodes[i].results[0] = readParityNodes[i].params[1].p; /* use old parity buf as
1703 1.3 oster * target buf */
1704 1.3 oster if (nfaults == 2) {
1705 1.3 oster rf_InitNode(&qNodes[i], rf_wait, RF_FALSE, qfunc, undoFunc, NULL, numParityNodes, numParityNodes + numDataNodes, 7, 1, dag_h, qname, allocList); /* no wakeup func for
1706 1.3 oster * xor */
1707 1.3 oster qNodes[i].params[0] = readDataNodes[i].params[0];
1708 1.3 oster qNodes[i].params[1] = readDataNodes[i].params[1];
1709 1.3 oster qNodes[i].params[2] = readQNodes[i].params[0];
1710 1.3 oster qNodes[i].params[3] = readQNodes[i].params[1];
1711 1.3 oster qNodes[i].params[4] = writeDataNodes[i].params[0];
1712 1.3 oster qNodes[i].params[5] = writeDataNodes[i].params[1];
1713 1.3 oster qNodes[i].params[6].p = raidPtr;
1714 1.3 oster qNodes[i].results[0] = readQNodes[i].params[1].p; /* use old Q buf as
1715 1.3 oster * target buf */
1716 1.3 oster }
1717 1.3 oster }
1718 1.3 oster } else {
1719 1.3 oster /* there is only one xor node in this case */
1720 1.3 oster rf_InitNode(&xorNodes[0], rf_wait, RF_FALSE, func, undoFunc, NULL, numParityNodes, numParityNodes + numDataNodes, (2 * (numDataNodes + numDataNodes + 1) + 1), 1, dag_h, name, allocList);
1721 1.3 oster xorNodes[0].flags |= RF_DAGNODE_FLAG_YIELD;
1722 1.3 oster for (i = 0; i < numDataNodes + 1; i++) {
1723 1.3 oster /* set up params related to Rod and Rop nodes */
1724 1.3 oster xorNodes[0].params[2 * i + 0] = readDataNodes[i].params[0]; /* pda */
1725 1.3 oster xorNodes[0].params[2 * i + 1] = readDataNodes[i].params[1]; /* buffer pointer */
1726 1.3 oster }
1727 1.3 oster for (i = 0; i < numDataNodes; i++) {
1728 1.3 oster /* set up params related to Wnd and Wnp nodes */
1729 1.3 oster xorNodes[0].params[2 * (numDataNodes + 1 + i) + 0] = writeDataNodes[i].params[0]; /* pda */
1730 1.3 oster xorNodes[0].params[2 * (numDataNodes + 1 + i) + 1] = writeDataNodes[i].params[1]; /* buffer pointer */
1731 1.3 oster }
1732 1.3 oster xorNodes[0].params[2 * (numDataNodes + numDataNodes + 1)].p = raidPtr; /* xor node needs to get
1733 1.3 oster * at RAID information */
1734 1.3 oster xorNodes[0].results[0] = readParityNodes[0].params[1].p;
1735 1.3 oster if (nfaults == 2) {
1736 1.3 oster rf_InitNode(&qNodes[0], rf_wait, RF_FALSE, qfunc, undoFunc, NULL, numParityNodes, numParityNodes + numDataNodes, (2 * (numDataNodes + numDataNodes + 1) + 1), 1, dag_h, qname, allocList);
1737 1.3 oster for (i = 0; i < numDataNodes; i++) {
1738 1.3 oster /* set up params related to Rod */
1739 1.3 oster qNodes[0].params[2 * i + 0] = readDataNodes[i].params[0]; /* pda */
1740 1.3 oster qNodes[0].params[2 * i + 1] = readDataNodes[i].params[1]; /* buffer pointer */
1741 1.3 oster }
1742 1.3 oster /* and read old q */
1743 1.3 oster qNodes[0].params[2 * numDataNodes + 0] = readQNodes[0].params[0]; /* pda */
1744 1.3 oster qNodes[0].params[2 * numDataNodes + 1] = readQNodes[0].params[1]; /* buffer pointer */
1745 1.3 oster for (i = 0; i < numDataNodes; i++) {
1746 1.3 oster /* set up params related to Wnd nodes */
1747 1.3 oster qNodes[0].params[2 * (numDataNodes + 1 + i) + 0] = writeDataNodes[i].params[0]; /* pda */
1748 1.3 oster qNodes[0].params[2 * (numDataNodes + 1 + i) + 1] = writeDataNodes[i].params[1]; /* buffer pointer */
1749 1.3 oster }
1750 1.3 oster qNodes[0].params[2 * (numDataNodes + numDataNodes + 1)].p = raidPtr; /* xor node needs to get
1751 1.3 oster * at RAID information */
1752 1.3 oster qNodes[0].results[0] = readQNodes[0].params[1].p;
1753 1.3 oster }
1754 1.3 oster }
1755 1.3 oster
1756 1.3 oster /* initialize nodes which write new parity (Wnp) */
1757 1.3 oster pda = asmap->parityInfo;
1758 1.3 oster for (i = 0; i < numParityNodes; i++) {
1759 1.3 oster rf_InitNode(&writeParityNodes[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, numParityNodes, 4, 0, dag_h, "Wnp", allocList);
1760 1.3 oster RF_ASSERT(pda != NULL);
1761 1.3 oster writeParityNodes[i].params[0].p = pda; /* param 1 (bufPtr)
1762 1.3 oster * filled in by xor node */
1763 1.3 oster writeParityNodes[i].params[1].p = xorNodes[i].results[0]; /* buffer pointer for
1764 1.3 oster * parity write
1765 1.3 oster * operation */
1766 1.3 oster writeParityNodes[i].params[2].v = parityStripeID;
1767 1.3 oster writeParityNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1768 1.3 oster
1769 1.3 oster if (lu_flag) {
1770 1.3 oster /* initialize node to unlock the disk queue */
1771 1.3 oster rf_InitNode(&unlockParityNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc, rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Unp", allocList);
1772 1.3 oster unlockParityNodes[i].params[0].p = pda; /* physical disk addr
1773 1.3 oster * desc */
1774 1.3 oster unlockParityNodes[i].params[1].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, lu_flag, which_ru);
1775 1.3 oster }
1776 1.3 oster pda = pda->next;
1777 1.3 oster }
1778 1.3 oster
1779 1.3 oster /* initialize nodes which write new Q (Wnq) */
1780 1.3 oster if (nfaults == 2) {
1781 1.3 oster pda = asmap->qInfo;
1782 1.3 oster for (i = 0; i < numParityNodes; i++) {
1783 1.3 oster rf_InitNode(&writeQNodes[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, numParityNodes, 4, 0, dag_h, "Wnq", allocList);
1784 1.3 oster RF_ASSERT(pda != NULL);
1785 1.3 oster writeQNodes[i].params[0].p = pda; /* param 1 (bufPtr)
1786 1.3 oster * filled in by xor node */
1787 1.3 oster writeQNodes[i].params[1].p = qNodes[i].results[0]; /* buffer pointer for
1788 1.3 oster * parity write
1789 1.3 oster * operation */
1790 1.3 oster writeQNodes[i].params[2].v = parityStripeID;
1791 1.3 oster writeQNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
1792 1.3 oster
1793 1.3 oster if (lu_flag) {
1794 1.3 oster /* initialize node to unlock the disk queue */
1795 1.3 oster rf_InitNode(&unlockQNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc, rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Unq", allocList);
1796 1.3 oster unlockQNodes[i].params[0].p = pda; /* physical disk addr
1797 1.3 oster * desc */
1798 1.3 oster unlockQNodes[i].params[1].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, lu_flag, which_ru);
1799 1.3 oster }
1800 1.3 oster pda = pda->next;
1801 1.3 oster }
1802 1.3 oster }
1803 1.3 oster /* Step 4. connect the nodes */
1804 1.3 oster
1805 1.3 oster /* connect header to block node */
1806 1.3 oster dag_h->succedents[0] = blockNode;
1807 1.3 oster
1808 1.3 oster /* connect block node to read old data nodes */
1809 1.3 oster RF_ASSERT(blockNode->numSuccedents == (numDataNodes + (numParityNodes * nfaults)));
1810 1.3 oster for (i = 0; i < numDataNodes; i++) {
1811 1.3 oster blockNode->succedents[i] = &readDataNodes[i];
1812 1.3 oster RF_ASSERT(readDataNodes[i].numAntecedents == 1);
1813 1.3 oster readDataNodes[i].antecedents[0] = blockNode;
1814 1.3 oster readDataNodes[i].antType[0] = rf_control;
1815 1.3 oster }
1816 1.3 oster
1817 1.3 oster /* connect block node to read old parity nodes */
1818 1.3 oster for (i = 0; i < numParityNodes; i++) {
1819 1.3 oster blockNode->succedents[numDataNodes + i] = &readParityNodes[i];
1820 1.3 oster RF_ASSERT(readParityNodes[i].numAntecedents == 1);
1821 1.3 oster readParityNodes[i].antecedents[0] = blockNode;
1822 1.3 oster readParityNodes[i].antType[0] = rf_control;
1823 1.3 oster }
1824 1.3 oster
1825 1.3 oster /* connect block node to read old Q nodes */
1826 1.3 oster if (nfaults == 2)
1827 1.3 oster for (i = 0; i < numParityNodes; i++) {
1828 1.3 oster blockNode->succedents[numDataNodes + numParityNodes + i] = &readQNodes[i];
1829 1.3 oster RF_ASSERT(readQNodes[i].numAntecedents == 1);
1830 1.3 oster readQNodes[i].antecedents[0] = blockNode;
1831 1.3 oster readQNodes[i].antType[0] = rf_control;
1832 1.3 oster }
1833 1.3 oster
1834 1.3 oster /* connect read old data nodes to write new data nodes */
1835 1.3 oster for (i = 0; i < numDataNodes; i++) {
1836 1.3 oster RF_ASSERT(readDataNodes[i].numSuccedents == ((nfaults * numParityNodes) + 1));
1837 1.3 oster RF_ASSERT(writeDataNodes[i].numAntecedents == 1);
1838 1.3 oster readDataNodes[i].succedents[0] = &writeDataNodes[i];
1839 1.3 oster writeDataNodes[i].antecedents[0] = &readDataNodes[i];
1840 1.3 oster writeDataNodes[i].antType[0] = rf_antiData;
1841 1.3 oster }
1842 1.3 oster
1843 1.3 oster /* connect read old data nodes to xor nodes */
1844 1.3 oster for (i = 0; i < numDataNodes; i++) {
1845 1.3 oster for (j = 0; j < numParityNodes; j++) {
1846 1.3 oster RF_ASSERT(xorNodes[j].numAntecedents == numDataNodes + numParityNodes);
1847 1.3 oster readDataNodes[i].succedents[1 + j] = &xorNodes[j];
1848 1.3 oster xorNodes[j].antecedents[i] = &readDataNodes[i];
1849 1.3 oster xorNodes[j].antType[i] = rf_trueData;
1850 1.3 oster }
1851 1.3 oster }
1852 1.3 oster
1853 1.3 oster /* connect read old data nodes to q nodes */
1854 1.3 oster if (nfaults == 2)
1855 1.3 oster for (i = 0; i < numDataNodes; i++)
1856 1.3 oster for (j = 0; j < numParityNodes; j++) {
1857 1.3 oster RF_ASSERT(qNodes[j].numAntecedents == numDataNodes + numParityNodes);
1858 1.3 oster readDataNodes[i].succedents[1 + numParityNodes + j] = &qNodes[j];
1859 1.3 oster qNodes[j].antecedents[i] = &readDataNodes[i];
1860 1.3 oster qNodes[j].antType[i] = rf_trueData;
1861 1.3 oster }
1862 1.3 oster
1863 1.3 oster /* connect read old parity nodes to xor nodes */
1864 1.3 oster for (i = 0; i < numParityNodes; i++) {
1865 1.3 oster for (j = 0; j < numParityNodes; j++) {
1866 1.3 oster RF_ASSERT(readParityNodes[i].numSuccedents == numParityNodes);
1867 1.3 oster readParityNodes[i].succedents[j] = &xorNodes[j];
1868 1.3 oster xorNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
1869 1.3 oster xorNodes[j].antType[numDataNodes + i] = rf_trueData;
1870 1.3 oster }
1871 1.3 oster }
1872 1.3 oster
1873 1.3 oster /* connect read old q nodes to q nodes */
1874 1.3 oster if (nfaults == 2)
1875 1.3 oster for (i = 0; i < numParityNodes; i++) {
1876 1.3 oster for (j = 0; j < numParityNodes; j++) {
1877 1.3 oster RF_ASSERT(readQNodes[i].numSuccedents == numParityNodes);
1878 1.3 oster readQNodes[i].succedents[j] = &qNodes[j];
1879 1.3 oster qNodes[j].antecedents[numDataNodes + i] = &readQNodes[i];
1880 1.3 oster qNodes[j].antType[numDataNodes + i] = rf_trueData;
1881 1.3 oster }
1882 1.3 oster }
1883 1.3 oster
1884 1.3 oster /* connect xor nodes to the write new parity nodes */
1885 1.3 oster for (i = 0; i < numParityNodes; i++) {
1886 1.3 oster RF_ASSERT(writeParityNodes[i].numAntecedents == numParityNodes);
1887 1.3 oster for (j = 0; j < numParityNodes; j++) {
1888 1.3 oster RF_ASSERT(xorNodes[j].numSuccedents == numParityNodes);
1889 1.3 oster xorNodes[i].succedents[j] = &writeParityNodes[j];
1890 1.3 oster writeParityNodes[j].antecedents[i] = &xorNodes[i];
1891 1.3 oster writeParityNodes[j].antType[i] = rf_trueData;
1892 1.3 oster }
1893 1.3 oster }
1894 1.3 oster
1895 1.3 oster /* connect q nodes to the write new q nodes */
1896 1.3 oster if (nfaults == 2)
1897 1.3 oster for (i = 0; i < numParityNodes; i++) {
1898 1.3 oster RF_ASSERT(writeQNodes[i].numAntecedents == numParityNodes);
1899 1.3 oster for (j = 0; j < numParityNodes; j++) {
1900 1.3 oster RF_ASSERT(qNodes[j].numSuccedents == 1);
1901 1.3 oster qNodes[i].succedents[j] = &writeQNodes[j];
1902 1.3 oster writeQNodes[j].antecedents[i] = &qNodes[i];
1903 1.3 oster writeQNodes[j].antType[i] = rf_trueData;
1904 1.3 oster }
1905 1.3 oster }
1906 1.3 oster
1907 1.3 oster RF_ASSERT(termNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
1908 1.3 oster RF_ASSERT(termNode->numSuccedents == 0);
1909 1.3 oster for (i = 0; i < numDataNodes; i++) {
1910 1.3 oster if (lu_flag) {
1911 1.3 oster /* connect write new data nodes to unlock nodes */
1912 1.3 oster RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
1913 1.3 oster RF_ASSERT(unlockDataNodes[i].numAntecedents == 1);
1914 1.3 oster writeDataNodes[i].succedents[0] = &unlockDataNodes[i];
1915 1.3 oster unlockDataNodes[i].antecedents[0] = &writeDataNodes[i];
1916 1.3 oster unlockDataNodes[i].antType[0] = rf_control;
1917 1.3 oster
1918 1.3 oster /* connect unlock nodes to term node */
1919 1.3 oster RF_ASSERT(unlockDataNodes[i].numSuccedents == 1);
1920 1.3 oster unlockDataNodes[i].succedents[0] = termNode;
1921 1.3 oster termNode->antecedents[i] = &unlockDataNodes[i];
1922 1.3 oster termNode->antType[i] = rf_control;
1923 1.3 oster } else {
1924 1.3 oster /* connect write new data nodes to term node */
1925 1.3 oster RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
1926 1.3 oster RF_ASSERT(termNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
1927 1.3 oster writeDataNodes[i].succedents[0] = termNode;
1928 1.3 oster termNode->antecedents[i] = &writeDataNodes[i];
1929 1.3 oster termNode->antType[i] = rf_control;
1930 1.3 oster }
1931 1.3 oster }
1932 1.3 oster
1933 1.3 oster for (i = 0; i < numParityNodes; i++) {
1934 1.3 oster if (lu_flag) {
1935 1.3 oster /* connect write new parity nodes to unlock nodes */
1936 1.3 oster RF_ASSERT(writeParityNodes[i].numSuccedents == 1);
1937 1.3 oster RF_ASSERT(unlockParityNodes[i].numAntecedents == 1);
1938 1.3 oster writeParityNodes[i].succedents[0] = &unlockParityNodes[i];
1939 1.3 oster unlockParityNodes[i].antecedents[0] = &writeParityNodes[i];
1940 1.3 oster unlockParityNodes[i].antType[0] = rf_control;
1941 1.3 oster
1942 1.3 oster /* connect unlock nodes to term node */
1943 1.3 oster RF_ASSERT(unlockParityNodes[i].numSuccedents == 1);
1944 1.3 oster unlockParityNodes[i].succedents[0] = termNode;
1945 1.3 oster termNode->antecedents[numDataNodes + i] = &unlockParityNodes[i];
1946 1.3 oster termNode->antType[numDataNodes + i] = rf_control;
1947 1.3 oster } else {
1948 1.3 oster RF_ASSERT(writeParityNodes[i].numSuccedents == 1);
1949 1.3 oster writeParityNodes[i].succedents[0] = termNode;
1950 1.3 oster termNode->antecedents[numDataNodes + i] = &writeParityNodes[i];
1951 1.3 oster termNode->antType[numDataNodes + i] = rf_control;
1952 1.3 oster }
1953 1.3 oster }
1954 1.3 oster
1955 1.3 oster if (nfaults == 2)
1956 1.3 oster for (i = 0; i < numParityNodes; i++) {
1957 1.3 oster if (lu_flag) {
1958 1.3 oster /* connect write new Q nodes to unlock nodes */
1959 1.3 oster RF_ASSERT(writeQNodes[i].numSuccedents == 1);
1960 1.3 oster RF_ASSERT(unlockQNodes[i].numAntecedents == 1);
1961 1.3 oster writeQNodes[i].succedents[0] = &unlockQNodes[i];
1962 1.3 oster unlockQNodes[i].antecedents[0] = &writeQNodes[i];
1963 1.3 oster unlockQNodes[i].antType[0] = rf_control;
1964 1.3 oster
			/* connect unlock nodes to term node */
1966 1.3 oster RF_ASSERT(unlockQNodes[i].numSuccedents == 1);
1967 1.3 oster unlockQNodes[i].succedents[0] = termNode;
1968 1.3 oster termNode->antecedents[numDataNodes + numParityNodes + i] = &unlockQNodes[i];
1969 1.3 oster termNode->antType[numDataNodes + numParityNodes + i] = rf_control;
1970 1.3 oster } else {
1971 1.3 oster RF_ASSERT(writeQNodes[i].numSuccedents == 1);
1972 1.3 oster writeQNodes[i].succedents[0] = termNode;
1973 1.3 oster termNode->antecedents[numDataNodes + numParityNodes + i] = &writeQNodes[i];
1974 1.3 oster termNode->antType[numDataNodes + numParityNodes + i] = rf_control;
1975 1.3 oster }
1976 1.3 oster }
1977 1.1 oster }
1978 1.1 oster
1979 1.1 oster
1980 1.1 oster
1981 1.1 oster /******************************************************************************
1982 1.1 oster * create a write graph (fault-free or degraded) for RAID level 1
1983 1.1 oster *
1984 1.1 oster * Hdr Nil -> Wpd -> Nil -> Trm
1985 1.1 oster * Nil -> Wsd ->
1986 1.1 oster *
1987 1.1 oster * The "Wpd" node writes data to the primary copy in the mirror pair
1988 1.1 oster * The "Wsd" node writes data to the secondary copy in the mirror pair
1989 1.1 oster *
1990 1.1 oster * Parameters: raidPtr - description of the physical array
1991 1.1 oster * asmap - logical & physical addresses for this access
1992 1.1 oster * bp - buffer ptr (holds write data)
1993 1.3 oster * flags - general flags (e.g. disk locking)
1994 1.1 oster * allocList - list of memory allocated in DAG creation
1995 1.1 oster *****************************************************************************/
1996 1.1 oster
1997 1.3 oster void
1998 1.3 oster rf_CreateRaidOneWriteDAGFwd(
1999 1.3 oster RF_Raid_t * raidPtr,
2000 1.3 oster RF_AccessStripeMap_t * asmap,
2001 1.3 oster RF_DagHeader_t * dag_h,
2002 1.3 oster void *bp,
2003 1.3 oster RF_RaidAccessFlags_t flags,
2004 1.3 oster RF_AllocListElem_t * allocList)
2005 1.1 oster {
2006 1.3 oster RF_DagNode_t *blockNode, *unblockNode, *termNode;
2007 1.3 oster RF_DagNode_t *nodes, *wndNode, *wmirNode;
2008 1.3 oster int nWndNodes, nWmirNodes, i;
2009 1.3 oster RF_ReconUnitNum_t which_ru;
2010 1.3 oster RF_PhysDiskAddr_t *pda, *pdaP;
2011 1.3 oster RF_StripeNum_t parityStripeID;
2012 1.3 oster
2013 1.3 oster parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout),
2014 1.3 oster asmap->raidAddress, &which_ru);
2015 1.3 oster if (rf_dagDebug) {
2016 1.3 oster printf("[Creating RAID level 1 write DAG]\n");
2017 1.3 oster }
2018 1.3 oster nWmirNodes = (asmap->parityInfo->next) ? 2 : 1; /* 2 implies access not
2019 1.3 oster * SU aligned */
2020 1.3 oster nWndNodes = (asmap->physInfo->next) ? 2 : 1;
2021 1.3 oster
2022 1.3 oster /* alloc the Wnd nodes and the Wmir node */
2023 1.3 oster if (asmap->numDataFailed == 1)
2024 1.3 oster nWndNodes--;
2025 1.3 oster if (asmap->numParityFailed == 1)
2026 1.3 oster nWmirNodes--;
2027 1.3 oster
2028 1.3 oster /* total number of nodes = nWndNodes + nWmirNodes + (block + unblock +
2029 1.3 oster * terminator) */
2030 1.3 oster RF_CallocAndAdd(nodes, nWndNodes + nWmirNodes + 3, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
2031 1.3 oster i = 0;
2032 1.3 oster wndNode = &nodes[i];
2033 1.3 oster i += nWndNodes;
2034 1.3 oster wmirNode = &nodes[i];
2035 1.3 oster i += nWmirNodes;
2036 1.3 oster blockNode = &nodes[i];
2037 1.3 oster i += 1;
2038 1.3 oster unblockNode = &nodes[i];
2039 1.3 oster i += 1;
2040 1.3 oster termNode = &nodes[i];
2041 1.3 oster i += 1;
2042 1.3 oster RF_ASSERT(i == (nWndNodes + nWmirNodes + 3));
2043 1.3 oster
2044 1.3 oster /* this dag can commit immediately */
2045 1.3 oster dag_h->numCommitNodes = 0;
2046 1.3 oster dag_h->numCommits = 0;
2047 1.3 oster dag_h->numSuccedents = 1;
2048 1.3 oster
2049 1.3 oster /* initialize the unblock and term nodes */
2050 1.3 oster rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, (nWndNodes + nWmirNodes), 0, 0, 0, dag_h, "Nil", allocList);
2051 1.3 oster rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, (nWndNodes + nWmirNodes), 0, 0, dag_h, "Nil", allocList);
2052 1.3 oster rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);
2053 1.3 oster
2054 1.3 oster /* initialize the wnd nodes */
2055 1.3 oster if (nWndNodes > 0) {
2056 1.3 oster pda = asmap->physInfo;
2057 1.3 oster for (i = 0; i < nWndNodes; i++) {
2058 1.3 oster rf_InitNode(&wndNode[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wpd", allocList);
2059 1.3 oster RF_ASSERT(pda != NULL);
2060 1.3 oster wndNode[i].params[0].p = pda;
2061 1.3 oster wndNode[i].params[1].p = pda->bufPtr;
2062 1.3 oster wndNode[i].params[2].v = parityStripeID;
2063 1.3 oster wndNode[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
2064 1.3 oster pda = pda->next;
2065 1.3 oster }
2066 1.3 oster RF_ASSERT(pda == NULL);
2067 1.3 oster }
2068 1.3 oster /* initialize the mirror nodes */
2069 1.3 oster if (nWmirNodes > 0) {
2070 1.3 oster pda = asmap->physInfo;
2071 1.3 oster pdaP = asmap->parityInfo;
2072 1.3 oster for (i = 0; i < nWmirNodes; i++) {
2073 1.3 oster rf_InitNode(&wmirNode[i], rf_wait, RF_FALSE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wsd", allocList);
2074 1.3 oster RF_ASSERT(pda != NULL);
2075 1.3 oster wmirNode[i].params[0].p = pdaP;
2076 1.3 oster wmirNode[i].params[1].p = pda->bufPtr;
2077 1.3 oster wmirNode[i].params[2].v = parityStripeID;
2078 1.3 oster wmirNode[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
2079 1.3 oster pda = pda->next;
2080 1.3 oster pdaP = pdaP->next;
2081 1.3 oster }
2082 1.3 oster RF_ASSERT(pda == NULL);
2083 1.3 oster RF_ASSERT(pdaP == NULL);
2084 1.3 oster }
2085 1.3 oster /* link the header node to the block node */
2086 1.3 oster RF_ASSERT(dag_h->numSuccedents == 1);
2087 1.3 oster RF_ASSERT(blockNode->numAntecedents == 0);
2088 1.3 oster dag_h->succedents[0] = blockNode;
2089 1.3 oster
2090 1.3 oster /* link the block node to the write nodes */
2091 1.3 oster RF_ASSERT(blockNode->numSuccedents == (nWndNodes + nWmirNodes));
2092 1.3 oster for (i = 0; i < nWndNodes; i++) {
2093 1.3 oster RF_ASSERT(wndNode[i].numAntecedents == 1);
2094 1.3 oster blockNode->succedents[i] = &wndNode[i];
2095 1.3 oster wndNode[i].antecedents[0] = blockNode;
2096 1.3 oster wndNode[i].antType[0] = rf_control;
2097 1.3 oster }
2098 1.3 oster for (i = 0; i < nWmirNodes; i++) {
2099 1.3 oster RF_ASSERT(wmirNode[i].numAntecedents == 1);
2100 1.3 oster blockNode->succedents[i + nWndNodes] = &wmirNode[i];
2101 1.3 oster wmirNode[i].antecedents[0] = blockNode;
2102 1.3 oster wmirNode[i].antType[0] = rf_control;
2103 1.3 oster }
2104 1.3 oster
2105 1.3 oster /* link the write nodes to the unblock node */
2106 1.3 oster RF_ASSERT(unblockNode->numAntecedents == (nWndNodes + nWmirNodes));
2107 1.3 oster for (i = 0; i < nWndNodes; i++) {
2108 1.3 oster RF_ASSERT(wndNode[i].numSuccedents == 1);
2109 1.3 oster wndNode[i].succedents[0] = unblockNode;
2110 1.3 oster unblockNode->antecedents[i] = &wndNode[i];
2111 1.3 oster unblockNode->antType[i] = rf_control;
2112 1.3 oster }
2113 1.3 oster for (i = 0; i < nWmirNodes; i++) {
2114 1.3 oster RF_ASSERT(wmirNode[i].numSuccedents == 1);
2115 1.3 oster wmirNode[i].succedents[0] = unblockNode;
2116 1.3 oster unblockNode->antecedents[i + nWndNodes] = &wmirNode[i];
2117 1.3 oster unblockNode->antType[i + nWndNodes] = rf_control;
2118 1.3 oster }
2119 1.3 oster
2120 1.3 oster /* link the unblock node to the term node */
2121 1.3 oster RF_ASSERT(unblockNode->numSuccedents == 1);
2122 1.3 oster RF_ASSERT(termNode->numAntecedents == 1);
2123 1.3 oster RF_ASSERT(termNode->numSuccedents == 0);
2124 1.3 oster unblockNode->succedents[0] = termNode;
2125 1.3 oster termNode->antecedents[0] = unblockNode;
2126 1.3 oster termNode->antType[0] = rf_control;
2127 1.1 oster
2128 1.3 oster return;
2129 1.1 oster }
2130