/*	$NetBSD: rf_parityscan.c,v 1.9 2000/05/28 03:00:31 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*****************************************************************************
 *
 * rf_parityscan.c -- misc utilities related to parity verification
 *
 *****************************************************************************/

#include "rf_types.h"
#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagfuncs.h"
#include "rf_dagutils.h"
#include "rf_mcpair.h"
#include "rf_general.h"
#include "rf_engine.h"
#include "rf_parityscan.h"
#include "rf_map.h"

/*****************************************************************************************
 *
 * walk through the entire array and write new parity.
 * This works by creating two DAGs, one to read a stripe of data and one to
 * write new parity.  The first is executed, the data is xored together, and
 * then the second is executed.  To avoid constantly building and tearing down
 * the DAGs, we create them a priori and fill them in with the mapping
 * information as we go along.
 *
 * there should never be more than one thread running this.
 *
 ****************************************************************************************/

int
rf_RewriteParity(raidPtr)
        RF_Raid_t *raidPtr;
{
        RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
        RF_AccessStripeMapHeader_t *asm_h;
        int ret_val;
        int rc;
        RF_PhysDiskAddr_t pda;
        RF_SectorNum_t i;

        if (raidPtr->Layout.map->faultsTolerated == 0) {
                /* There isn't any parity. Call it "okay." */
                return (RF_PARITY_OKAY);
        }
        if (raidPtr->status[0] != rf_rs_optimal) {
                /*
                 * We're in degraded mode.  Don't try to verify parity now!
                 * XXX: this should be a "we don't want to", not a
                 * "we can't" error.
                 */
                return (RF_PARITY_COULD_NOT_VERIFY);
        }

        ret_val = 0;

        pda.startSector = 0;
        pda.numSector = raidPtr->Layout.sectorsPerStripeUnit;
        rc = RF_PARITY_OKAY;

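        /*
         * Walk the array one stripe of data at a time.  The RF_PARITY_*
         * return codes are ordered by severity (see rf_parityscan.h), so
         * the scan keeps going only while each stripe comes back as okay
         * or as corrected.
         */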
        for (i = 0; i < raidPtr->totalSectors &&
                     rc <= RF_PARITY_CORRECTED;
             i += layoutPtr->dataSectorsPerStripe) {
                if (raidPtr->waitShutdown) {
                        /* Someone is pulling the plug on this set...
                           abort the re-write */
                        return (1);
                }
                asm_h = rf_MapAccess(raidPtr, i,
                                     layoutPtr->dataSectorsPerStripe,
                                     NULL, RF_DONT_REMAP);
                raidPtr->parity_rewrite_stripes_done =
                        i / layoutPtr->dataSectorsPerStripe;
                rc = rf_VerifyParity(raidPtr, asm_h->stripeMap, 1, 0);

                switch (rc) {
                case RF_PARITY_OKAY:
                case RF_PARITY_CORRECTED:
                        break;
                case RF_PARITY_BAD:
                        printf("Parity bad during correction\n");
                        ret_val = 1;
                        break;
                case RF_PARITY_COULD_NOT_CORRECT:
                        printf("Could not correct bad parity\n");
                        ret_val = 1;
                        break;
                case RF_PARITY_COULD_NOT_VERIFY:
                        printf("Could not verify parity\n");
                        ret_val = 1;
                        break;
                default:
                        printf("Bad rc=%d from VerifyParity in RewriteParity\n", rc);
                        ret_val = 1;
                }
                rf_FreeAccessStripeMap(asm_h);
        }
        return (ret_val);
}
/*****************************************************************************************
 *
 * verify that the parity in a particular stripe is correct.
 * we validate only the range of parity defined by parityPDA, since
 * this is all we have locked.  The way we do this is to create an asm
 * that maps the whole stripe and then range-restrict it to the parity
 * region defined by the parityPDA.
 *
 ****************************************************************************************/
int
rf_VerifyParity(raidPtr, aasm, correct_it, flags)
        RF_Raid_t *raidPtr;
        RF_AccessStripeMap_t *aasm;
        int correct_it;
        RF_RaidAccessFlags_t flags;
{
        RF_PhysDiskAddr_t *parityPDA;
        RF_AccessStripeMap_t *doasm;
        RF_LayoutSW_t *lp;
        int lrc, rc;

        lp = raidPtr->Layout.map;
        if (lp->faultsTolerated == 0) {
                /*
                 * There isn't any parity. Call it "okay."
                 */
                return (RF_PARITY_OKAY);
        }
        rc = RF_PARITY_OKAY;
        if (lp->VerifyParity) {
                for (doasm = aasm; doasm; doasm = doasm->next) {
                        for (parityPDA = doasm->parityInfo; parityPDA;
                             parityPDA = parityPDA->next) {
                                lrc = lp->VerifyParity(raidPtr,
                                                       doasm->raidAddress,
                                                       parityPDA,
                                                       correct_it, flags);
                                if (lrc > rc) {
                                        /* see rf_parityscan.h for why this
                                         * works -- the return codes increase
                                         * with severity, so the max over all
                                         * parity ranges is the worst case */
                                        rc = lrc;
                                }
                        }
                }
        } else {
                rc = RF_PARITY_COULD_NOT_VERIFY;
        }
        return (rc);
}

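/*
 * Basic parity verification for a single stripe: read every stripe unit in
 * the stripe, XOR the data units together, and compare the result against
 * the parity unit that was read.  If correct_it is set and the parity is
 * wrong, write the recomputed parity back out.
 */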
int
rf_VerifyParityBasic(raidPtr, raidAddr, parityPDA, correct_it, flags)
        RF_Raid_t *raidPtr;
        RF_RaidAddr_t raidAddr;
        RF_PhysDiskAddr_t *parityPDA;
        int correct_it;
        RF_RaidAccessFlags_t flags;
{
        RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
        RF_RaidAddr_t startAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr,
            raidAddr);
        RF_SectorCount_t numsector = parityPDA->numSector;
        int numbytes = rf_RaidAddressToByte(raidPtr, numsector);
        int bytesPerStripe = numbytes * layoutPtr->numDataCol;
        RF_DagHeader_t *rd_dag_h, *wr_dag_h;    /* read, write dag */
        RF_DagNode_t *blockNode, *unblockNode, *wrBlock, *wrUnblock;
        RF_AccessStripeMapHeader_t *asm_h;
        RF_AccessStripeMap_t *asmap;
        RF_AllocListElem_t *alloclist;
        RF_PhysDiskAddr_t *pda;
        char *pbuf, *buf, *end_p, *p;
        int i, retcode;
        RF_ReconUnitNum_t which_ru;
        RF_StripeNum_t psID = rf_RaidAddressToParityStripeID(layoutPtr,
            raidAddr, &which_ru);
        int stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol;
        RF_AccTraceEntry_t tracerec;
        RF_MCPair_t *mcpair;

        retcode = RF_PARITY_OKAY;

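        /*
         * Allocate one read buffer large enough to hold a stripe unit from
         * every column (data columns first, then parity), plus a separate
         * zeroed buffer in which the parity will be recomputed.
         */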
        mcpair = rf_AllocMCPair();
        rf_MakeAllocList(alloclist);
        RF_MallocAndAdd(buf, numbytes * (layoutPtr->numDataCol + layoutPtr->numParityCol), (char *), alloclist);
        /* use calloc to make sure the buffer is zeroed */
        RF_CallocAndAdd(pbuf, 1, numbytes, (char *), alloclist);
        end_p = buf + bytesPerStripe;

        rd_dag_h = rf_MakeSimpleDAG(raidPtr, stripeWidth, numbytes, buf, rf_DiskReadFunc, rf_DiskReadUndoFunc,
            "Rod", alloclist, flags, RF_IO_NORMAL_PRIORITY);
        blockNode = rd_dag_h->succedents[0];
        unblockNode = blockNode->succedents[0]->succedents[0];

        /* map the stripe and fill in the PDAs in the dag */
        asm_h = rf_MapAccess(raidPtr, startAddr, layoutPtr->dataSectorsPerStripe, buf, RF_DONT_REMAP);
        asmap = asm_h->stripeMap;

        for (pda = asmap->physInfo, i = 0; i < layoutPtr->numDataCol; i++, pda = pda->next) {
                RF_ASSERT(pda);
                rf_RangeRestrictPDA(raidPtr, parityPDA, pda, 0, 1);
                RF_ASSERT(pda->numSector != 0);
                if (rf_TryToRedirectPDA(raidPtr, pda, 0))
                        goto out;       /* no way to verify parity if disk is
                                         * dead.  return w/ good status */
                blockNode->succedents[i]->params[0].p = pda;
                blockNode->succedents[i]->params[2].v = psID;
                blockNode->succedents[i]->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
        }

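        /*
         * Now hook up the (single) parity PDA.  Its read buffer was already
         * assigned by rf_MakeSimpleDAG: it is the stripe unit that follows
         * the data columns in buf, i.e. buf + bytesPerStripe.
         */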
        RF_ASSERT(!asmap->parityInfo->next);
        rf_RangeRestrictPDA(raidPtr, parityPDA, asmap->parityInfo, 0, 1);
        RF_ASSERT(asmap->parityInfo->numSector != 0);
        if (rf_TryToRedirectPDA(raidPtr, asmap->parityInfo, 1))
                goto out;
        blockNode->succedents[layoutPtr->numDataCol]->params[0].p = asmap->parityInfo;

        /* fire off the DAG */
        bzero((char *) &tracerec, sizeof(tracerec));
        rd_dag_h->tracerec = &tracerec;

        if (rf_verifyParityDebug) {
                printf("Parity verify read dag:\n");
                rf_PrintDAGList(rd_dag_h);
        }
        RF_LOCK_MUTEX(mcpair->mutex);
        mcpair->flag = 0;
        rf_DispatchDAG(rd_dag_h, (void (*) (void *)) rf_MCPairWakeupFunc,
            (void *) mcpair);
        while (!mcpair->flag)
                RF_WAIT_COND(mcpair->cond, mcpair->mutex);
        RF_UNLOCK_MUTEX(mcpair->mutex);
        if (rd_dag_h->status != rf_enable) {
                RF_ERRORMSG("Unable to verify parity: can't read the stripe\n");
                retcode = RF_PARITY_COULD_NOT_VERIFY;
                goto out;
        }
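        /*
         * XOR the data columns together into pbuf (zeroed above); the result
         * is the recomputed parity.  The parity actually read from disk sits
         * in buf immediately after the data columns, at offset bytesPerStripe,
         * so any byte that differs means the on-disk parity is wrong.
         */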
        for (p = buf; p < end_p; p += numbytes) {
                rf_bxor(p, pbuf, numbytes, NULL);
        }
        for (i = 0; i < numbytes; i++) {
#if 0
                if (pbuf[i] != 0 || buf[bytesPerStripe + i] != 0) {
                        printf("Bytes: %d %d %d\n", i, pbuf[i], buf[bytesPerStripe + i]);
                }
#endif
                if (pbuf[i] != buf[bytesPerStripe + i]) {
                        if (!correct_it)
                                RF_ERRORMSG3("Parity verify error: byte %d of parity is 0x%x should be 0x%x\n",
                                    i, (u_char) buf[bytesPerStripe + i], (u_char) pbuf[i]);
                        retcode = RF_PARITY_BAD;
                        break;
                }
        }

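        /*
         * If the parity was wrong and the caller asked us to fix it, build a
         * single-node write DAG that writes the recomputed parity in pbuf
         * back over the parity unit.
         */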
        if (retcode && correct_it) {
                wr_dag_h = rf_MakeSimpleDAG(raidPtr, 1, numbytes, pbuf, rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
                    "Wnp", alloclist, flags, RF_IO_NORMAL_PRIORITY);
                wrBlock = wr_dag_h->succedents[0];
                wrUnblock = wrBlock->succedents[0]->succedents[0];
                wrBlock->succedents[0]->params[0].p = asmap->parityInfo;
                wrBlock->succedents[0]->params[2].v = psID;
                wrBlock->succedents[0]->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
                bzero((char *) &tracerec, sizeof(tracerec));
                wr_dag_h->tracerec = &tracerec;
                if (rf_verifyParityDebug) {
                        printf("Parity verify write dag:\n");
                        rf_PrintDAGList(wr_dag_h);
                }
                RF_LOCK_MUTEX(mcpair->mutex);
                mcpair->flag = 0;
                rf_DispatchDAG(wr_dag_h, (void (*) (void *)) rf_MCPairWakeupFunc,
                    (void *) mcpair);
                while (!mcpair->flag)
                        RF_WAIT_COND(mcpair->cond, mcpair->mutex);
                RF_UNLOCK_MUTEX(mcpair->mutex);
                if (wr_dag_h->status != rf_enable) {
                        RF_ERRORMSG("Unable to correct parity in VerifyParity: can't write the stripe\n");
                        retcode = RF_PARITY_COULD_NOT_CORRECT;
                }
                rf_FreeDAG(wr_dag_h);
                if (retcode == RF_PARITY_BAD)
                        retcode = RF_PARITY_CORRECTED;
        }
out:
        rf_FreeAccessStripeMap(asm_h);
        rf_FreeAllocList(alloclist);
        rf_FreeDAG(rd_dag_h);
        rf_FreeMCPair(mcpair);
        return (retcode);
}

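/*
 * If the disk addressed by this PDA is being reconstructed and the addressed
 * reconstruction unit has already been rebuilt, redirect the PDA to the spare
 * (via the layout's remapping routines for a distributed spare, or to the
 * dedicated spare disk otherwise).  Returns nonzero if the (possibly
 * redirected) target disk is dead, in which case the caller cannot do the I/O.
 */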
int
rf_TryToRedirectPDA(raidPtr, pda, parity)
        RF_Raid_t *raidPtr;
        RF_PhysDiskAddr_t *pda;
        int parity;
{
        if (raidPtr->Disks[pda->row][pda->col].status == rf_ds_reconstructing) {
                if (rf_CheckRUReconstructed(raidPtr->reconControl[pda->row]->reconMap, pda->startSector)) {
                        if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
                                RF_RowCol_t or = pda->row, oc = pda->col;
                                RF_SectorNum_t os = pda->startSector;
                                if (parity) {
                                        (raidPtr->Layout.map->MapParity) (raidPtr, pda->raidAddress, &pda->row, &pda->col, &pda->startSector, RF_REMAP);
                                        if (rf_verifyParityDebug)
                                                printf("VerifyParity: Redir P r %d c %d sect %ld -> r %d c %d sect %ld\n",
                                                    or, oc, (long) os, pda->row, pda->col, (long) pda->startSector);
                                } else {
                                        (raidPtr->Layout.map->MapSector) (raidPtr, pda->raidAddress, &pda->row, &pda->col, &pda->startSector, RF_REMAP);
                                        if (rf_verifyParityDebug)
                                                printf("VerifyParity: Redir D r %d c %d sect %ld -> r %d c %d sect %ld\n",
                                                    or, oc, (long) os, pda->row, pda->col, (long) pda->startSector);
                                }
                        } else {
                                RF_RowCol_t spRow = raidPtr->Disks[pda->row][pda->col].spareRow;
                                RF_RowCol_t spCol = raidPtr->Disks[pda->row][pda->col].spareCol;
                                pda->row = spRow;
                                pda->col = spCol;
                        }
                }
        }
        if (RF_DEAD_DISK(raidPtr->Disks[pda->row][pda->col].status))
                return (1);
        return (0);
}
/*****************************************************************************************
 *
 * currently a stub.
 *
 * takes as input an ASM describing a write operation and containing one failure, and
 * verifies that the parity was correctly updated to reflect the write.
 *
 * if it's a data unit that's failed, we read the other data units in the stripe and
 * the parity unit, XOR them together, and verify that we get the data intended for
 * the failed disk.  Since it's easy, we also validate that the right data got written
 * to the surviving data disks.
 *
 * If it's the parity that failed, there's really no validation we can do except the
 * above verification that the right data got written to all disks.  This is because
 * the new data intended for the failed disk is supplied in the ASM, but this is of
 * course not the case for the new parity.
 *
 ****************************************************************************************/
int
rf_VerifyDegrModeWrite(raidPtr, asmh)
        RF_Raid_t *raidPtr;
        RF_AccessStripeMapHeader_t *asmh;
{
        return (0);
}
/* creates a simple DAG with a header, a block-recon node at level 1,
 * nNodes nodes at level 2, an unblock-recon node at level 3, and
 * a terminator node at level 4.  The stripe address fields in
 * the block and unblock nodes are not touched, nor are the pda
 * fields in the second-level nodes, so they must be filled in later.
 *
 * commit point is established at unblock node - this means that any
 * failure during dag execution causes the dag to fail
 */
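/*
 * The resulting graph looks roughly like this (for nNodes == 3 and a
 * second-level node name of "Rod"):
 *
 *                 /-> Rod ->\
 *   Hdr -> Nil -->--> Rod -->--> Nil -> Trm
 *                 \-> Rod ->/
 */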
RF_DagHeader_t *
rf_MakeSimpleDAG(raidPtr, nNodes, bytesPerSU, databuf, doFunc, undoFunc, name, alloclist, flags, priority)
        RF_Raid_t *raidPtr;
        int nNodes;
        int bytesPerSU;
        char *databuf;
        int (*doFunc) (RF_DagNode_t * node);
        int (*undoFunc) (RF_DagNode_t * node);
        char *name;             /* node names at the second level */
        RF_AllocListElem_t *alloclist;
        RF_RaidAccessFlags_t flags;
        int priority;
{
        RF_DagHeader_t *dag_h;
        RF_DagNode_t *nodes, *termNode, *blockNode, *unblockNode;
        int i;

        /* create the nodes, the block & unblock nodes, and the terminator
         * node */
        RF_CallocAndAdd(nodes, nNodes + 3, sizeof(RF_DagNode_t), (RF_DagNode_t *), alloclist);
        blockNode = &nodes[nNodes];
        unblockNode = blockNode + 1;
        termNode = unblockNode + 1;

        dag_h = rf_AllocDAGHeader();
        dag_h->raidPtr = (void *) raidPtr;
        dag_h->allocList = NULL;        /* we won't use this alloc list */
        dag_h->status = rf_enable;
        dag_h->numSuccedents = 1;
        dag_h->creator = "SimpleDAG";

        /* this dag can not commit until the unblock node is reached; errors
         * prior to the commit point imply the dag has failed */
        dag_h->numCommitNodes = 1;
        dag_h->numCommits = 0;

        dag_h->succedents[0] = blockNode;
        rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nNodes, 0, 0, 0, dag_h, "Nil", alloclist);
        rf_InitNode(unblockNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nNodes, 0, 0, dag_h, "Nil", alloclist);
        unblockNode->succedents[0] = termNode;
        for (i = 0; i < nNodes; i++) {
                blockNode->succedents[i] = unblockNode->antecedents[i] = &nodes[i];
                unblockNode->antType[i] = rf_control;
                rf_InitNode(&nodes[i], rf_wait, RF_FALSE, doFunc, undoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, name, alloclist);
                nodes[i].succedents[0] = unblockNode;
                nodes[i].antecedents[0] = blockNode;
                nodes[i].antType[0] = rf_control;
                nodes[i].params[1].p = (databuf + (i * bytesPerSU));
        }
        rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", alloclist);
        termNode->antecedents[0] = unblockNode;
        termNode->antType[0] = rf_control;
        return (dag_h);
}