/*	$NetBSD: rf_parityscan.c,v 1.11 2001/10/04 15:58:55 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*****************************************************************************
 *
 * rf_parityscan.c -- misc utilities related to parity verification
 *
 *****************************************************************************/

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagfuncs.h"
#include "rf_dagutils.h"
#include "rf_mcpair.h"
#include "rf_general.h"
#include "rf_engine.h"
#include "rf_parityscan.h"
#include "rf_map.h"

/*****************************************************************************
 *
 * walk through the entire array and write new parity.
 * This works by creating two DAGs, one to read a stripe of data and one to
 * write new parity.  The first is executed, the data is xored together, and
 * then the second is executed.  To avoid constantly building and tearing down
 * the DAGs, we create them a priori and fill them in with the mapping
 * information as we go along.
 *
 * there should never be more than one thread running this.
 *
 *****************************************************************************/

int
rf_RewriteParity(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_AccessStripeMapHeader_t *asm_h;
	int ret_val;
	int rc;
	RF_PhysDiskAddr_t pda;
	RF_SectorNum_t i;

	if (raidPtr->Layout.map->faultsTolerated == 0) {
		/* There isn't any parity. Call it "okay." */
		return (RF_PARITY_OKAY);
	}
	if (raidPtr->status[0] != rf_rs_optimal) {
		/*
		 * We're in degraded mode.  Don't try to verify parity now!
		 * XXX: this should be a "we don't want to", not a
		 * "we can't" error.
		 */
		return (RF_PARITY_COULD_NOT_VERIFY);
	}

	ret_val = 0;

	pda.startSector = 0;
	pda.numSector = raidPtr->Layout.sectorsPerStripeUnit;
	rc = RF_PARITY_OKAY;

	for (i = 0; i < raidPtr->totalSectors &&
		     rc <= RF_PARITY_CORRECTED;
	     i += layoutPtr->dataSectorsPerStripe) {
		if (raidPtr->waitShutdown) {
			/* Someone is pulling the plug on this set...
			   abort the re-write */
			return (1);
		}
		asm_h = rf_MapAccess(raidPtr, i,
				     layoutPtr->dataSectorsPerStripe,
				     NULL, RF_DONT_REMAP);
		raidPtr->parity_rewrite_stripes_done =
			i / layoutPtr->dataSectorsPerStripe;
		rc = rf_VerifyParity(raidPtr, asm_h->stripeMap, 1, 0);

		switch (rc) {
		case RF_PARITY_OKAY:
		case RF_PARITY_CORRECTED:
			break;
		case RF_PARITY_BAD:
			printf("Parity bad during correction\n");
			ret_val = 1;
			break;
		case RF_PARITY_COULD_NOT_CORRECT:
			printf("Could not correct bad parity\n");
			ret_val = 1;
			break;
		case RF_PARITY_COULD_NOT_VERIFY:
			printf("Could not verify parity\n");
			ret_val = 1;
			break;
		default:
			printf("Bad rc=%d from VerifyParity in RewriteParity\n", rc);
			ret_val = 1;
		}
		rf_FreeAccessStripeMap(asm_h);
	}
	return (ret_val);
}
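
/*
 * The rewrite loop above records its progress in
 * raidPtr->parity_rewrite_stripes_done, one count per data stripe.  A minimal
 * sketch of turning that counter into a rough percent-done figure is shown
 * below; the helper name is hypothetical and the total-stripe arithmetic
 * simply mirrors the loop's stride.
 */
#if 0
static int
rf_ParityRewritePercent(RF_Raid_t *raidPtr)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_StripeNum_t total_stripes;

	/* one data stripe per pass of the rewrite loop */
	total_stripes = raidPtr->totalSectors / layoutPtr->dataSectorsPerStripe;
	if (total_stripes == 0)
		return (100);
	return ((int) ((raidPtr->parity_rewrite_stripes_done * 100) /
	    total_stripes));
}
#endif
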
/*****************************************************************************
 *
 * verify that the parity in a particular stripe is correct.
 * we validate only the range of parity defined by parityPDA, since
 * this is all we have locked.  The way we do this is to create an asm
 * that maps the whole stripe and then range-restrict it to the parity
 * region defined by the parityPDA.
 *
 *****************************************************************************/
int
rf_VerifyParity(raidPtr, aasm, correct_it, flags)
	RF_Raid_t *raidPtr;
	RF_AccessStripeMap_t *aasm;
	int correct_it;
	RF_RaidAccessFlags_t flags;
{
	RF_PhysDiskAddr_t *parityPDA;
	RF_AccessStripeMap_t *doasm;
	RF_LayoutSW_t *lp;
	int lrc, rc;

	lp = raidPtr->Layout.map;
	if (lp->faultsTolerated == 0) {
		/*
		 * There isn't any parity. Call it "okay."
		 */
		return (RF_PARITY_OKAY);
	}
	rc = RF_PARITY_OKAY;
	if (lp->VerifyParity) {
		for (doasm = aasm; doasm; doasm = doasm->next) {
			for (parityPDA = doasm->parityInfo; parityPDA;
			     parityPDA = parityPDA->next) {
				lrc = lp->VerifyParity(raidPtr,
						       doasm->raidAddress,
						       parityPDA,
						       correct_it, flags);
				if (lrc > rc) {
					/* see rf_parityscan.h for why this
					 * works */
					rc = lrc;
				}
			}
		}
	} else {
		rc = RF_PARITY_COULD_NOT_VERIFY;
	}
	return (rc);
}
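
/*
 * Note on the "lrc > rc" test above: it reports only the worst outcome seen,
 * relying on the RF_PARITY_* codes in rf_parityscan.h being ordered by
 * increasing severity, presumably
 * OKAY < CORRECTED < BAD < COULD_NOT_CORRECT < COULD_NOT_VERIFY.
 * The same ordering is what lets rf_RewriteParity() keep scanning while
 * rc <= RF_PARITY_CORRECTED; rf_parityscan.h is authoritative.
 */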

int
rf_VerifyParityBasic(raidPtr, raidAddr, parityPDA, correct_it, flags)
	RF_Raid_t *raidPtr;
	RF_RaidAddr_t raidAddr;
	RF_PhysDiskAddr_t *parityPDA;
	int correct_it;
	RF_RaidAccessFlags_t flags;
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_RaidAddr_t startAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr,
	    raidAddr);
	RF_SectorCount_t numsector = parityPDA->numSector;
	int numbytes = rf_RaidAddressToByte(raidPtr, numsector);
	int bytesPerStripe = numbytes * layoutPtr->numDataCol;
	RF_DagHeader_t *rd_dag_h, *wr_dag_h;	/* read, write dag */
	RF_DagNode_t *blockNode, *unblockNode, *wrBlock, *wrUnblock;
	RF_AccessStripeMapHeader_t *asm_h;
	RF_AccessStripeMap_t *asmap;
	RF_AllocListElem_t *alloclist;
	RF_PhysDiskAddr_t *pda;
	char *pbuf, *buf, *end_p, *p;
	int i, retcode;
	RF_ReconUnitNum_t which_ru;
	RF_StripeNum_t psID = rf_RaidAddressToParityStripeID(layoutPtr,
	    raidAddr,
	    &which_ru);
	int stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol;
	RF_AccTraceEntry_t tracerec;
	RF_MCPair_t *mcpair;

	retcode = RF_PARITY_OKAY;

	mcpair = rf_AllocMCPair();
	rf_MakeAllocList(alloclist);
	RF_MallocAndAdd(buf, numbytes * (layoutPtr->numDataCol + layoutPtr->numParityCol), (char *), alloclist);
	RF_CallocAndAdd(pbuf, 1, numbytes, (char *), alloclist);	/* use calloc to make
									 * sure buffer is zeroed */
	end_p = buf + bytesPerStripe;

	rd_dag_h = rf_MakeSimpleDAG(raidPtr, stripeWidth, numbytes, buf, rf_DiskReadFunc, rf_DiskReadUndoFunc,
	    "Rod", alloclist, flags, RF_IO_NORMAL_PRIORITY);
	blockNode = rd_dag_h->succedents[0];
	unblockNode = blockNode->succedents[0]->succedents[0];

	/* map the stripe and fill in the PDAs in the dag */
	asm_h = rf_MapAccess(raidPtr, startAddr, layoutPtr->dataSectorsPerStripe, buf, RF_DONT_REMAP);
	asmap = asm_h->stripeMap;

	for (pda = asmap->physInfo, i = 0; i < layoutPtr->numDataCol; i++, pda = pda->next) {
		RF_ASSERT(pda);
		rf_RangeRestrictPDA(raidPtr, parityPDA, pda, 0, 1);
		RF_ASSERT(pda->numSector != 0);
		if (rf_TryToRedirectPDA(raidPtr, pda, 0))
			goto out;	/* no way to verify parity if disk is
					 * dead. return w/ good status */
		blockNode->succedents[i]->params[0].p = pda;
		blockNode->succedents[i]->params[2].v = psID;
		blockNode->succedents[i]->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
	}

	RF_ASSERT(!asmap->parityInfo->next);
	rf_RangeRestrictPDA(raidPtr, parityPDA, asmap->parityInfo, 0, 1);
	RF_ASSERT(asmap->parityInfo->numSector != 0);
	if (rf_TryToRedirectPDA(raidPtr, asmap->parityInfo, 1))
		goto out;
	blockNode->succedents[layoutPtr->numDataCol]->params[0].p = asmap->parityInfo;

	/* fire off the DAG */
	memset((char *) &tracerec, 0, sizeof(tracerec));
	rd_dag_h->tracerec = &tracerec;

	if (rf_verifyParityDebug) {
		printf("Parity verify read dag:\n");
		rf_PrintDAGList(rd_dag_h);
	}
	RF_LOCK_MUTEX(mcpair->mutex);
	mcpair->flag = 0;
	rf_DispatchDAG(rd_dag_h, (void (*) (void *)) rf_MCPairWakeupFunc,
	    (void *) mcpair);
	while (!mcpair->flag)
		RF_WAIT_COND(mcpair->cond, mcpair->mutex);
	RF_UNLOCK_MUTEX(mcpair->mutex);
	if (rd_dag_h->status != rf_enable) {
		RF_ERRORMSG("Unable to verify parity: can't read the stripe\n");
		retcode = RF_PARITY_COULD_NOT_VERIFY;
		goto out;
	}
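	/*
	 * Each data column was read into its own numbytes-sized slice at the
	 * front of buf; XOR them all into pbuf, then compare pbuf byte-for-byte
	 * against the parity column, which was read into buf just past the data
	 * columns (at offset bytesPerStripe).
	 */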
	for (p = buf; p < end_p; p += numbytes) {
		rf_bxor(p, pbuf, numbytes, NULL);
	}
	for (i = 0; i < numbytes; i++) {
#if 0
		if (pbuf[i] != 0 || buf[bytesPerStripe + i] != 0) {
			printf("Bytes: %d %d %d\n", i, pbuf[i], buf[bytesPerStripe + i]);
		}
#endif
		if (pbuf[i] != buf[bytesPerStripe + i]) {
			if (!correct_it)
				RF_ERRORMSG3("Parity verify error: byte %d of parity is 0x%x should be 0x%x\n",
				    i, (u_char) buf[bytesPerStripe + i], (u_char) pbuf[i]);
			retcode = RF_PARITY_BAD;
			break;
		}
	}

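	/*
	 * The parity mismatched and the caller asked for correction: write the
	 * recomputed parity in pbuf back over the parity unit with a one-node
	 * write DAG ("Wnp"), and report RF_PARITY_CORRECTED if the write
	 * succeeds.
	 */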
	if (retcode && correct_it) {
		wr_dag_h = rf_MakeSimpleDAG(raidPtr, 1, numbytes, pbuf, rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
		    "Wnp", alloclist, flags, RF_IO_NORMAL_PRIORITY);
		wrBlock = wr_dag_h->succedents[0];
		wrUnblock = wrBlock->succedents[0]->succedents[0];
		wrBlock->succedents[0]->params[0].p = asmap->parityInfo;
		wrBlock->succedents[0]->params[2].v = psID;
		wrBlock->succedents[0]->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		memset((char *) &tracerec, 0, sizeof(tracerec));
		wr_dag_h->tracerec = &tracerec;
		if (rf_verifyParityDebug) {
			printf("Parity verify write dag:\n");
			rf_PrintDAGList(wr_dag_h);
		}
		RF_LOCK_MUTEX(mcpair->mutex);
		mcpair->flag = 0;
		rf_DispatchDAG(wr_dag_h, (void (*) (void *)) rf_MCPairWakeupFunc,
		    (void *) mcpair);
		while (!mcpair->flag)
			RF_WAIT_COND(mcpair->cond, mcpair->mutex);
		RF_UNLOCK_MUTEX(mcpair->mutex);
		if (wr_dag_h->status != rf_enable) {
			RF_ERRORMSG("Unable to correct parity in VerifyParity: can't write the stripe\n");
			retcode = RF_PARITY_COULD_NOT_CORRECT;
		}
		rf_FreeDAG(wr_dag_h);
		if (retcode == RF_PARITY_BAD)
			retcode = RF_PARITY_CORRECTED;
	}
out:
	rf_FreeAccessStripeMap(asm_h);
	rf_FreeAllocList(alloclist);
	rf_FreeDAG(rd_dag_h);
	rf_FreeMCPair(mcpair);
	return (retcode);
}

int
rf_TryToRedirectPDA(raidPtr, pda, parity)
	RF_Raid_t *raidPtr;
	RF_PhysDiskAddr_t *pda;
	int parity;
{
	if (raidPtr->Disks[pda->row][pda->col].status == rf_ds_reconstructing) {
		if (rf_CheckRUReconstructed(raidPtr->reconControl[pda->row]->reconMap, pda->startSector)) {
			if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
				RF_RowCol_t or = pda->row, oc = pda->col;
				RF_SectorNum_t os = pda->startSector;
				if (parity) {
					(raidPtr->Layout.map->MapParity) (raidPtr, pda->raidAddress, &pda->row, &pda->col, &pda->startSector, RF_REMAP);
					if (rf_verifyParityDebug)
						printf("VerifyParity: Redir P r %d c %d sect %ld -> r %d c %d sect %ld\n",
						    or, oc, (long) os, pda->row, pda->col, (long) pda->startSector);
				} else {
					(raidPtr->Layout.map->MapSector) (raidPtr, pda->raidAddress, &pda->row, &pda->col, &pda->startSector, RF_REMAP);
					if (rf_verifyParityDebug)
						printf("VerifyParity: Redir D r %d c %d sect %ld -> r %d c %d sect %ld\n",
						    or, oc, (long) os, pda->row, pda->col, (long) pda->startSector);
				}
			} else {
				RF_RowCol_t spRow = raidPtr->Disks[pda->row][pda->col].spareRow;
				RF_RowCol_t spCol = raidPtr->Disks[pda->row][pda->col].spareCol;
				pda->row = spRow;
				pda->col = spCol;
			}
		}
	}
	if (RF_DEAD_DISK(raidPtr->Disks[pda->row][pda->col].status))
		return (1);
	return (0);
}
/*****************************************************************************
 *
 * currently a stub.
 *
 * takes as input an ASM describing a write operation and containing one failure, and
 * verifies that the parity was correctly updated to reflect the write.
 *
 * if it's a data unit that's failed, we read the other data units in the stripe and
 * the parity unit, XOR them together, and verify that we get the data intended for
 * the failed disk.  Since it's easy, we also validate that the right data got written
 * to the surviving data disks.
 *
 * If it's the parity that failed, there's really no validation we can do except the
 * above verification that the right data got written to all disks.  This is because
 * the new data intended for the failed disk is supplied in the ASM, but this is of
 * course not the case for the new parity.
 *
 *****************************************************************************/
int
rf_VerifyDegrModeWrite(raidPtr, asmh)
	RF_Raid_t *raidPtr;
	RF_AccessStripeMapHeader_t *asmh;
{
	return (0);
}
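
/*
 * A minimal sketch of the core check described above, assuming hypothetical
 * arguments: the surviving data buffers, the parity buffer, the data that was
 * intended for the failed unit, and a caller-supplied scratch buffer.  XORing
 * the parity with the survivors should reproduce the intended data; rf_bxor()
 * is used the same way rf_VerifyParityBasic() uses it.
 */
#if 0
static int
rf_CheckReconstructedData(char **surviving, int nSurviving, char *parity,
    char *intended, char *scratch, int numbytes)
{
	int i, c;

	/* accumulate parity XOR all surviving data columns into scratch */
	memset(scratch, 0, numbytes);
	rf_bxor(parity, scratch, numbytes, NULL);
	for (c = 0; c < nSurviving; c++)
		rf_bxor(surviving[c], scratch, numbytes, NULL);

	/* the result should equal the data intended for the failed unit */
	for (i = 0; i < numbytes; i++)
		if (scratch[i] != intended[i])
			return (RF_PARITY_BAD);
	return (RF_PARITY_OKAY);
}
#endif
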
/* creates a simple DAG with a header, a block-recon node at level 1,
 * nNodes nodes at level 2, an unblock-recon node at level 3, and
 * a terminator node at level 4.  The stripe address fields in
 * the block and unblock nodes are not touched, nor are the pda
 * fields in the second-level nodes, so they must be filled in later.
 *
 * commit point is established at the unblock node - this means that any
 * failure during dag execution causes the dag to fail
 */
RF_DagHeader_t *
rf_MakeSimpleDAG(raidPtr, nNodes, bytesPerSU, databuf, doFunc, undoFunc, name, alloclist, flags, priority)
	RF_Raid_t *raidPtr;
	int nNodes;
	int bytesPerSU;
	char *databuf;
	int (*doFunc) (RF_DagNode_t * node);
	int (*undoFunc) (RF_DagNode_t * node);
	char *name;		/* node names at the second level */
	RF_AllocListElem_t *alloclist;
	RF_RaidAccessFlags_t flags;
	int priority;
{
	RF_DagHeader_t *dag_h;
	RF_DagNode_t *nodes, *termNode, *blockNode, *unblockNode;
	int i;

	/* create the nodes, the block & unblock nodes, and the terminator
	 * node */
	RF_CallocAndAdd(nodes, nNodes + 3, sizeof(RF_DagNode_t), (RF_DagNode_t *), alloclist);
	blockNode = &nodes[nNodes];
	unblockNode = blockNode + 1;
	termNode = unblockNode + 1;

	dag_h = rf_AllocDAGHeader();
	dag_h->raidPtr = (void *) raidPtr;
	dag_h->allocList = NULL;	/* we won't use this alloc list */
	dag_h->status = rf_enable;
	dag_h->numSuccedents = 1;
	dag_h->creator = "SimpleDAG";

	/* this dag cannot commit until the unblock node is reached; errors
	 * prior to the commit point imply the dag has failed */
	dag_h->numCommitNodes = 1;
	dag_h->numCommits = 0;

	dag_h->succedents[0] = blockNode;
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nNodes, 0, 0, 0, dag_h, "Nil", alloclist);
	rf_InitNode(unblockNode, rf_wait, RF_TRUE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nNodes, 0, 0, dag_h, "Nil", alloclist);
	unblockNode->succedents[0] = termNode;
	for (i = 0; i < nNodes; i++) {
		blockNode->succedents[i] = unblockNode->antecedents[i] = &nodes[i];
		unblockNode->antType[i] = rf_control;
		rf_InitNode(&nodes[i], rf_wait, RF_FALSE, doFunc, undoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, name, alloclist);
		nodes[i].succedents[0] = unblockNode;
		nodes[i].antecedents[0] = blockNode;
		nodes[i].antType[0] = rf_control;
		nodes[i].params[1].p = (databuf + (i * bytesPerSU));
	}
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", alloclist);
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;
	return (dag_h);
}
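
/*
 * For reference, the shape of the graph rf_MakeSimpleDAG() builds (derived
 * from the code above):
 *
 *   Hdr -> Nil(block) -+-> work[0]   -+-> Nil(unblock, commit) -> Trm
 *                      +-> work[1]   -+
 *                      ...            ...
 *                      +-> work[n-1] -+
 *
 * Each worker node is initialized with 4 params and gets
 * databuf + i * bytesPerSU in params[1]; the caller fills in params[0]
 * (the PDA), params[2], and params[3] afterwards, as rf_VerifyParityBasic()
 * does.
 */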