rf_map.c revision 1.49 1 /* $NetBSD: rf_map.c,v 1.49 2019/02/10 17:13:33 christos Exp $ */
2 /*
3 * Copyright (c) 1995 Carnegie-Mellon University.
4 * All rights reserved.
5 *
6 * Author: Mark Holland
7 *
8 * Permission to use, copy, modify and distribute this software and
9 * its documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 *
18 * Carnegie Mellon requests users of this software to return to
19 *
20 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
21 * School of Computer Science
22 * Carnegie Mellon University
23 * Pittsburgh PA 15213-3890
24 *
25 * any improvements or extensions that they make and grant Carnegie the
26 * rights to redistribute these changes.
27 */
28
29 /**************************************************************************
30 *
31 * map.c -- main code for mapping RAID addresses to physical disk addresses
32 *
33 **************************************************************************/
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: rf_map.c,v 1.49 2019/02/10 17:13:33 christos Exp $");
37
38 #include <dev/raidframe/raidframevar.h>
39
40 #include "rf_threadstuff.h"
41 #include "rf_raid.h"
42 #include "rf_general.h"
43 #include "rf_map.h"
44 #include "rf_shutdown.h"
45
46 static void rf_FreePDAList(RF_PhysDiskAddr_t *pda_list);
47 static void rf_FreeASMList(RF_AccessStripeMap_t *asm_list);
48
49 /***************************************************************************
50 *
51 * MapAccess -- main 1st order mapping routine. Maps an access in the
52 * RAID address space to the corresponding set of physical disk
53 * addresses. The result is returned as a list of AccessStripeMap
54 * structures, one per stripe accessed. Each ASM structure contains a
55 * pointer to a list of PhysDiskAddr structures, which describe the
56 * physical locations touched by the user access. Note that this
57 * routine returns only static mapping information, i.e. the list of
58 * physical addresses returned does not necessarily identify the set
59 * of physical locations that will actually be read or written. The
60 * routine also maps the parity. The physical disk location returned
61 * always indicates the entire parity unit, even when only a subset of
62 * it is being accessed. This is because an access that is not stripe
63 * unit aligned but that spans a stripe unit boundary may require
 * access to two distinct portions of the parity unit, and we can't yet
65 * tell which portion(s) we'll actually need. We leave it up to the
66 * algorithm selection code to decide what subset of the parity unit
67 * to access. Note that addresses in the RAID address space must
68 * always be maintained as longs, instead of ints.
69 *
70 * This routine returns NULL if numBlocks is 0
71 *
72 * raidAddress - starting address in RAID address space
73 * numBlocks - number of blocks in RAID address space to access
 * buffer - buffer to supply/receive data
75 * remap - 1 => remap address to spare space
76 ***************************************************************************/
77
78 RF_AccessStripeMapHeader_t *
79 rf_MapAccess(RF_Raid_t *raidPtr, RF_RaidAddr_t raidAddress,
80 RF_SectorCount_t numBlocks, void *buffer, int remap)
81 {
82 RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
83 RF_AccessStripeMapHeader_t *asm_hdr = NULL;
84 RF_AccessStripeMap_t *asm_list = NULL, *asm_p = NULL;
85 int faultsTolerated = layoutPtr->map->faultsTolerated;
86 /* we'll change raidAddress along the way */
87 RF_RaidAddr_t startAddress = raidAddress;
88 RF_RaidAddr_t endAddress = raidAddress + numBlocks;
89 RF_RaidDisk_t *disks = raidPtr->Disks;
90 RF_PhysDiskAddr_t *pda_p;
91 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
92 RF_PhysDiskAddr_t *pda_q;
93 #endif
94 RF_StripeCount_t numStripes = 0;
95 RF_RaidAddr_t stripeRealEndAddress, stripeEndAddress,
96 nextStripeUnitAddress;
97 RF_RaidAddr_t startAddrWithinStripe, lastRaidAddr;
98 RF_StripeCount_t totStripes;
99 RF_StripeNum_t stripeID, lastSID, SUID, lastSUID;
100 RF_AccessStripeMap_t *asmList, *t_asm;
101 RF_PhysDiskAddr_t *pdaList, *t_pda;
102
103 /* allocate all the ASMs and PDAs up front */
104 lastRaidAddr = raidAddress + numBlocks - 1;
105 stripeID = rf_RaidAddressToStripeID(layoutPtr, raidAddress);
106 lastSID = rf_RaidAddressToStripeID(layoutPtr, lastRaidAddr);
107 totStripes = lastSID - stripeID + 1;
108 SUID = rf_RaidAddressToStripeUnitID(layoutPtr, raidAddress);
109 lastSUID = rf_RaidAddressToStripeUnitID(layoutPtr, lastRaidAddr);
110
111 asmList = rf_AllocASMList(totStripes);
112
113 /* may also need pda(s) per stripe for parity */
114 pdaList = rf_AllocPDAList(lastSUID - SUID + 1 +
115 faultsTolerated * totStripes);
116
117
118 if (raidAddress + numBlocks > raidPtr->totalSectors) {
119 RF_ERRORMSG1("Unable to map access because offset (%d) was invalid\n",
120 (int) raidAddress);
121 return (NULL);
122 }
123 #if RF_DEBUG_MAP
124 if (rf_mapDebug)
125 rf_PrintRaidAddressInfo(raidPtr, raidAddress, numBlocks);
126 #endif
127 for (; raidAddress < endAddress;) {
128 /* make the next stripe structure */
129 RF_ASSERT(asmList);
130 t_asm = asmList;
131 asmList = asmList->next;
132 memset(t_asm, 0, sizeof(*t_asm));
133 if (!asm_p)
134 asm_list = asm_p = t_asm;
135 else {
136 asm_p->next = t_asm;
137 asm_p = asm_p->next;
138 }
139 numStripes++;
140
141 /* map SUs from current location to the end of the stripe */
142 asm_p->stripeID = /* rf_RaidAddressToStripeID(layoutPtr,
143 raidAddress) */ stripeID++;
144 stripeRealEndAddress = rf_RaidAddressOfNextStripeBoundary(layoutPtr, raidAddress);
145 stripeEndAddress = RF_MIN(endAddress, stripeRealEndAddress);
146 asm_p->raidAddress = raidAddress;
147 asm_p->endRaidAddress = stripeEndAddress;
148
149 /* map each stripe unit in the stripe */
150 pda_p = NULL;
151
152 /* Raid addr of start of portion of access that is
153 within this stripe */
154 startAddrWithinStripe = raidAddress;
155
156 for (; raidAddress < stripeEndAddress;) {
157 RF_ASSERT(pdaList);
158 t_pda = pdaList;
159 pdaList = pdaList->next;
160 memset(t_pda, 0, sizeof(*t_pda));
161 if (!pda_p)
162 asm_p->physInfo = pda_p = t_pda;
163 else {
164 pda_p->next = t_pda;
165 pda_p = pda_p->next;
166 }
167
168 pda_p->type = RF_PDA_TYPE_DATA;
169 (layoutPtr->map->MapSector) (raidPtr, raidAddress,
170 &(pda_p->col),
171 &(pda_p->startSector),
172 remap);
173
174 /* mark any failures we find. failedPDA is
175 * don't-care if there is more than one
176 * failure */
177
178 /* the RAID address corresponding to this
179 physical diskaddress */
180 pda_p->raidAddress = raidAddress;
181 nextStripeUnitAddress = rf_RaidAddressOfNextStripeUnitBoundary(layoutPtr, raidAddress);
182 pda_p->numSector = RF_MIN(endAddress, nextStripeUnitAddress) - raidAddress;
183 RF_ASSERT(pda_p->numSector != 0);
184 rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 0);
185 pda_p->bufPtr = (char *)buffer + rf_RaidAddressToByte(raidPtr, (raidAddress - startAddress));
186 asm_p->totalSectorsAccessed += pda_p->numSector;
187 asm_p->numStripeUnitsAccessed++;
188
189 raidAddress = RF_MIN(endAddress, nextStripeUnitAddress);
190 }
191
192 /* Map the parity. At this stage, the startSector and
193 * numSector fields for the parity unit are always set
194 * to indicate the entire parity unit. We may modify
195 * this after mapping the data portion. */
196 switch (faultsTolerated) {
197 case 0:
198 break;
199 case 1: /* single fault tolerant */
200 RF_ASSERT(pdaList);
201 t_pda = pdaList;
202 pdaList = pdaList->next;
203 memset(t_pda, 0, sizeof(*t_pda));
204 pda_p = asm_p->parityInfo = t_pda;
205 pda_p->type = RF_PDA_TYPE_PARITY;
206 (layoutPtr->map->MapParity) (raidPtr, rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe),
207 &(pda_p->col), &(pda_p->startSector), remap);
208 pda_p->numSector = layoutPtr->sectorsPerStripeUnit;
209 /* raidAddr may be needed to find unit to redirect to */
210 pda_p->raidAddress = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe);
211 rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 1);
212 rf_ASMParityAdjust(asm_p->parityInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
213
214 break;
215 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0)
216 case 2: /* two fault tolerant */
217 RF_ASSERT(pdaList && pdaList->next);
218 t_pda = pdaList;
219 pdaList = pdaList->next;
220 memset(t_pda, 0, sizeof(*t_pda));
221 pda_p = asm_p->parityInfo = t_pda;
222 pda_p->type = RF_PDA_TYPE_PARITY;
223 t_pda = pdaList;
224 pdaList = pdaList->next;
225 memset(t_pda, 0, sizeof(*t_pda));
226 pda_q = asm_p->qInfo = t_pda;
227 pda_q->type = RF_PDA_TYPE_Q;
228 (layoutPtr->map->MapParity) (raidPtr, rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe),
229 &(pda_p->col), &(pda_p->startSector), remap);
230 (layoutPtr->map->MapQ) (raidPtr, rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe),
231 &(pda_q->col), &(pda_q->startSector), remap);
232 pda_q->numSector = pda_p->numSector = layoutPtr->sectorsPerStripeUnit;
233 /* raidAddr may be needed to find unit to redirect to */
234 pda_p->raidAddress = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe);
235 pda_q->raidAddress = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, startAddrWithinStripe);
236 /* failure mode stuff */
237 rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 1);
238 rf_ASMCheckStatus(raidPtr, pda_q, asm_p, disks, 1);
239 rf_ASMParityAdjust(asm_p->parityInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
240 rf_ASMParityAdjust(asm_p->qInfo, startAddrWithinStripe, endAddress, layoutPtr, asm_p);
241 break;
242 #endif
243 }
244 }
245 RF_ASSERT(asmList == NULL && pdaList == NULL);
246 /* make the header structure */
247 asm_hdr = rf_AllocAccessStripeMapHeader();
248 RF_ASSERT(numStripes == totStripes);
249 asm_hdr->numStripes = numStripes;
250 asm_hdr->stripeMap = asm_list;
251
252 #if RF_DEBUG_MAP
253 if (rf_mapDebug)
254 rf_PrintAccessStripeMap(asm_hdr);
255 #endif
256 return (asm_hdr);
257 }
258
259 /***************************************************************************
260 * This routine walks through an ASM list and marks the PDAs that have
261 * failed. It's called only when a disk failure causes an in-flight
262 * DAG to fail. The parity may consist of two components, but we want
263 * to use only one failedPDA pointer. Thus we set failedPDA to point
264 * to the first parity component, and rely on the rest of the code to
265 * do the right thing with this.
266 ***************************************************************************/
267
268 void
269 rf_MarkFailuresInASMList(RF_Raid_t *raidPtr,
270 RF_AccessStripeMapHeader_t *asm_h)
271 {
272 RF_RaidDisk_t *disks = raidPtr->Disks;
273 RF_AccessStripeMap_t *asmap;
274 RF_PhysDiskAddr_t *pda;
275
276 for (asmap = asm_h->stripeMap; asmap; asmap = asmap->next) {
277 asmap->numDataFailed = 0;
278 asmap->numParityFailed = 0;
279 asmap->numQFailed = 0;
280 asmap->numFailedPDAs = 0;
281 memset(asmap->failedPDAs, 0,
282 RF_MAX_FAILED_PDA * sizeof(*asmap->failedPDAs));
283 for (pda = asmap->physInfo; pda; pda = pda->next) {
284 if (RF_DEAD_DISK(disks[pda->col].status)) {
285 asmap->numDataFailed++;
286 asmap->failedPDAs[asmap->numFailedPDAs] = pda;
287 asmap->numFailedPDAs++;
288 }
289 }
290 pda = asmap->parityInfo;
291 if (pda && RF_DEAD_DISK(disks[pda->col].status)) {
292 asmap->numParityFailed++;
293 asmap->failedPDAs[asmap->numFailedPDAs] = pda;
294 asmap->numFailedPDAs++;
295 }
296 pda = asmap->qInfo;
297 if (pda && RF_DEAD_DISK(disks[pda->col].status)) {
298 asmap->numQFailed++;
299 asmap->failedPDAs[asmap->numFailedPDAs] = pda;
300 asmap->numFailedPDAs++;
301 }
302 }
303 }
304
305 /***************************************************************************
306 *
307 * routines to allocate and free list elements. All allocation
308 * routines zero the structure before returning it.
309 *
310 * FreePhysDiskAddr is static. It should never be called directly,
311 * because FreeAccessStripeMap takes care of freeing the PhysDiskAddr
312 * list.
313 *
314 ***************************************************************************/
315
316 #define RF_MAX_FREE_ASMHDR 128
317 #define RF_MIN_FREE_ASMHDR 32
318
319 #define RF_MAX_FREE_ASM 192
320 #define RF_MIN_FREE_ASM 64
321
322 #define RF_MAX_FREE_PDA 192
323 #define RF_MIN_FREE_PDA 64
324
325 #define RF_MAX_FREE_ASMHLE 64
326 #define RF_MIN_FREE_ASMHLE 16
327
328 #define RF_MAX_FREE_FSS 128
329 #define RF_MIN_FREE_FSS 32
330
331 #define RF_MAX_FREE_VFPLE 128
332 #define RF_MIN_FREE_VFPLE 32
333
334 #define RF_MAX_FREE_VPLE 128
335 #define RF_MIN_FREE_VPLE 32
336
337
338 /* called at shutdown time. So far, all that is necessary is to
339 release all the free lists */
340 static void rf_ShutdownMapModule(void *);
341 static void
342 rf_ShutdownMapModule(void *ignored)
343 {
344 pool_destroy(&rf_pools.asm_hdr);
345 pool_destroy(&rf_pools.asmap);
346 pool_destroy(&rf_pools.asmhle);
347 pool_destroy(&rf_pools.pda);
348 pool_destroy(&rf_pools.fss);
349 pool_destroy(&rf_pools.vfple);
350 pool_destroy(&rf_pools.vple);
351 }
352
353 int
354 rf_ConfigureMapModule(RF_ShutdownList_t **listp)
355 {
356
357 rf_pool_init(&rf_pools.asm_hdr, sizeof(RF_AccessStripeMapHeader_t),
358 "rf_asmhdr_pl", RF_MIN_FREE_ASMHDR, RF_MAX_FREE_ASMHDR);
359 rf_pool_init(&rf_pools.asmap, sizeof(RF_AccessStripeMap_t),
360 "rf_asm_pl", RF_MIN_FREE_ASM, RF_MAX_FREE_ASM);
361 rf_pool_init(&rf_pools.asmhle, sizeof(RF_ASMHeaderListElem_t),
362 "rf_asmhle_pl", RF_MIN_FREE_ASMHLE, RF_MAX_FREE_ASMHLE);
363 rf_pool_init(&rf_pools.pda, sizeof(RF_PhysDiskAddr_t),
364 "rf_pda_pl", RF_MIN_FREE_PDA, RF_MAX_FREE_PDA);
365 rf_pool_init(&rf_pools.fss, sizeof(RF_FailedStripe_t),
366 "rf_fss_pl", RF_MIN_FREE_FSS, RF_MAX_FREE_FSS);
367 rf_pool_init(&rf_pools.vfple, sizeof(RF_VoidFunctionPointerListElem_t),
368 "rf_vfple_pl", RF_MIN_FREE_VFPLE, RF_MAX_FREE_VFPLE);
369 rf_pool_init(&rf_pools.vple, sizeof(RF_VoidPointerListElem_t),
370 "rf_vple_pl", RF_MIN_FREE_VPLE, RF_MAX_FREE_VPLE);
371 rf_ShutdownCreate(listp, rf_ShutdownMapModule, NULL);
372
373 return (0);
374 }
375
376 RF_AccessStripeMapHeader_t *
377 rf_AllocAccessStripeMapHeader(void)
378 {
379 return pool_get(&rf_pools.asm_hdr, PR_WAITOK | PR_ZERO);
380 }
381
/* Return an access-stripe-map header to the asm_hdr pool. */
void
rf_FreeAccessStripeMapHeader(RF_AccessStripeMapHeader_t *p)
{
	pool_put(&rf_pools.asm_hdr, p);
}
387
388
389 RF_VoidFunctionPointerListElem_t *
390 rf_AllocVFPListElem(void)
391 {
392 return pool_get(&rf_pools.vfple, PR_WAITOK | PR_ZERO);
393 }
394
/* Return a void-function-pointer list element to the vfple pool. */
void
rf_FreeVFPListElem(RF_VoidFunctionPointerListElem_t *p)
{

	pool_put(&rf_pools.vfple, p);
}
401
402
403 RF_VoidPointerListElem_t *
404 rf_AllocVPListElem(void)
405 {
406 return pool_get(&rf_pools.vple, PR_WAITOK | PR_ZERO);
407 }
408
/* Return a void-pointer list element to the vple pool. */
void
rf_FreeVPListElem(RF_VoidPointerListElem_t *p)
{

	pool_put(&rf_pools.vple, p);
}
415
416 RF_ASMHeaderListElem_t *
417 rf_AllocASMHeaderListElem(void)
418 {
419 return pool_get(&rf_pools.asmhle, PR_WAITOK | PR_ZERO);
420 }
421
/* Return an ASM-header list element to the asmhle pool. */
void
rf_FreeASMHeaderListElem(RF_ASMHeaderListElem_t *p)
{

	pool_put(&rf_pools.asmhle, p);
}
428
429 RF_FailedStripe_t *
430 rf_AllocFailedStripeStruct(void)
431 {
432 return pool_get(&rf_pools.fss, PR_WAITOK | PR_ZERO);
433 }
434
/* Return a failed-stripe structure to the fss pool. */
void
rf_FreeFailedStripeStruct(RF_FailedStripe_t *p)
{
	pool_put(&rf_pools.fss, p);
}
440
441
442
443
444
445 RF_PhysDiskAddr_t *
446 rf_AllocPhysDiskAddr(void)
447 {
448 return pool_get(&rf_pools.pda, PR_WAITOK | PR_ZERO);
449 }
/* Allocate a linked list of "count" PDAs from the pda pool.  The
 * elements are taken from the pool one at a time (rather than in bulk)
 * to keep allocation symmetric with the per-element free in
 * rf_FreePDAList(); this path is executed infrequently, so the extra
 * pool calls are not a performance concern. */
455 RF_PhysDiskAddr_t *
456 rf_AllocPDAList(int count)
457 {
458 RF_PhysDiskAddr_t *p, *prev;
459 int i;
460
461 p = NULL;
462 prev = NULL;
463 for (i = 0; i < count; i++) {
464 p = pool_get(&rf_pools.pda, PR_WAITOK);
465 p->next = prev;
466 prev = p;
467 }
468
469 return (p);
470 }
471
/* Return a single physical-disk-address structure to the pda pool. */
void
rf_FreePhysDiskAddr(RF_PhysDiskAddr_t *p)
{
	pool_put(&rf_pools.pda, p);
}
477
478 static void
479 rf_FreePDAList(RF_PhysDiskAddr_t *pda_list)
480 {
481 RF_PhysDiskAddr_t *p, *tmp;
482
483 p=pda_list;
484 while (p) {
485 tmp = p->next;
486 pool_put(&rf_pools.pda, p);
487 p = tmp;
488 }
489 }
490
/* Allocate a linked list of "count" access stripe maps from the asmap
 * pool.  This is essentially identical to rf_AllocPDAList() and the two
 * could be combined; like that routine, elements are taken from the
 * pool one at a time for symmetry with the per-element free, which is
 * fine because this is executed infrequently. */
496 RF_AccessStripeMap_t *
497 rf_AllocASMList(int count)
498 {
499 RF_AccessStripeMap_t *p, *prev;
500 int i;
501
502 p = NULL;
503 prev = NULL;
504 for (i = 0; i < count; i++) {
505 p = pool_get(&rf_pools.asmap, PR_WAITOK);
506 p->next = prev;
507 prev = p;
508 }
509 return (p);
510 }
511
512 static void
513 rf_FreeASMList(RF_AccessStripeMap_t *asm_list)
514 {
515 RF_AccessStripeMap_t *p, *tmp;
516
517 p=asm_list;
518 while (p) {
519 tmp = p->next;
520 pool_put(&rf_pools.asmap, p);
521 p = tmp;
522 }
523 }
524
525 void
526 rf_FreeAccessStripeMap(RF_AccessStripeMapHeader_t *hdr)
527 {
528 RF_AccessStripeMap_t *p;
529 RF_PhysDiskAddr_t *pdp, *trailer, *pdaList = NULL, *pdaEnd = NULL;
530 int count = 0, t;
531
532 for (p = hdr->stripeMap; p; p = p->next) {
533
534 /* link the 3 pda lists into the accumulating pda list */
535
536 if (!pdaList)
537 pdaList = p->qInfo;
538 else
539 pdaEnd->next = p->qInfo;
540 for (trailer = NULL, pdp = p->qInfo; pdp;) {
541 trailer = pdp;
542 pdp = pdp->next;
543 count++;
544 }
545 if (trailer)
546 pdaEnd = trailer;
547
548 if (!pdaList)
549 pdaList = p->parityInfo;
550 else
551 pdaEnd->next = p->parityInfo;
552 for (trailer = NULL, pdp = p->parityInfo; pdp;) {
553 trailer = pdp;
554 pdp = pdp->next;
555 count++;
556 }
557 if (trailer)
558 pdaEnd = trailer;
559
560 if (!pdaList)
561 pdaList = p->physInfo;
562 else
563 pdaEnd->next = p->physInfo;
564 for (trailer = NULL, pdp = p->physInfo; pdp;) {
565 trailer = pdp;
566 pdp = pdp->next;
567 count++;
568 }
569 if (trailer)
570 pdaEnd = trailer;
571 }
572
573 /* debug only */
574 for (t = 0, pdp = pdaList; pdp; pdp = pdp->next)
575 t++;
576 RF_ASSERT(t == count);
577
578 if (pdaList)
579 rf_FreePDAList(pdaList);
580 rf_FreeASMList(hdr->stripeMap);
581 rf_FreeAccessStripeMapHeader(hdr);
582 }
583 /* We can't use the large write optimization if there are any failures
584 * in the stripe. In the declustered layout, there is no way to
585 * immediately determine what disks constitute a stripe, so we
586 * actually have to hunt through the stripe looking for failures. The
587 * reason we map the parity instead of just using asm->parityInfo->col
588 * is because the latter may have been already redirected to a spare
589 * drive, which would mess up the computation of the stripe offset.
590 *
591 * ASSUMES AT MOST ONE FAILURE IN THE STRIPE. */
/*
 * Returns nonzero if the stripe described by asmap contains a failure
 * that rules out the large-write optimization; returns 0 (and possibly
 * sets RF_ASM_REDIR_LARGE_WRITE) otherwise.  See the block comment
 * above for why the parity column is re-mapped rather than taken from
 * asmap->parityInfo->col.
 */
int
rf_CheckStripeForFailures(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
{
	RF_RowCol_t tcol, pcol, *diskids, i;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_StripeCount_t stripeOffset;
	int numFailures;
	RF_RaidAddr_t sosAddr;
	RF_SectorNum_t diskOffset, poffset;

	/* quick out in the fault-free case. */
	rf_lock_mutex2(raidPtr->mutex);
	numFailures = raidPtr->numFailures;
	rf_unlock_mutex2(raidPtr->mutex);
	if (numFailures == 0)
		return (0);

	/* start-of-stripe address, used below to map each data SU */
	sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr,
	    asmap->raidAddress);
	(layoutPtr->map->IdentifyStripe) (raidPtr, asmap->raidAddress,
	    &diskids);
	(layoutPtr->map->MapParity) (raidPtr, asmap->raidAddress,
	    &pcol, &poffset, 0);	/* get pcol */

	/* this need not be true if we've redirected the access to a
	 * spare in another row RF_ASSERT(row == testrow); */
	stripeOffset = 0;
	for (i = 0; i < layoutPtr->numDataCol + layoutPtr->numParityCol; i++) {
		/* only data columns are examined; the parity column is
		 * skipped (and does not advance stripeOffset) */
		if (diskids[i] != pcol) {
			if (RF_DEAD_DISK(raidPtr->Disks[diskids[i]].status)) {
				/* dead disk and no reconstruction under
				 * way: large write is impossible */
				if (raidPtr->status != rf_rs_reconstructing)
					return (1);
				RF_ASSERT(raidPtr->reconControl->fcol == diskids[i]);
				layoutPtr->map->MapSector(raidPtr,
				    sosAddr + stripeOffset * layoutPtr->sectorsPerStripeUnit,
				    &tcol, &diskOffset, 0);
				RF_ASSERT(tcol == diskids[i]);
				/* if the affected RU has not been
				 * reconstructed yet, no large write */
				if (!rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, diskOffset))
					return (1);
				/* RU already reconstructed: large write
				 * OK, but redirect it */
				asmap->flags |= RF_ASM_REDIR_LARGE_WRITE;
				return (0);
			}
			stripeOffset++;
		}
	}
	return (0);
}
639 #if (RF_INCLUDE_DECL_PQ > 0) || (RF_INCLUDE_RAID6 > 0) || (RF_INCLUDE_EVENODD >0)
640 /*
641 return the number of failed data units in the stripe.
642 */
643
644 int
645 rf_NumFailedDataUnitsInStripe(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
646 {
647 RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
648 RF_RowCol_t tcol, i;
649 RF_SectorNum_t diskOffset;
650 RF_RaidAddr_t sosAddr;
651 int numFailures;
652
653 /* quick out in the fault-free case. */
654 rf_lock_mutex2(raidPtr->mutex);
655 numFailures = raidPtr->numFailures;
656 rf_unlock_mutex2(raidPtr->mutex);
657 if (numFailures == 0)
658 return (0);
659 numFailures = 0;
660
661 sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr,
662 asmap->raidAddress);
663 for (i = 0; i < layoutPtr->numDataCol; i++) {
664 (layoutPtr->map->MapSector) (raidPtr, sosAddr + i * layoutPtr->sectorsPerStripeUnit,
665 &tcol, &diskOffset, 0);
666 if (RF_DEAD_DISK(raidPtr->Disks[tcol].status))
667 numFailures++;
668 }
669
670 return numFailures;
671 }
672 #endif
673
674 /****************************************************************************
675 *
676 * debug routines
677 *
678 ***************************************************************************/
679 #if RF_DEBUG_MAP
/* Convenience wrapper: print an access stripe map without buffer pointers. */
void
rf_PrintAccessStripeMap(RF_AccessStripeMapHeader_t *asm_h)
{
	/* prbuf = 0: suppress buffer-pointer output */
	rf_PrintFullAccessStripeMap(asm_h, 0);
}
685 #endif
686
687 /* prbuf - flag to print buffer pointers */
/*
 * Print every stripe of an access stripe map: stripe id, sector count,
 * failure counts, the parity PDA(s), and each data stripe unit.  When
 * prbuf is nonzero the buffer pointer of each PDA is printed as well.
 */
void
rf_PrintFullAccessStripeMap(RF_AccessStripeMapHeader_t *asm_h, int prbuf)
{
	int i;
	RF_AccessStripeMap_t *asmap = asm_h->stripeMap;
	RF_PhysDiskAddr_t *p;
	printf("%d stripes total\n", (int) asm_h->numStripes);
	for (; asmap; asmap = asmap->next) {
		/* printf("Num failures: %d\n",asmap->numDataFailed); */
		/* printf("Num sectors:
		 * %d\n",(int)asmap->totalSectorsAccessed); */
		printf("Stripe %d (%d sectors), failures: %d data, %d parity: ",
		    (int) asmap->stripeID,
		    (int) asmap->totalSectorsAccessed,
		    (int) asmap->numDataFailed,
		    (int) asmap->numParityFailed);
		if (asmap->parityInfo) {
			/* first (possibly only) parity region */
			printf("Parity [c%d s%d-%d", asmap->parityInfo->col,
			    (int) asmap->parityInfo->startSector,
			    (int) (asmap->parityInfo->startSector +
				asmap->parityInfo->numSector - 1));
			if (prbuf)
				printf(" b0x%lx", (unsigned long) asmap->parityInfo->bufPtr);
			/* second parity region, present only when
			 * rf_ASMParityAdjust() split the parity unit */
			if (asmap->parityInfo->next) {
				printf(", c%d s%d-%d", asmap->parityInfo->next->col,
				    (int) asmap->parityInfo->next->startSector,
				    (int) (asmap->parityInfo->next->startSector +
					asmap->parityInfo->next->numSector - 1));
				if (prbuf)
					printf(" b0x%lx", (unsigned long) asmap->parityInfo->next->bufPtr);
				/* parity never splits into more than two */
				RF_ASSERT(asmap->parityInfo->next->next == NULL);
			}
			printf("]\n\t");
		}
		/* data stripe units, two per output line */
		for (i = 0, p = asmap->physInfo; p; p = p->next, i++) {
			printf("SU c%d s%d-%d ", p->col, (int) p->startSector,
			    (int) (p->startSector + p->numSector - 1));
			if (prbuf)
				printf("b0x%lx ", (unsigned long) p->bufPtr);
			if (i && !(i & 1))
				printf("\n\t");
		}
		printf("\n");
		/* NOTE(review): failure summary always reads the FIRST
		 * stripe's counters/failedPDAs, not this asmap's — looks
		 * intentional-ish but worth confirming upstream */
		p = asm_h->stripeMap->failedPDAs[0];
		if (asm_h->stripeMap->numDataFailed + asm_h->stripeMap->numParityFailed > 1)
			printf("[multiple failures]\n");
		else
			if (asm_h->stripeMap->numDataFailed + asm_h->stripeMap->numParityFailed > 0)
				printf("\t[Failed PDA: c%d s%d-%d]\n", p->col,
				    (int) p->startSector, (int) (p->startSector + p->numSector - 1));
	}
}
740
#if RF_DEBUG_MAP
/*
 * Debug helper: print the stripe-unit boundary addresses from the start
 * of the stripe through the end of the access, plus the offset of the
 * access within its stripe unit.  The guard was RF_MAP_DEBUG, which is
 * never defined; the only caller (rf_MapAccess) and the rf_mapDebug
 * option are compiled under RF_DEBUG_MAP, so enabling RF_DEBUG_MAP
 * produced an unresolved reference to this function.
 */
void
rf_PrintRaidAddressInfo(RF_Raid_t *raidPtr, RF_RaidAddr_t raidAddr,
			RF_SectorCount_t numBlocks)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_RaidAddr_t ra, sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, raidAddr);

	printf("Raid addrs of SU boundaries from start of stripe to end of access:\n\t");
	for (ra = sosAddr; ra <= raidAddr + numBlocks; ra += layoutPtr->sectorsPerStripeUnit) {
		printf("%d (0x%x), ", (int) ra, (int) ra);
	}
	printf("\n");
	printf("Offset into stripe unit: %d (0x%x)\n",
	    (int) (raidAddr % layoutPtr->sectorsPerStripeUnit),
	    (int) (raidAddr % layoutPtr->sectorsPerStripeUnit));
}
#endif
759 /* given a parity descriptor and the starting address within a stripe,
760 * range restrict the parity descriptor to touch only the correct
761 * stuff. */
/*
 * Range-restrict the parity PDA "toAdjust" to cover only the parity
 * that corresponds to the data actually accessed.  May allocate a
 * second PDA (chained via toAdjust->next) when the access uses two
 * disjoint regions of the parity unit.
 */
void
rf_ASMParityAdjust(RF_PhysDiskAddr_t *toAdjust,
		   RF_StripeNum_t startAddrWithinStripe,
		   RF_SectorNum_t endAddress,
		   RF_RaidLayout_t *layoutPtr,
		   RF_AccessStripeMap_t *asm_p)
{
	RF_PhysDiskAddr_t *new_pda;

	/* when we're accessing only a portion of one stripe unit, we
	 * want the parity descriptor to identify only the chunk of
	 * parity associated with the data.  When the access spans
	 * exactly one stripe unit boundary and is less than a stripe
	 * unit in size, it uses two disjoint regions of the parity
	 * unit.  When an access spans more than one stripe unit
	 * boundary, it uses all of the parity unit.
	 *
	 * To better handle the case where stripe units are small, we
	 * may eventually want to change the 2nd case so that if the
	 * SU size is below some threshold, we just read/write the
	 * whole thing instead of breaking it up into two accesses. */
	if (asm_p->numStripeUnitsAccessed == 1) {
		/* offset of the access within its stripe unit */
		int x = (startAddrWithinStripe % layoutPtr->sectorsPerStripeUnit);
		toAdjust->startSector += x;
		toAdjust->raidAddress += x;
		/* parity region is exactly as long as the data region */
		toAdjust->numSector = asm_p->physInfo->numSector;
		RF_ASSERT(toAdjust->numSector != 0);
	} else
		if (asm_p->numStripeUnitsAccessed == 2 && asm_p->totalSectorsAccessed < layoutPtr->sectorsPerStripeUnit) {
			int x = (startAddrWithinStripe % layoutPtr->sectorsPerStripeUnit);

			/* create a second pda and copy the parity map info
			 * into it */
			RF_ASSERT(toAdjust->next == NULL);
			/* the following will get freed in rf_FreeAccessStripeMap() via
			   rf_FreePDAList() */
			new_pda = toAdjust->next = rf_AllocPhysDiskAddr();
			*new_pda = *toAdjust;	/* structure assignment */
			new_pda->next = NULL;

			/* adjust the start sector & number of blocks for the
			 * first parity pda: from the access start to the SU
			 * boundary */
			toAdjust->startSector += x;
			toAdjust->raidAddress += x;
			toAdjust->numSector = rf_RaidAddressOfNextStripeUnitBoundary(layoutPtr, startAddrWithinStripe) - startAddrWithinStripe;
			RF_ASSERT(toAdjust->numSector != 0);

			/* adjust the second pda: from the SU boundary to the
			 * end of the access */
			new_pda->numSector = endAddress - rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, endAddress);
			/* new_pda->raidAddress =
			 * rf_RaidAddressOfNextStripeUnitBoundary(layoutPtr,
			 * toAdjust->raidAddress); */
			RF_ASSERT(new_pda->numSector != 0);
		}
}
817
818 /* Check if a disk has been spared or failed. If spared, redirect the
819 * I/O. If it has been failed, record it in the asm pointer. Fifth
820 * arg is whether data or parity. */
821 void
822 rf_ASMCheckStatus(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda_p,
823 RF_AccessStripeMap_t *asm_p, RF_RaidDisk_t *disks,
824 int parity)
825 {
826 RF_DiskStatus_t dstatus;
827 RF_RowCol_t fcol;
828
829 dstatus = disks[pda_p->col].status;
830
831 if (dstatus == rf_ds_spared) {
832 /* if the disk has been spared, redirect access to the spare */
833 fcol = pda_p->col;
834 pda_p->col = disks[fcol].spareCol;
835 } else
836 if (dstatus == rf_ds_dist_spared) {
837 /* ditto if disk has been spared to dist spare space */
838 #if RF_DEBUG_MAP
839 RF_RowCol_t oc = pda_p->col;
840 RF_SectorNum_t oo = pda_p->startSector;
841 #endif
842 if (pda_p->type == RF_PDA_TYPE_DATA)
843 raidPtr->Layout.map->MapSector(raidPtr, pda_p->raidAddress, &pda_p->col, &pda_p->startSector, RF_REMAP);
844 else
845 raidPtr->Layout.map->MapParity(raidPtr, pda_p->raidAddress, &pda_p->col, &pda_p->startSector, RF_REMAP);
846
847 #if RF_DEBUG_MAP
848 if (rf_mapDebug) {
849 printf("Redirected c %d o %d -> c %d o %d\n", oc, (int) oo,
850 pda_p->col, (int) pda_p->startSector);
851 }
852 #endif
853 } else
854 if (RF_DEAD_DISK(dstatus)) {
855 /* if the disk is inaccessible, mark the
856 * failure */
857 if (parity)
858 asm_p->numParityFailed++;
859 else {
860 asm_p->numDataFailed++;
861 }
862 asm_p->failedPDAs[asm_p->numFailedPDAs] = pda_p;
863 asm_p->numFailedPDAs++;
864 #if 0
865 switch (asm_p->numParityFailed + asm_p->numDataFailed) {
866 case 1:
867 asm_p->failedPDAs[0] = pda_p;
868 break;
869 case 2:
870 asm_p->failedPDAs[1] = pda_p;
871 default:
872 break;
873 }
874 #endif
875 }
876 /* the redirected access should never span a stripe unit boundary */
877 RF_ASSERT(rf_RaidAddressToStripeUnitID(&raidPtr->Layout, pda_p->raidAddress) ==
878 rf_RaidAddressToStripeUnitID(&raidPtr->Layout, pda_p->raidAddress + pda_p->numSector - 1));
879 RF_ASSERT(pda_p->col != -1);
880 }
881