/*	$NetBSD: rf_decluster.c,v 1.9 2002/05/22 15:40:49 wiz Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*----------------------------------------------------------------------
 *
 * rf_decluster.c -- code related to the declustered layout
 *
 * Created 10-21-92 (MCH)
 *
 * Nov 93: adding support for distributed sparing.  This code is a little
 *         complex:  the basic layout used is as follows:
 *         let F = (v-1)/GCD(r,v-1).  The spare space for each set of
 *         F consecutive fulltables is grouped together and placed after
 *         that set of tables.
 *                   +------------------------------+
 *                   |         F fulltables         |
 *                   |         Spare Space          |
 *                   |         F fulltables         |
 *                   |         Spare Space          |
 *                   |             ...              |
 *                   +------------------------------+
 *
 *--------------------------------------------------------------------*/
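
/*
 * Illustrative arithmetic (hypothetical parameters, not from any real
 * array): with v = 21 columns and r = 5, GCD(r, v-1) = GCD(5, 20) = 5, so
 * F = (v-1)/GCD(r, v-1) = 20/5 = 4, and a chunk of spare space is laid
 * out after every 4 fulltables.
 */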

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_decluster.c,v 1.9 2002/05/22 15:40:49 wiz Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_archs.h"
#include "rf_raid.h"
#include "rf_decluster.h"
#include "rf_debugMem.h"
#include "rf_utils.h"
#include "rf_alloclist.h"
#include "rf_general.h"
#include "rf_shutdown.h"


extern int rf_copyback_in_progress;	/* debug only */

/* found in rf_kintf.c */
int rf_GetSpareTableFromDaemon(RF_SparetWait_t * req);

#if (RF_INCLUDE_PARITY_DECLUSTERING > 0) || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0)

/* configuration code */

int
rf_ConfigureDeclustered(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	int b, v, k, r, lambda;	/* block design params */
	int i, j;
	RF_RowCol_t *first_avail_slot;
	RF_StripeCount_t complete_FT_count, numCompleteFullTablesPerDisk;
	RF_DeclusteredConfigInfo_t *info;
	RF_StripeCount_t PUsPerDisk, spareRegionDepthInPUs, numCompleteSpareRegionsPerDisk,
	    extraPUsPerDisk;
	RF_StripeCount_t totSparePUsPerDisk;
	RF_SectorNum_t diskOffsetOfLastFullTableInSUs;
	RF_SectorCount_t SpareSpaceInSUs;
	char *cfgBuf = (char *) (cfgPtr->layoutSpecific);
	RF_StripeNum_t l, SUID;

	SUID = l = 0;
	numCompleteSpareRegionsPerDisk = 0;

	/* 1. create layout specific structure */
	RF_MallocAndAdd(info, sizeof(RF_DeclusteredConfigInfo_t), (RF_DeclusteredConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;
	info->SpareTable = NULL;

	/* 2. extract parameters from the config structure */
	if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
		(void)memcpy(info->sparemap_fname, cfgBuf, RF_SPAREMAP_NAME_LEN);
	}
	cfgBuf += RF_SPAREMAP_NAME_LEN;

	b = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	v = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	k = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	r = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	lambda = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	raidPtr->noRotate = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
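
	/*
	 * For reference (as parsed above): the layout-specific buffer is
	 * expected to hold a sparemap file name (RF_SPAREMAP_NAME_LEN bytes,
	 * meaningful only with distributed sparing), then six ints -- b, v,
	 * k, r, lambda and the no-rotate flag -- and finally the b x k
	 * layout table itself, which is read byte-by-byte further below.
	 */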

	/* the sparemaps are generated assuming that parity is rotated, so we
	 * issue a warning if both distributed sparing and no-rotate are on at
	 * the same time */
	if ((layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) && raidPtr->noRotate) {
		RF_ERRORMSG("Warning: distributed sparing specified without parity rotation.\n");
	}
	if (raidPtr->numCol != v) {
		RF_ERRORMSG2("RAID: config error: table element count (%d) not equal to no. of cols (%d)\n", v, raidPtr->numCol);
		return (EINVAL);
	}
	/* 3. set up the values used in the mapping code */
	info->BlocksPerTable = b;
	info->Lambda = lambda;
	info->NumParityReps = info->groupSize = k;
	info->SUsPerTable = b * (k - 1) * layoutPtr->SUsPerPU;	/* b blks, k-1 SUs each */
	info->SUsPerFullTable = k * info->SUsPerTable;	/* rot k times */
	info->PUsPerBlock = k - 1;
	info->SUsPerBlock = info->PUsPerBlock * layoutPtr->SUsPerPU;
	info->TableDepthInPUs = (b * k) / v;
	info->FullTableDepthInPUs = info->TableDepthInPUs * k;	/* k repetitions */

	/* used only in distributed sparing case */
	info->FullTablesPerSpareRegion = (v - 1) / rf_gcd(r, v - 1);	/* (v-1)/gcd fulltables */
	info->TablesPerSpareRegion = k * info->FullTablesPerSpareRegion;
	info->SpareSpaceDepthPerRegionInSUs = (r * info->TablesPerSpareRegion / (v - 1)) * layoutPtr->SUsPerPU;
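
	/*
	 * For instance (hypothetical numbers): for the 20.5-style design
	 * mentioned in the head-separation comment below -- v=20, k=5, r=19,
	 * lambda=4, and hence b = v*r/k = 76 blocks for a balanced design --
	 * these formulas give SUsPerTable = 76*4*SUsPerPU, SUsPerFullTable =
	 * 5 times that, TableDepthInPUs = 76*5/20 = 19, FullTableDepthInPUs
	 * = 95, FullTablesPerSpareRegion = 19/GCD(19,19) = 1,
	 * TablesPerSpareRegion = 5, and SpareSpaceDepthPerRegionInSUs =
	 * (19*5/19)*SUsPerPU = 5*SUsPerPU.
	 */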

	/* check to make sure the block design is sufficiently small */
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		if (info->FullTableDepthInPUs * layoutPtr->SUsPerPU + info->SpareSpaceDepthPerRegionInSUs > layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG3("RAID: config error: Full Table depth (%d) + Spare Space (%d) larger than disk size (%d) (BD too big)\n",
			    (int) info->FullTableDepthInPUs,
			    (int) info->SpareSpaceDepthPerRegionInSUs,
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	} else {
		if (info->TableDepthInPUs * layoutPtr->SUsPerPU > layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG2("RAID: config error: Table depth (%d) larger than disk size (%d) (BD too big)\n",
			    (int) (info->TableDepthInPUs * layoutPtr->SUsPerPU),
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	}


	/* compute the size of each disk, and the number of tables in the last
	 * fulltable (which need not be complete) */
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {

		PUsPerDisk = layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU;
		spareRegionDepthInPUs = (info->TablesPerSpareRegion * info->TableDepthInPUs +
		    (info->TablesPerSpareRegion * info->TableDepthInPUs) / (v - 1));
		info->SpareRegionDepthInSUs = spareRegionDepthInPUs * layoutPtr->SUsPerPU;

		numCompleteSpareRegionsPerDisk = PUsPerDisk / spareRegionDepthInPUs;
		info->NumCompleteSRs = numCompleteSpareRegionsPerDisk;
		extraPUsPerDisk = PUsPerDisk % spareRegionDepthInPUs;

		/* assume conservatively that we need the full amount of spare
		 * space in one region in order to provide spares for the
		 * partial spare region at the end of the array.  We set "i"
		 * to the number of tables in the partial spare region.  This
		 * may actually include some fulltables. */
		extraPUsPerDisk -= (info->SpareSpaceDepthPerRegionInSUs / layoutPtr->SUsPerPU);
		if (extraPUsPerDisk <= 0)
			i = 0;
		else
			i = extraPUsPerDisk / info->TableDepthInPUs;

		complete_FT_count = raidPtr->numRow * (numCompleteSpareRegionsPerDisk * (info->TablesPerSpareRegion / k) + i / k);
		info->FullTableLimitSUID = complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk = i % k;

		/* note that in the last spare region, the spare space is
		 * complete even though data/parity space is not */
		totSparePUsPerDisk = (numCompleteSpareRegionsPerDisk + 1) * (info->SpareSpaceDepthPerRegionInSUs / layoutPtr->SUsPerPU);
		info->TotSparePUsPerDisk = totSparePUsPerDisk;

		layoutPtr->stripeUnitsPerDisk =
		    ((complete_FT_count / raidPtr->numRow) * info->FullTableDepthInPUs +	/* data & parity space */
			info->ExtraTablesPerDisk * info->TableDepthInPUs +
			totSparePUsPerDisk	/* spare space */
		    ) * layoutPtr->SUsPerPU;
		layoutPtr->dataStripeUnitsPerDisk =
		    (complete_FT_count * info->FullTableDepthInPUs + info->ExtraTablesPerDisk * info->TableDepthInPUs)
		    * layoutPtr->SUsPerPU * (k - 1) / k;

	} else {
		/* non-dist spare case: force each disk to contain an
		 * integral number of tables */
		layoutPtr->stripeUnitsPerDisk /= (info->TableDepthInPUs * layoutPtr->SUsPerPU);
		layoutPtr->stripeUnitsPerDisk *= (info->TableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the number of tables in the last fulltable, which
		 * need not be complete */
		complete_FT_count =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) / info->FullTableDepthInPUs) * raidPtr->numRow;

		info->FullTableLimitSUID = complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) / info->TableDepthInPUs) % k;
	}

	raidPtr->sectorsPerDisk = layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit;

	/* find the disk offset of the stripe unit where the last fulltable
	 * starts */
	numCompleteFullTablesPerDisk = complete_FT_count / raidPtr->numRow;
	diskOffsetOfLastFullTableInSUs = numCompleteFullTablesPerDisk * info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareSpaceInSUs = numCompleteSpareRegionsPerDisk * info->SpareSpaceDepthPerRegionInSUs;
		diskOffsetOfLastFullTableInSUs += SpareSpaceInSUs;
		info->DiskOffsetOfLastSpareSpaceChunkInSUs =
		    diskOffsetOfLastFullTableInSUs + info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU;
	}
	info->DiskOffsetOfLastFullTableInSUs = diskOffsetOfLastFullTableInSUs;
	info->numCompleteFullTablesPerDisk = numCompleteFullTablesPerDisk;

	/* 4. create and initialize the lookup tables */
	info->LayoutTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->LayoutTable == NULL)
		return (ENOMEM);
	info->OffsetTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->OffsetTable == NULL)
		return (ENOMEM);
	info->BlockTable = rf_make_2d_array(info->TableDepthInPUs * layoutPtr->SUsPerPU, raidPtr->numCol, raidPtr->cleanupList);
	if (info->BlockTable == NULL)
		return (ENOMEM);

	first_avail_slot = rf_make_1d_array(v, NULL);
	if (first_avail_slot == NULL)
		return (ENOMEM);

	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++)
			info->LayoutTable[i][j] = *cfgBuf++;

	/* initialize offset table */
	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++) {
			info->OffsetTable[i][j] = first_avail_slot[info->LayoutTable[i][j]];
			first_avail_slot[info->LayoutTable[i][j]]++;
		}
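
	/*
	 * Tiny illustration (hypothetical design, not from any real
	 * sparemap): with b=4, v=4, k=3 and LayoutTable rows
	 * {0,1,2} {1,2,3} {2,3,0} {3,0,1}, the loop above yields OffsetTable
	 * rows {0,0,0} {1,1,0} {2,1,1} {2,2,2}.  Entry [i][j] is how many
	 * earlier table entries already landed on disk LayoutTable[i][j],
	 * i.e. the PU offset of block i's j-th unit within the table on
	 * that disk.
	 */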

	/* initialize block table */
	for (SUID = l = 0; l < layoutPtr->SUsPerPU; l++) {
		for (i = 0; i < b; i++) {
			for (j = 0; j < k; j++) {
				info->BlockTable[(info->OffsetTable[i][j] * layoutPtr->SUsPerPU) + l]
				    [info->LayoutTable[i][j]] = SUID;
			}
			SUID++;
		}
	}

	rf_free_1d_array(first_avail_slot, v);

	/* 5. set up the remaining redundant-but-useful parameters */

	raidPtr->totalSectors = (k * complete_FT_count + raidPtr->numRow * info->ExtraTablesPerDisk) *
	    info->SUsPerTable * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numStripe = (raidPtr->totalSectors / layoutPtr->sectorsPerStripeUnit) / (k - 1);

	/* strange evaluation order below to try and minimize overflow
	 * problems */

	layoutPtr->dataSectorsPerStripe = (k - 1) * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->bytesPerStripeUnit = layoutPtr->sectorsPerStripeUnit << raidPtr->logBytesPerSector;
	layoutPtr->numDataCol = k - 1;
	layoutPtr->numParityCol = 1;

	return (0);
}
/* declustering with distributed sparing */
static void rf_ShutdownDeclusteredDS(RF_ThreadArg_t);
static void
rf_ShutdownDeclusteredDS(arg)
	RF_ThreadArg_t arg;
{
	RF_DeclusteredConfigInfo_t *info;
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	if (info->SpareTable)
		rf_FreeSpareTable(raidPtr);
}

int
rf_ConfigureDeclusteredDS(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int rc;

	rc = rf_ConfigureDeclustered(listp, raidPtr, cfgPtr);
	if (rc)
		return (rc);
	rc = rf_ShutdownCreate(listp, rf_ShutdownDeclusteredDS, raidPtr);
	if (rc) {
		RF_ERRORMSG1("Got %d adding shutdown event for DeclusteredDS\n", rc);
		rf_ShutdownDeclusteredDS(raidPtr);
		return (rc);
	}
	return (0);
}

void
rf_MapSectorDeclustered(raidPtr, raidSector, row, col, diskSector, remap)
	RF_Raid_t *raidPtr;
	RF_RaidAddr_t raidSector;
	RF_RowCol_t *row;
	RF_RowCol_t *col;
	RF_SectorNum_t *diskSector;
	int remap;
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeNum_t SUID = raidSector / layoutPtr->sectorsPerStripeUnit;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0, outSU, SpareRegion = 0, SpareSpace = 0;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);

	FullTableID = SUID / sus_per_fulltable;	/* fulltable ID within array
						 * (across rows) */
	if (raidPtr->numRow == 1)
		*row = 0;	/* avoid a mod and a div in the common case */
	else {
		*row = FullTableID % raidPtr->numRow;
		FullTableID /= raidPtr->numRow;	/* convert to fulltable ID on
						 * this disk */
	}
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareRegion = FullTableID / info->FullTablesPerSpareRegion;
		SpareSpace = SpareRegion * info->SpareSpaceDepthPerRegionInSUs;
	}
	FullTableOffset = SUID % sus_per_fulltable;
	TableID = FullTableOffset / info->SUsPerTable;
	TableOffset = FullTableOffset - TableID * info->SUsPerTable;
	BlockID = TableOffset / info->PUsPerBlock;
	BlockOffset = TableOffset - BlockID * info->PUsPerBlock;
	BlockID %= info->BlocksPerTable;
	RepIndex = info->PUsPerBlock - TableID;
	if (!raidPtr->noRotate)
		BlockOffset += ((BlockOffset >= RepIndex) ? 1 : 0);
	*col = info->LayoutTable[BlockID][BlockOffset];

	/* remap to distributed spare space if indicated */
	if (remap) {
		RF_ASSERT(raidPtr->Disks[*row][*col].status == rf_ds_reconstructing || raidPtr->Disks[*row][*col].status == rf_ds_dist_spared ||
		    (rf_copyback_in_progress && raidPtr->Disks[*row][*col].status == rf_ds_optimal));
		rf_remap_to_spare_space(layoutPtr, info, *row, FullTableID, TableID, BlockID, (base_suid) ? 1 : 0, SpareRegion, col, &outSU);
	} else {

		outSU = base_suid;
		outSU += FullTableID * fulltable_depth;	/* offs to strt of FT */
		outSU += SpareSpace;	/* skip rsvd spare space */
		outSU += TableID * info->TableDepthInPUs * layoutPtr->SUsPerPU;	/* offs to strt of tble */
		outSU += info->OffsetTable[BlockID][BlockOffset] * layoutPtr->SUsPerPU;	/* offs to the PU */
	}
	outSU += TableOffset / (info->BlocksPerTable * info->PUsPerBlock);	/* offs to the SU within
										 * a PU */

	/* convert SUs to sectors, and, if not aligned to SU boundary, add in
	 * offset to sector. */
	*diskSector = outSU * layoutPtr->sectorsPerStripeUnit + (raidSector % layoutPtr->sectorsPerStripeUnit);

	RF_ASSERT(*col != -1);
}
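
/*
 * Worked example (hypothetical parameters, sketch only -- not compiled):
 * with a 20.5-style design (b=76, v=20, k=5) and SUsPerPU = 1, we would
 * have SUsPerTable = 76*4 = 304, SUsPerFullTable = 5*304 = 1520,
 * PUsPerBlock = 4 and BlocksPerTable = 76.  The sketch below walks SUID
 * 2000 through the same decomposition rf_MapSectorDeclustered uses.
 */
#if 0
static void
example_map_sector_decomposition(void)
{
	RF_StripeNum_t SUID = 2000;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;

	FullTableID = SUID / 1520;			/* == 1 */
	FullTableOffset = SUID % 1520;			/* == 480 */
	TableID = FullTableOffset / 304;		/* == 1 */
	TableOffset = FullTableOffset - TableID * 304;	/* == 176 */
	BlockID = (TableOffset / 4) % 76;		/* == 44 */
	BlockOffset = TableOffset % 4;			/* == 0 */
	RepIndex = 4 - TableID;				/* == 3: parity slot */
	/* BlockOffset (0) < RepIndex (3), so no skip over the parity unit;
	 * the data unit lands on disk LayoutTable[44][0]. */
	RF_ASSERT(FullTableID == 1 && TableID == 1 && BlockID == 44 &&
	    BlockOffset == 0 && RepIndex == 3);
}
#endif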


/* prototyping this inexplicably causes the compile of the layout table (rf_layout.c) to fail */
void
rf_MapParityDeclustered(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeNum_t SUID = raidSector / layoutPtr->sectorsPerStripeUnit;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0, outSU, SpareRegion = 0, SpareSpace = 0;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);

	/* compute row & (possibly) spare space exactly as before */
	FullTableID = SUID / sus_per_fulltable;
	if (raidPtr->numRow == 1)
		*row = 0;	/* avoid a mod and a div in the common case */
	else {
		*row = FullTableID % raidPtr->numRow;
		FullTableID /= raidPtr->numRow;	/* convert to fulltable ID on
						 * this disk */
	}
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		SpareRegion = FullTableID / info->FullTablesPerSpareRegion;
		SpareSpace = SpareRegion * info->SpareSpaceDepthPerRegionInSUs;
	}
	/* compute BlockID and RepIndex exactly as before */
	FullTableOffset = SUID % sus_per_fulltable;
	TableID = FullTableOffset / info->SUsPerTable;
	TableOffset = FullTableOffset - TableID * info->SUsPerTable;
	/* TableOffset = FullTableOffset % info->SUsPerTable; */
	/* BlockID = (TableOffset / info->PUsPerBlock) %
	 * info->BlocksPerTable; */
	BlockID = TableOffset / info->PUsPerBlock;
	/* BlockOffset = TableOffset % info->PUsPerBlock; */
	BlockOffset = TableOffset - BlockID * info->PUsPerBlock;
	BlockID %= info->BlocksPerTable;

	/* the parity block is in the position indicated by RepIndex */
	RepIndex = (raidPtr->noRotate) ? info->PUsPerBlock : info->PUsPerBlock - TableID;
	*col = info->LayoutTable[BlockID][RepIndex];

	if (remap) {
		RF_ASSERT(raidPtr->Disks[*row][*col].status == rf_ds_reconstructing || raidPtr->Disks[*row][*col].status == rf_ds_dist_spared ||
		    (rf_copyback_in_progress && raidPtr->Disks[*row][*col].status == rf_ds_optimal));
		rf_remap_to_spare_space(layoutPtr, info, *row, FullTableID, TableID, BlockID, (base_suid) ? 1 : 0, SpareRegion, col, &outSU);
	} else {

		/* compute sector as before, except use RepIndex instead of
		 * BlockOffset */
		outSU = base_suid;
		outSU += FullTableID * fulltable_depth;
		outSU += SpareSpace;	/* skip rsvd spare space */
		outSU += TableID * info->TableDepthInPUs * layoutPtr->SUsPerPU;
		outSU += info->OffsetTable[BlockID][RepIndex] * layoutPtr->SUsPerPU;
	}

	outSU += TableOffset / (info->BlocksPerTable * info->PUsPerBlock);
	*diskSector = outSU * layoutPtr->sectorsPerStripeUnit + (raidSector % layoutPtr->sectorsPerStripeUnit);

	RF_ASSERT(*col != -1);
}
/* returns an array of ints identifying the disks that comprise the stripe containing the indicated address.
 * the caller must _never_ attempt to modify this array.
 */
void
rf_IdentifyStripeDeclustered(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t addr,
    RF_RowCol_t ** diskids,
    RF_RowCol_t * outRow)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0;
	RF_StripeNum_t SUID = rf_RaidAddressToStripeUnitID(layoutPtr, addr);
	RF_StripeNum_t stripeID, FullTableID;
	int tableOffset;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);
	FullTableID = SUID / sus_per_fulltable;	/* fulltable ID within array
						 * (across rows) */
	*outRow = FullTableID % raidPtr->numRow;
	stripeID = rf_StripeUnitIDToStripeID(layoutPtr, SUID);	/* find stripe offset
								 * into array */
	tableOffset = (stripeID % info->BlocksPerTable);	/* find offset into
								 * block design table */
	*diskids = info->LayoutTable[tableOffset];
}
/* This returns the default head-separation limit, which is measured
 * in "required units for reconstruction".  Each time a disk fetches
 * a unit, it bumps a counter.  The head-sep code prohibits any disk
 * from getting more than headSepLimit counter values ahead of any
 * other.
 *
 * We assume here that the number of floating recon buffers is already
 * set.  There are r stripes to be reconstructed in each table, and so
 * if we have a total of B buffers, we can have at most B/r tables
 * under recon at any one time.  In each table, lambda units are required
 * from each disk, so given B buffers, the head sep limit has to be
 * (lambda*B)/r units.  We subtract one to avoid weird boundary cases.
 *
 * For example, suppose we're given 50 buffers, r=19, and lambda=4 as in
 * the 20.5 design.  There are 19 stripes/table to be reconstructed, so
 * we can have 50/19 tables concurrently under reconstruction, which means
 * we can allow the fastest disk to get 50/19 tables ahead of the slower
 * disk.  There are lambda "required units" for each disk, so the fastest
 * disk can get 4*50/19 = 10 counter values ahead of the slowest.
 *
 * If numBufsToAccumulate is not 1, we need to limit the head sep further
 * because multiple bufs will be required for each stripe under recon.
 */
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitDeclustered(
    RF_Raid_t * raidPtr)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;

	return (info->Lambda * raidPtr->numFloatingReconBufs / info->TableDepthInPUs / rf_numBufsToAccumulate);
}
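
/*
 * Note: for a balanced block design b*k = v*r, so TableDepthInPUs (which
 * the configuration code computes as b*k/v) equals r, and the return
 * expression above matches the (lambda*B)/r formula in the comment,
 * scaled down by rf_numBufsToAccumulate.  With the example numbers above
 * (lambda=4, B=50, r=19) and rf_numBufsToAccumulate assumed to be 1, the
 * limit works out to 4*50/19 = 10.
 */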
/* returns the default number of recon buffers to use.  The value
 * is somewhat arbitrary...it's intended to be large enough to allow
 * for a reasonably large head-sep limit, but small enough that you
 * don't use up all your system memory with buffers.
 */
int
rf_GetDefaultNumFloatingReconBuffersDeclustered(RF_Raid_t * raidPtr)
{
	return (100 * rf_numBufsToAccumulate);
}
/* sectors in the last fulltable of the array need to be handled
 * specially since this fulltable can be incomplete.  this function
 * changes the values of certain params to handle this.
 *
 * the idea here is that MapSector et al. figure out which disk the
 * addressed unit lives on by computing the modulos of the unit number
 * with the number of units per fulltable, table, etc.  In the last
 * fulltable, there are fewer units per fulltable, so we need to adjust
 * the number of user data units per fulltable to reflect this.
 *
 * so, we (1) convert the fulltable size and depth parameters to
 * the size of the partial fulltable at the end, (2) compute the
 * disk sector offset where this fulltable starts, and (3) convert
 * the user's stripe unit number from an offset into the array to
 * an offset into the last fulltable.
 */
void
rf_decluster_adjust_params(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t * SUID,
    RF_StripeCount_t * sus_per_fulltable,
    RF_StripeCount_t * fulltable_depth,
    RF_StripeNum_t * base_suid)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;

	if (*SUID >= info->FullTableLimitSUID) {
		/* new full table size is size of last full table on disk */
		*sus_per_fulltable = info->ExtraTablesPerDisk * info->SUsPerTable;

		/* new full table depth is corresponding depth */
		*fulltable_depth = info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU;

		/* set up the new base offset */
		*base_suid = info->DiskOffsetOfLastFullTableInSUs;

		/* convert the user's array address to an offset into the last
		 * fulltable */
		*SUID -= info->FullTableLimitSUID;
	}
}
/*
 * map a stripe ID to a parity stripe ID.
 * See comment above RaidAddressToParityStripeID in layout.c.
 */
void
rf_MapSIDToPSIDDeclustered(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t stripeID,
    RF_StripeNum_t * psID,
    RF_ReconUnitNum_t * which_ru)
{
	RF_DeclusteredConfigInfo_t *info;

	info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;

	*psID = (stripeID / (layoutPtr->SUsPerPU * info->BlocksPerTable))
	    * info->BlocksPerTable + (stripeID % info->BlocksPerTable);
	*which_ru = (stripeID % (info->BlocksPerTable * layoutPtr->SUsPerPU))
	    / info->BlocksPerTable;
	RF_ASSERT((*which_ru) < layoutPtr->SUsPerPU / layoutPtr->SUsPerRU);
}
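
/*
 * Worked example (hypothetical numbers): with BlocksPerTable = 76 and
 * SUsPerPU = 2, stripe IDs 153 and 229 both map to parity stripe ID
 * (153/152)*76 + (153 % 76) = 77, with which_ru = (153 % 152)/76 = 0 and
 * (229 % 152)/76 = 1 respectively -- two different recon units of the
 * same parity stripe.
 */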
/*
 * Called from MapSector and MapParity to retarget an access at the spare unit.
 * Modifies the "col" and "outSU" parameters only.
 */
void
rf_remap_to_spare_space(
    RF_RaidLayout_t * layoutPtr,
    RF_DeclusteredConfigInfo_t * info,
    RF_RowCol_t row,
    RF_StripeNum_t FullTableID,
    RF_StripeNum_t TableID,
    RF_SectorNum_t BlockID,
    RF_StripeNum_t base_suid,
    RF_StripeNum_t SpareRegion,
    RF_RowCol_t * outCol,
    RF_StripeNum_t * outSU)
{
	RF_StripeNum_t ftID, spareTableStartSU, TableInSpareRegion, lastSROffset,
	    which_ft;

	/*
	 * note that FullTableID and hence SpareRegion may have gotten
	 * tweaked by rf_decluster_adjust_params.  We detect this by
	 * noticing that base_suid is not 0.
	 */
	if (base_suid == 0) {
		ftID = FullTableID;
	} else {
		/*
		 * There may be > 1.0 full tables in the last (i.e. partial)
		 * spare region.  find out which of these we're in.
		 */
		lastSROffset = info->NumCompleteSRs * info->SpareRegionDepthInSUs;
		which_ft = (info->DiskOffsetOfLastFullTableInSUs - lastSROffset) / (info->FullTableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the actual full table ID */
		ftID = info->DiskOffsetOfLastFullTableInSUs / (info->FullTableDepthInPUs * layoutPtr->SUsPerPU) + which_ft;
		SpareRegion = info->NumCompleteSRs;
	}
	TableInSpareRegion = (ftID * info->NumParityReps + TableID) % info->TablesPerSpareRegion;

	*outCol = info->SpareTable[TableInSpareRegion][BlockID].spareDisk;
	RF_ASSERT(*outCol != -1);

	spareTableStartSU = (SpareRegion == info->NumCompleteSRs) ?
	    info->DiskOffsetOfLastFullTableInSUs + info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU :
	    (SpareRegion + 1) * info->SpareRegionDepthInSUs - info->SpareSpaceDepthPerRegionInSUs;
	*outSU = spareTableStartSU + info->SpareTable[TableInSpareRegion][BlockID].spareBlockOffsetInSUs;
	if (*outSU >= layoutPtr->stripeUnitsPerDisk) {
		printf("rf_remap_to_spare_space: invalid remapped disk SU offset %ld\n", (long) *outSU);
	}
}

#endif /* (RF_INCLUDE_PARITY_DECLUSTERING > 0) || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0) */


int
rf_InstallSpareTable(
    RF_Raid_t * raidPtr,
    RF_RowCol_t frow,
    RF_RowCol_t fcol)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	RF_SparetWait_t *req;
	int retcode;

	RF_Malloc(req, sizeof(*req), (RF_SparetWait_t *));
	req->C = raidPtr->numCol;
	req->G = raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol;
	req->fcol = fcol;
	req->SUsPerPU = raidPtr->Layout.SUsPerPU;
	req->TablesPerSpareRegion = info->TablesPerSpareRegion;
	req->BlocksPerTable = info->BlocksPerTable;
	req->TableDepthInPUs = info->TableDepthInPUs;
	req->SpareSpaceDepthPerRegionInSUs = info->SpareSpaceDepthPerRegionInSUs;

	retcode = rf_GetSpareTableFromDaemon(req);
	RF_ASSERT(!retcode);	/* XXX -- fix this to recover gracefully --
				 * XXX */
	return (retcode);
}
/*
 * Invoked via ioctl to install a spare table in the kernel.
 */
int
rf_SetSpareTable(raidPtr, data)
	RF_Raid_t *raidPtr;
	void *data;
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	RF_SpareTableEntry_t **ptrs;
	int i, retcode;

	/* what we need to copyin is a 2-d array, so first copyin the user
	 * pointers to the rows in the table */
	RF_Malloc(ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *), (RF_SpareTableEntry_t **));
	retcode = copyin((caddr_t) data, (caddr_t) ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));

	if (retcode)
		return (retcode);

	/* now allocate kernel space for the row pointers */
	RF_Malloc(info->SpareTable, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *), (RF_SpareTableEntry_t **));

	/* now allocate kernel space for each row in the table, and copy it in
	 * from user space */
	for (i = 0; i < info->TablesPerSpareRegion; i++) {
		RF_Malloc(info->SpareTable[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t), (RF_SpareTableEntry_t *));
		retcode = copyin(ptrs[i], info->SpareTable[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t));
		if (retcode) {
			info->SpareTable = NULL;	/* blow off the memory
							 * we've allocated */
			return (retcode);
		}
	}

	/* free up the temporary array we used */
	RF_Free(ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));

	return (0);
}
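
/*
 * Sketch only (not compiled): the shape rf_SetSpareTable() expects in
 * "data" is an array of TablesPerSpareRegion user-space row pointers,
 * each pointing at BlocksPerTable RF_SpareTableEntry_t's.  The fragment
 * below is a hypothetical userland illustration of building that
 * structure; the helper name and parameters are assumptions, not part of
 * RAIDframe, and error handling is omitted.
 */
#if 0
/* hypothetical userland helper; assumes <stdlib.h> and the RAIDframe headers */
static RF_SpareTableEntry_t **
example_build_spare_table(int tablesPerSpareRegion, int blocksPerTable)
{
	RF_SpareTableEntry_t **rows;
	int t;

	rows = malloc(tablesPerSpareRegion * sizeof(*rows));
	for (t = 0; t < tablesPerSpareRegion; t++) {
		rows[t] = calloc(blocksPerTable, sizeof(**rows));
		/* caller fills in rows[t][j].spareDisk and
		 * rows[t][j].spareBlockOffsetInSUs for each j */
	}
	return (rows);	/* handed to the kernel as the "data" argument */
}
#endif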

RF_ReconUnitCount_t
rf_GetNumSpareRUsDeclustered(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;

	return (((RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo)->TotSparePUsPerDisk);
}

void
rf_FreeSpareTable(raidPtr)
	RF_Raid_t *raidPtr;
{
	long i;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_SpareTableEntry_t **table = info->SpareTable;

	for (i = 0; i < info->TablesPerSpareRegion; i++) {
		RF_Free(table[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t));
	}
	RF_Free(table, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));
	info->SpareTable = (RF_SpareTableEntry_t **) NULL;
}