/*	$NetBSD: rf_decluster.c,v 1.14 2002/11/19 01:49:42 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*----------------------------------------------------------------------
 *
 * rf_decluster.c -- code related to the declustered layout
 *
 * Created 10-21-92 (MCH)
 *
 * Nov 93:  adding support for distributed sparing.  This code is a little
 *          complex:  the basic layout used is as follows:
 *          let F = (v-1)/GCD(r,v-1).  The spare space for each set of
 *          F consecutive fulltables is grouped together and placed after
 *          that set of tables.
 *                   +------------------------------+
 *                   |        F fulltables          |
 *                   |         Spare Space          |
 *                   |        F fulltables          |
 *                   |         Spare Space          |
 *                   |             ...              |
 *                   +------------------------------+
 *
 *--------------------------------------------------------------------*/
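/*
 * Purely illustrative arithmetic (the numbers below are assumed for the
 * sake of example and are not taken from any particular block design):
 * with v = 21 disks and r = 5, GCD(r, v-1) = GCD(5, 20) = 5, so
 * F = (v-1)/GCD(r,v-1) = 20/5 = 4, i.e. a chunk of spare space follows
 * every 4 consecutive fulltables.
 */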

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_decluster.c,v 1.14 2002/11/19 01:49:42 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_archs.h"
#include "rf_raid.h"
#include "rf_decluster.h"
#include "rf_debugMem.h"
#include "rf_utils.h"
#include "rf_alloclist.h"
#include "rf_general.h"
#include "rf_kintf.h"
#include "rf_shutdown.h"

#if (RF_INCLUDE_PARITY_DECLUSTERING > 0) || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0)

/* configuration code */

int
rf_ConfigureDeclustered(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	int     b, v, k, r, lambda;	/* block design params */
	int     i, j;
	RF_RowCol_t *first_avail_slot;
	RF_StripeCount_t complete_FT_count, numCompleteFullTablesPerDisk;
	RF_DeclusteredConfigInfo_t *info;
	RF_StripeCount_t PUsPerDisk, spareRegionDepthInPUs, numCompleteSpareRegionsPerDisk,
	        extraPUsPerDisk;
	RF_StripeCount_t totSparePUsPerDisk;
	RF_SectorNum_t diskOffsetOfLastFullTableInSUs;
	RF_SectorCount_t SpareSpaceInSUs;
	char   *cfgBuf = (char *) (cfgPtr->layoutSpecific);
	RF_StripeNum_t l, SUID;

	SUID = l = 0;
	numCompleteSpareRegionsPerDisk = 0;

	/* 1. create layout specific structure */
	RF_MallocAndAdd(info, sizeof(RF_DeclusteredConfigInfo_t), (RF_DeclusteredConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;
	info->SpareTable = NULL;

	/* 2. extract parameters from the config structure */
	if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
		(void)memcpy(info->sparemap_fname, cfgBuf, RF_SPAREMAP_NAME_LEN);
	}
	cfgBuf += RF_SPAREMAP_NAME_LEN;

	b = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	v = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	k = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	r = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	lambda = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	raidPtr->noRotate = *((int *) cfgBuf);
	cfgBuf += sizeof(int);

	/* the sparemaps are generated assuming that parity is rotated, so we
	 * issue a warning if both distributed sparing and no-rotate are on at
	 * the same time */
	if ((layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) && raidPtr->noRotate) {
		RF_ERRORMSG("Warning: distributed sparing specified without parity rotation.\n");
	}
	if (raidPtr->numCol != v) {
		RF_ERRORMSG2("RAID: config error: table element count (%d) not equal to no. of cols (%d)\n", v, raidPtr->numCol);
		return (EINVAL);
	}
	/* 3. set up the values used in the mapping code */
	info->BlocksPerTable = b;
	info->Lambda = lambda;
	info->NumParityReps = info->groupSize = k;
	info->SUsPerTable = b * (k - 1) * layoutPtr->SUsPerPU;	/* b blks, k-1 SUs each */
	info->SUsPerFullTable = k * info->SUsPerTable;	/* rot k times */
	info->PUsPerBlock = k - 1;
	info->SUsPerBlock = info->PUsPerBlock * layoutPtr->SUsPerPU;
	info->TableDepthInPUs = (b * k) / v;
	info->FullTableDepthInPUs = info->TableDepthInPUs * k;	/* k repetitions */

	/* used only in distributed sparing case */
	info->FullTablesPerSpareRegion = (v - 1) / rf_gcd(r, v - 1);	/* (v-1)/gcd fulltables */
	info->TablesPerSpareRegion = k * info->FullTablesPerSpareRegion;
	info->SpareSpaceDepthPerRegionInSUs = (r * info->TablesPerSpareRegion / (v - 1)) * layoutPtr->SUsPerPU;
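	/*
	 * Illustrative example with assumed (not shipped) parameters:
	 * for a hypothetical block design with v = 21, b = 21, k = 5, r = 5
	 * and SUsPerPU = 1, the three values above work out to
	 * FullTablesPerSpareRegion = 20 / gcd(5, 20) = 4,
	 * TablesPerSpareRegion = 5 * 4 = 20, and
	 * SpareSpaceDepthPerRegionInSUs = (5 * 20 / 20) * 1 = 5 SUs.
	 */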

	/* check to make sure the block design is sufficiently small */
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		if (info->FullTableDepthInPUs * layoutPtr->SUsPerPU + info->SpareSpaceDepthPerRegionInSUs > layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG3("RAID: config error: Full Table depth (%d) + Spare Space (%d) larger than disk size (%d) (BD too big)\n",
			    (int) info->FullTableDepthInPUs,
			    (int) info->SpareSpaceDepthPerRegionInSUs,
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	} else {
		if (info->TableDepthInPUs * layoutPtr->SUsPerPU > layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG2("RAID: config error: Table depth (%d) larger than disk size (%d) (BD too big)\n",
			    (int) (info->TableDepthInPUs * layoutPtr->SUsPerPU),
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	}


	/* compute the size of each disk, and the number of tables in the last
	 * fulltable (which need not be complete) */
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {

		PUsPerDisk = layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU;
		spareRegionDepthInPUs = (info->TablesPerSpareRegion * info->TableDepthInPUs +
		    (info->TablesPerSpareRegion * info->TableDepthInPUs) / (v - 1));
		info->SpareRegionDepthInSUs = spareRegionDepthInPUs * layoutPtr->SUsPerPU;

		numCompleteSpareRegionsPerDisk = PUsPerDisk / spareRegionDepthInPUs;
		info->NumCompleteSRs = numCompleteSpareRegionsPerDisk;
		extraPUsPerDisk = PUsPerDisk % spareRegionDepthInPUs;

		/* assume conservatively that we need the full amount of spare
		 * space in one region in order to provide spares for the
		 * partial spare region at the end of the array.  We set "i"
		 * to the number of tables in the partial spare region.  This
		 * may actually include some fulltables. */
		extraPUsPerDisk -= (info->SpareSpaceDepthPerRegionInSUs / layoutPtr->SUsPerPU);
		if (extraPUsPerDisk <= 0)
			i = 0;
		else
			i = extraPUsPerDisk / info->TableDepthInPUs;

		complete_FT_count = raidPtr->numRow * (numCompleteSpareRegionsPerDisk * (info->TablesPerSpareRegion / k) + i / k);
		info->FullTableLimitSUID = complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk = i % k;

		/* note that in the last spare region, the spare space is
		 * complete even though data/parity space is not */
		totSparePUsPerDisk = (numCompleteSpareRegionsPerDisk + 1) * (info->SpareSpaceDepthPerRegionInSUs / layoutPtr->SUsPerPU);
		info->TotSparePUsPerDisk = totSparePUsPerDisk;

		layoutPtr->stripeUnitsPerDisk =
		    ((complete_FT_count / raidPtr->numRow) * info->FullTableDepthInPUs +	/* data & parity space */
		    info->ExtraTablesPerDisk * info->TableDepthInPUs +
		    totSparePUsPerDisk	/* spare space */
		    ) * layoutPtr->SUsPerPU;
		layoutPtr->dataStripeUnitsPerDisk =
		    (complete_FT_count * info->FullTableDepthInPUs + info->ExtraTablesPerDisk * info->TableDepthInPUs)
		    * layoutPtr->SUsPerPU * (k - 1) / k;

	} else {
		/* non-dist spare case:  force each disk to contain an
		 * integral number of tables */
		layoutPtr->stripeUnitsPerDisk /= (info->TableDepthInPUs * layoutPtr->SUsPerPU);
		layoutPtr->stripeUnitsPerDisk *= (info->TableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the number of tables in the last fulltable, which
		 * need not be complete */
		complete_FT_count =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) / info->FullTableDepthInPUs) * raidPtr->numRow;

		info->FullTableLimitSUID = complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) / info->TableDepthInPUs) % k;
	}

	raidPtr->sectorsPerDisk = layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit;

	/* find the disk offset of the stripe unit where the last fulltable
	 * starts */
	numCompleteFullTablesPerDisk = complete_FT_count / raidPtr->numRow;
	diskOffsetOfLastFullTableInSUs = numCompleteFullTablesPerDisk * info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareSpaceInSUs = numCompleteSpareRegionsPerDisk * info->SpareSpaceDepthPerRegionInSUs;
		diskOffsetOfLastFullTableInSUs += SpareSpaceInSUs;
		info->DiskOffsetOfLastSpareSpaceChunkInSUs =
		    diskOffsetOfLastFullTableInSUs + info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU;
	}
	info->DiskOffsetOfLastFullTableInSUs = diskOffsetOfLastFullTableInSUs;
	info->numCompleteFullTablesPerDisk = numCompleteFullTablesPerDisk;

	/* 4. create and initialize the lookup tables */
	info->LayoutTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->LayoutTable == NULL)
		return (ENOMEM);
	info->OffsetTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->OffsetTable == NULL)
		return (ENOMEM);
	info->BlockTable = rf_make_2d_array(info->TableDepthInPUs * layoutPtr->SUsPerPU, raidPtr->numCol, raidPtr->cleanupList);
	if (info->BlockTable == NULL)
		return (ENOMEM);

	first_avail_slot = rf_make_1d_array(v, NULL);
	if (first_avail_slot == NULL)
		return (ENOMEM);

	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++)
			info->LayoutTable[i][j] = *cfgBuf++;

	/* initialize offset table */
	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++) {
			info->OffsetTable[i][j] = first_avail_slot[info->LayoutTable[i][j]];
			first_avail_slot[info->LayoutTable[i][j]]++;
		}

	/* initialize block table */
	for (SUID = l = 0; l < layoutPtr->SUsPerPU; l++) {
		for (i = 0; i < b; i++) {
			for (j = 0; j < k; j++) {
				info->BlockTable[(info->OffsetTable[i][j] * layoutPtr->SUsPerPU) + l]
				    [info->LayoutTable[i][j]] = SUID;
			}
			SUID++;
		}
	}

	rf_free_1d_array(first_avail_slot, v);

	/* 5. set up the remaining redundant-but-useful parameters */

	raidPtr->totalSectors = (k * complete_FT_count + raidPtr->numRow * info->ExtraTablesPerDisk) *
	    info->SUsPerTable * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numStripe = (raidPtr->totalSectors / layoutPtr->sectorsPerStripeUnit) / (k - 1);

	/* strange evaluation order below to try and minimize overflow
	 * problems */

	layoutPtr->dataSectorsPerStripe = (k - 1) * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numDataCol = k - 1;
	layoutPtr->numParityCol = 1;

	return (0);
}
/* declustering with distributed sparing */
static void rf_ShutdownDeclusteredDS(RF_ThreadArg_t);
static void
rf_ShutdownDeclusteredDS(arg)
	RF_ThreadArg_t arg;
{
	RF_DeclusteredConfigInfo_t *info;
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	if (info->SpareTable)
		rf_FreeSpareTable(raidPtr);
}

int
rf_ConfigureDeclusteredDS(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int     rc;

	rc = rf_ConfigureDeclustered(listp, raidPtr, cfgPtr);
	if (rc)
		return (rc);
	rc = rf_ShutdownCreate(listp, rf_ShutdownDeclusteredDS, raidPtr);
	if (rc) {
		RF_ERRORMSG1("Got %d adding shutdown event for DeclusteredDS\n", rc);
		rf_ShutdownDeclusteredDS(raidPtr);
		return (rc);
	}
	return (0);
}

void
rf_MapSectorDeclustered(raidPtr, raidSector, row, col, diskSector, remap)
	RF_Raid_t *raidPtr;
	RF_RaidAddr_t raidSector;
	RF_RowCol_t *row;
	RF_RowCol_t *col;
	RF_SectorNum_t *diskSector;
	int     remap;
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeNum_t SUID = raidSector / layoutPtr->sectorsPerStripeUnit;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0, outSU, SpareRegion = 0, SpareSpace = 0;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);

	FullTableID = SUID / sus_per_fulltable;	/* fulltable ID within array
						 * (across rows) */
	if (raidPtr->numRow == 1)
		*row = 0;	/* avoid a mod and a div in the common case */
	else {
		*row = FullTableID % raidPtr->numRow;
		FullTableID /= raidPtr->numRow;	/* convert to fulltable ID on
						 * this disk */
	}
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareRegion = FullTableID / info->FullTablesPerSpareRegion;
		SpareSpace = SpareRegion * info->SpareSpaceDepthPerRegionInSUs;
	}
	FullTableOffset = SUID % sus_per_fulltable;
	TableID = FullTableOffset / info->SUsPerTable;
	TableOffset = FullTableOffset - TableID * info->SUsPerTable;
	BlockID = TableOffset / info->PUsPerBlock;
	BlockOffset = TableOffset - BlockID * info->PUsPerBlock;
	BlockID %= info->BlocksPerTable;
	RepIndex = info->PUsPerBlock - TableID;
	if (!raidPtr->noRotate)
		BlockOffset += ((BlockOffset >= RepIndex) ? 1 : 0);
	*col = info->LayoutTable[BlockID][BlockOffset];

	/* remap to distributed spare space if indicated */
	if (remap) {
		RF_ASSERT(raidPtr->Disks[*row][*col].status == rf_ds_reconstructing || raidPtr->Disks[*row][*col].status == rf_ds_dist_spared ||
		    (rf_copyback_in_progress && raidPtr->Disks[*row][*col].status == rf_ds_optimal));
		rf_remap_to_spare_space(layoutPtr, info, *row, FullTableID, TableID, BlockID, (base_suid) ? 1 : 0, SpareRegion, col, &outSU);
	} else {

		outSU = base_suid;
		outSU += FullTableID * fulltable_depth;	/* offs to strt of FT */
		outSU += SpareSpace;	/* skip rsvd spare space */
		outSU += TableID * info->TableDepthInPUs * layoutPtr->SUsPerPU;	/* offs to strt of tble */
		outSU += info->OffsetTable[BlockID][BlockOffset] * layoutPtr->SUsPerPU;	/* offs to the PU */
	}
	outSU += TableOffset / (info->BlocksPerTable * info->PUsPerBlock);	/* offs to the SU within
										 * a PU */

	/* convert SUs to sectors, and, if not aligned to SU boundary, add in
	 * offset to sector. */
	*diskSector = outSU * layoutPtr->sectorsPerStripeUnit + (raidSector % layoutPtr->sectorsPerStripeUnit);

	RF_ASSERT(*col != -1);
}


/* prototyping this inexplicably causes the compile of the layout table (rf_layout.c) to fail */
void
rf_MapParityDeclustered(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeNum_t SUID = raidSector / layoutPtr->sectorsPerStripeUnit;
	RF_StripeNum_t FullTableID, FullTableOffset, TableID, TableOffset;
	RF_StripeNum_t BlockID, BlockOffset, RepIndex;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0, outSU, SpareRegion = 0, SpareSpace = 0;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);

	/* compute row & (possibly) spare space exactly as before */
	FullTableID = SUID / sus_per_fulltable;
	if (raidPtr->numRow == 1)
		*row = 0;	/* avoid a mod and a div in the common case */
	else {
		*row = FullTableID % raidPtr->numRow;
		FullTableID /= raidPtr->numRow;	/* convert to fulltable ID on
						 * this disk */
	}
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		SpareRegion = FullTableID / info->FullTablesPerSpareRegion;
		SpareSpace = SpareRegion * info->SpareSpaceDepthPerRegionInSUs;
	}
	/* compute BlockID and RepIndex exactly as before */
	FullTableOffset = SUID % sus_per_fulltable;
	TableID = FullTableOffset / info->SUsPerTable;
	TableOffset = FullTableOffset - TableID * info->SUsPerTable;
	/* TableOffset = FullTableOffset % info->SUsPerTable; */
	/* BlockID = (TableOffset / info->PUsPerBlock) %
	 * info->BlocksPerTable; */
	BlockID = TableOffset / info->PUsPerBlock;
	/* BlockOffset = TableOffset % info->PUsPerBlock; */
	BlockOffset = TableOffset - BlockID * info->PUsPerBlock;
	BlockID %= info->BlocksPerTable;

	/* the parity block is in the position indicated by RepIndex */
	RepIndex = (raidPtr->noRotate) ? info->PUsPerBlock : info->PUsPerBlock - TableID;
	*col = info->LayoutTable[BlockID][RepIndex];

	if (remap) {
		RF_ASSERT(raidPtr->Disks[*row][*col].status == rf_ds_reconstructing || raidPtr->Disks[*row][*col].status == rf_ds_dist_spared ||
		    (rf_copyback_in_progress && raidPtr->Disks[*row][*col].status == rf_ds_optimal));
		rf_remap_to_spare_space(layoutPtr, info, *row, FullTableID, TableID, BlockID, (base_suid) ? 1 : 0, SpareRegion, col, &outSU);
	} else {

		/* compute sector as before, except use RepIndex instead of
		 * BlockOffset */
		outSU = base_suid;
		outSU += FullTableID * fulltable_depth;
		outSU += SpareSpace;	/* skip rsvd spare space */
		outSU += TableID * info->TableDepthInPUs * layoutPtr->SUsPerPU;
		outSU += info->OffsetTable[BlockID][RepIndex] * layoutPtr->SUsPerPU;
	}

	outSU += TableOffset / (info->BlocksPerTable * info->PUsPerBlock);
	*diskSector = outSU * layoutPtr->sectorsPerStripeUnit + (raidSector % layoutPtr->sectorsPerStripeUnit);

	RF_ASSERT(*col != -1);
}
/* returns an array of ints identifying the disks that comprise the stripe containing the indicated address.
 * the caller must _never_ attempt to modify this array.
 */
void
rf_IdentifyStripeDeclustered(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t addr,
    RF_RowCol_t ** diskids,
    RF_RowCol_t * outRow)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_StripeCount_t sus_per_fulltable = info->SUsPerFullTable;
	RF_StripeCount_t fulltable_depth = info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	RF_StripeNum_t base_suid = 0;
	RF_StripeNum_t SUID = rf_RaidAddressToStripeUnitID(layoutPtr, addr);
	RF_StripeNum_t stripeID, FullTableID;
	int     tableOffset;

	rf_decluster_adjust_params(layoutPtr, &SUID, &sus_per_fulltable, &fulltable_depth, &base_suid);
	FullTableID = SUID / sus_per_fulltable;	/* fulltable ID within array
						 * (across rows) */
	*outRow = FullTableID % raidPtr->numRow;
	stripeID = rf_StripeUnitIDToStripeID(layoutPtr, SUID);	/* find stripe offset
								 * into array */
	tableOffset = (stripeID % info->BlocksPerTable);	/* find offset into
								 * block design table */
	*diskids = info->LayoutTable[tableOffset];
}
/* This returns the default head-separation limit, which is measured
 * in "required units for reconstruction".  Each time a disk fetches
 * a unit, it bumps a counter.  The head-sep code prohibits any disk
 * from getting more than headSepLimit counter values ahead of any
 * other.
 *
 * We assume here that the number of floating recon buffers is already
 * set.  There are r stripes to be reconstructed in each table, and so
 * if we have a total of B buffers, we can have at most B/r tables
 * under recon at any one time.  In each table, lambda units are required
 * from each disk, so given B buffers, the head sep limit has to be
 * (lambda*B)/r units.  We subtract one to avoid weird boundary cases.
 *
 * For example, suppose we're given 50 buffers, r=19, and lambda=4 as in
 * the 20.5 design.  There are 19 stripes/table to be reconstructed, so
 * we can have 50/19 tables concurrently under reconstruction, which means
 * we can allow the fastest disk to get 50/19 tables ahead of the slowest
 * disk.  There are lambda "required units" for each disk, so the fastest
 * disk can get 4*50/19 = 10 counter values ahead of the slowest.
 *
 * If numBufsToAccumulate is not 1, we need to limit the head sep further
 * because multiple bufs will be required for each stripe under recon.
 */
RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitDeclustered(
    RF_Raid_t * raidPtr)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;

	return (info->Lambda * raidPtr->numFloatingReconBufs / info->TableDepthInPUs / rf_numBufsToAccumulate);
}
/* returns the default number of recon buffers to use.  The value
 * is somewhat arbitrary...it's intended to be large enough to allow
 * for a reasonably large head-sep limit, but small enough that you
 * don't use up all your system memory with buffers.
 */
int
rf_GetDefaultNumFloatingReconBuffersDeclustered(RF_Raid_t * raidPtr)
{
	return (100 * rf_numBufsToAccumulate);
}
/* sectors in the last fulltable of the array need to be handled
 * specially since this fulltable can be incomplete.  this function
 * changes the values of certain params to handle this.
 *
 * the idea here is that MapSector et al. figure out which disk the
 * addressed unit lives on by computing the modulos of the unit number
 * with the number of units per fulltable, table, etc.  In the last
 * fulltable, there are fewer units per fulltable, so we need to adjust
 * the number of user data units per fulltable to reflect this.
 *
 * so, we (1) convert the fulltable size and depth parameters to
 * the size of the partial fulltable at the end, (2) compute the
 * disk sector offset where this fulltable starts, and (3) convert
 * the user's stripe unit number from an offset into the array to
 * an offset into the last fulltable.
 */
void
rf_decluster_adjust_params(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t * SUID,
    RF_StripeCount_t * sus_per_fulltable,
    RF_StripeCount_t * fulltable_depth,
    RF_StripeNum_t * base_suid)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;

	if (*SUID >= info->FullTableLimitSUID) {
		/* new full table size is size of last full table on disk */
		*sus_per_fulltable = info->ExtraTablesPerDisk * info->SUsPerTable;

		/* new full table depth is corresponding depth */
		*fulltable_depth = info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU;

		/* set up the new base offset */
		*base_suid = info->DiskOffsetOfLastFullTableInSUs;

		/* convert the user's array address to an offset into the last
		 * fulltable */
		*SUID -= info->FullTableLimitSUID;
	}
}
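/*
 * Worked example with purely illustrative (assumed) values: suppose
 * FullTableLimitSUID = 4000, ExtraTablesPerDisk = 2, SUsPerTable = 100,
 * TableDepthInPUs = 5, SUsPerPU = 1 and DiskOffsetOfLastFullTableInSUs = 500.
 * For an incoming *SUID of 4150, the code above yields
 * *sus_per_fulltable = 2 * 100 = 200, *fulltable_depth = 2 * 5 * 1 = 10,
 * *base_suid = 500, and *SUID = 4150 - 4000 = 150, i.e. an offset into
 * the partial fulltable at the end of each disk.
 */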
/*
 * map a stripe ID to a parity stripe ID.
 * See comment above RaidAddressToParityStripeID in layout.c.
 */
void
rf_MapSIDToPSIDDeclustered(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t stripeID,
    RF_StripeNum_t * psID,
    RF_ReconUnitNum_t * which_ru)
{
	RF_DeclusteredConfigInfo_t *info;

	info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;

	*psID = (stripeID / (layoutPtr->SUsPerPU * info->BlocksPerTable))
	    * info->BlocksPerTable + (stripeID % info->BlocksPerTable);
	*which_ru = (stripeID % (info->BlocksPerTable * layoutPtr->SUsPerPU))
	    / info->BlocksPerTable;
	RF_ASSERT((*which_ru) < layoutPtr->SUsPerPU / layoutPtr->SUsPerRU);
}
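/*
 * Illustrative arithmetic (assumed values, not from a real configuration):
 * with BlocksPerTable = 5 and SUsPerPU = 2, a stripeID of 13 maps to
 * *psID = (13 / (2 * 5)) * 5 + (13 % 5) = 5 + 3 = 8 and
 * *which_ru = (13 % (5 * 2)) / 5 = 3 / 5 = 0.
 */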
/*
 * Called from MapSector and MapParity to retarget an access at the spare unit.
 * Modifies the "col" and "outSU" parameters only.
 */
void
rf_remap_to_spare_space(
    RF_RaidLayout_t * layoutPtr,
    RF_DeclusteredConfigInfo_t * info,
    RF_RowCol_t row,
    RF_StripeNum_t FullTableID,
    RF_StripeNum_t TableID,
    RF_SectorNum_t BlockID,
    RF_StripeNum_t base_suid,
    RF_StripeNum_t SpareRegion,
    RF_RowCol_t * outCol,
    RF_StripeNum_t * outSU)
{
	RF_StripeNum_t ftID, spareTableStartSU, TableInSpareRegion, lastSROffset,
	        which_ft;

	/*
	 * note that FullTableID and hence SpareRegion may have gotten
	 * tweaked by rf_decluster_adjust_params.  We detect this by
	 * noticing that base_suid is not 0.
	 */
	if (base_suid == 0) {
		ftID = FullTableID;
	} else {
		/*
		 * There may be > 1.0 full tables in the last (i.e. partial)
		 * spare region.  find out which of these we're in.
		 */
		lastSROffset = info->NumCompleteSRs * info->SpareRegionDepthInSUs;
		which_ft = (info->DiskOffsetOfLastFullTableInSUs - lastSROffset) / (info->FullTableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the actual full table ID */
		ftID = info->DiskOffsetOfLastFullTableInSUs / (info->FullTableDepthInPUs * layoutPtr->SUsPerPU) + which_ft;
		SpareRegion = info->NumCompleteSRs;
	}
	TableInSpareRegion = (ftID * info->NumParityReps + TableID) % info->TablesPerSpareRegion;

	*outCol = info->SpareTable[TableInSpareRegion][BlockID].spareDisk;
	RF_ASSERT(*outCol != -1);

	spareTableStartSU = (SpareRegion == info->NumCompleteSRs) ?
	    info->DiskOffsetOfLastFullTableInSUs + info->ExtraTablesPerDisk * info->TableDepthInPUs * layoutPtr->SUsPerPU :
	    (SpareRegion + 1) * info->SpareRegionDepthInSUs - info->SpareSpaceDepthPerRegionInSUs;
	*outSU = spareTableStartSU + info->SpareTable[TableInSpareRegion][BlockID].spareBlockOffsetInSUs;
	if (*outSU >= layoutPtr->stripeUnitsPerDisk) {
		printf("rf_remap_to_spare_space: invalid remapped disk SU offset %ld\n", (long) *outSU);
	}
}

#endif /* (RF_INCLUDE_PARITY_DECLUSTERING > 0) || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0) */

#if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
int
rf_InstallSpareTable(
    RF_Raid_t * raidPtr,
    RF_RowCol_t frow,
    RF_RowCol_t fcol)
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	RF_SparetWait_t *req;
	int     retcode;

	RF_Malloc(req, sizeof(*req), (RF_SparetWait_t *));
	req->C = raidPtr->numCol;
	req->G = raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol;
	req->fcol = fcol;
	req->SUsPerPU = raidPtr->Layout.SUsPerPU;
	req->TablesPerSpareRegion = info->TablesPerSpareRegion;
	req->BlocksPerTable = info->BlocksPerTable;
	req->TableDepthInPUs = info->TableDepthInPUs;
	req->SpareSpaceDepthPerRegionInSUs = info->SpareSpaceDepthPerRegionInSUs;

	retcode = rf_GetSpareTableFromDaemon(req);
	RF_ASSERT(!retcode);	/* XXX -- fix this to recover gracefully --
				 * XXX */
	return (retcode);
}
#endif
#if (RF_INCLUDE_PARITY_DECLUSTERING > 0) || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0)
/*
 * Invoked via ioctl to install a spare table in the kernel.
 */
int
rf_SetSpareTable(raidPtr, data)
	RF_Raid_t *raidPtr;
	void   *data;
{
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	RF_SpareTableEntry_t **ptrs;
	int     i, retcode;

	/* what we need to copyin is a 2-d array, so first copyin the user
	 * pointers to the rows in the table */
	RF_Malloc(ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *), (RF_SpareTableEntry_t **));
	retcode = copyin((caddr_t) data, (caddr_t) ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));

	if (retcode)
		return (retcode);

	/* now allocate kernel space for the row pointers */
	RF_Malloc(info->SpareTable, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *), (RF_SpareTableEntry_t **));

	/* now allocate kernel space for each row in the table, and copy it in
	 * from user space */
	for (i = 0; i < info->TablesPerSpareRegion; i++) {
		RF_Malloc(info->SpareTable[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t), (RF_SpareTableEntry_t *));
		retcode = copyin(ptrs[i], info->SpareTable[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t));
		if (retcode) {
			info->SpareTable = NULL;	/* blow off the memory
							 * we've allocated */
			return (retcode);
		}
	}

	/* free up the temporary array we used */
	RF_Free(ptrs, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));

	return (0);
}

RF_ReconUnitCount_t
rf_GetNumSpareRUsDeclustered(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;

	return (((RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo)->TotSparePUsPerDisk);
}
#endif /* (RF_INCLUDE_PARITY_DECLUSTERING > 0) || (RF_INCLUDE_PARITY_DECLUSTERING_PQ > 0) */

void
rf_FreeSpareTable(raidPtr)
	RF_Raid_t *raidPtr;
{
	long    i;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_DeclusteredConfigInfo_t *info = (RF_DeclusteredConfigInfo_t *) layoutPtr->layoutSpecificInfo;
	RF_SpareTableEntry_t **table = info->SpareTable;

	for (i = 0; i < info->TablesPerSpareRegion; i++) {
		RF_Free(table[i], info->BlocksPerTable * sizeof(RF_SpareTableEntry_t));
	}
	RF_Free(table, info->TablesPerSpareRegion * sizeof(RF_SpareTableEntry_t *));
	info->SpareTable = (RF_SpareTableEntry_t **) NULL;
}
