/*	$NetBSD: rf_paritylogging.c,v 1.5 2000/01/08 05:13:26 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Parity logging configuration, DAG selection, and mapping are
 * implemented here.
 */

#include "rf_archs.h"

#if RF_INCLUDE_PARITYLOGGING > 0

#include "rf_types.h"
#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagutils.h"
#include "rf_dagfuncs.h"
#include "rf_dagffrd.h"
#include "rf_dagffwr.h"
#include "rf_dagdegrd.h"
#include "rf_dagdegwr.h"
#include "rf_threadid.h"
#include "rf_paritylog.h"
#include "rf_paritylogDiskMgr.h"
#include "rf_paritylogging.h"
#include "rf_parityloggingdags.h"
#include "rf_general.h"
#include "rf_map.h"
#include "rf_utils.h"
#include "rf_shutdown.h"

typedef struct RF_ParityLoggingConfigInfo_s {
	RF_RowCol_t **stripeIdentifier;	/* filled in at config time & used by
					 * IdentifyStripe */
} RF_ParityLoggingConfigInfo_t;

static void FreeRegionInfo(RF_Raid_t * raidPtr, RF_RegionId_t regionID);
static void rf_ShutdownParityLogging(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingRegionInfo(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingPool(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingRegionBufferPool(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingParityBufferPool(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingDiskQueue(RF_ThreadArg_t arg);

int
rf_ConfigureParityLogging(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int     i, j, startdisk, rc;
	RF_SectorCount_t totalLogCapacity, fragmentation, lastRegionCapacity;
	RF_SectorCount_t parityBufferCapacity, maxRegionParityRange;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ParityLoggingConfigInfo_t *info;
	RF_ParityLog_t *l = NULL, *next;
	caddr_t lHeapPtr;

	if (rf_numParityRegions <= 0)
		return (EINVAL);

	/*
	 * We create multiple entries on the shutdown list here, since
	 * this configuration routine is fairly complicated in and of
	 * itself, and this makes backing out of a failed configuration
	 * much simpler.
	 */

	raidPtr->numSectorsPerLog = RF_DEFAULT_NUM_SECTORS_PER_LOG;

	/* create a parity logging configuration structure */
	RF_MallocAndAdd(info, sizeof(RF_ParityLoggingConfigInfo_t), (RF_ParityLoggingConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;

	RF_ASSERT(raidPtr->numRow == 1);

	/* the stripe identifier must identify the disks in each stripe, IN
	 * THE ORDER THAT THEY APPEAR IN THE STRIPE. */
	info->stripeIdentifier = rf_make_2d_array((raidPtr->numCol), (raidPtr->numCol), raidPtr->cleanupList);
	if (info->stripeIdentifier == NULL)
		return (ENOMEM);

	startdisk = 0;
	for (i = 0; i < (raidPtr->numCol); i++) {
		for (j = 0; j < (raidPtr->numCol); j++) {
			info->stripeIdentifier[i][j] = (startdisk + j) % (raidPtr->numCol - 1);
		}
		if ((--startdisk) < 0)
			startdisk = raidPtr->numCol - 1 - 1;
	}

	/* fill in the remaining layout parameters */
	layoutPtr->numStripe = layoutPtr->stripeUnitsPerDisk;
	layoutPtr->bytesPerStripeUnit = layoutPtr->sectorsPerStripeUnit << raidPtr->logBytesPerSector;
	layoutPtr->numParityCol = 1;
	layoutPtr->numParityLogCol = 1;
	layoutPtr->numDataCol = raidPtr->numCol - layoutPtr->numParityCol - layoutPtr->numParityLogCol;
	layoutPtr->dataSectorsPerStripe = layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->dataStripeUnitsPerDisk = layoutPtr->stripeUnitsPerDisk;
	raidPtr->sectorsPerDisk = layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit;

	raidPtr->totalSectors = layoutPtr->stripeUnitsPerDisk * layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;

	/* configure parity log parameters
	 *
	 * parameter                    comment/constraints
	 * -------------------------------------------
	 * numParityRegions*            all regions (except possibly last)
	 *                              of equal size
	 * totalInCoreLogCapacity*      amount of memory in bytes available
	 *                              for in-core logs (default 1 MB)
	 * numSectorsPerLog#            capacity of an in-core log in sectors
	 *                              (1 * disk track)
	 * numParityLogs                total number of in-core logs,
	 *                              should be at least numParityRegions
	 * regionLogCapacity            size of a region log (except possibly
	 *                              last one) in sectors
	 * totalLogCapacity             total amount of log space in sectors
	 *
	 * where '*' denotes a user settable parameter.
	 * Note that logs are fixed to be the size of a disk track,
	 * value #defined in rf_paritylog.h
	 *
	 */
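
	/*
	 * Worked example (illustrative numbers only, not taken from any
	 * real configuration): with 2,000,000 sectors per disk, one log
	 * column, and rf_numParityRegions = 64, totalLogCapacity is
	 * 2,000,000 sectors and each region log gets 2,000,000 / 64 =
	 * 31,250 sectors.  The code below may then adjust the region
	 * count to reduce fragmentation, and finally rounds
	 * regionLogCapacity down to a multiple of numSectorsPerLog.
	 */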

	totalLogCapacity = layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit * layoutPtr->numParityLogCol;
	raidPtr->regionLogCapacity = totalLogCapacity / rf_numParityRegions;
	if (rf_parityLogDebug)
		printf("bytes per sector %d\n", raidPtr->bytesPerSector);

	/* reduce fragmentation within a disk region by adjusting the number
	 * of regions in an attempt to allow an integral number of logs to fit
	 * into a disk region */
	fragmentation = raidPtr->regionLogCapacity % raidPtr->numSectorsPerLog;
	if (fragmentation > 0)
		for (i = 1; i < (raidPtr->numSectorsPerLog / 2); i++) {
			if (((totalLogCapacity / (rf_numParityRegions + i)) % raidPtr->numSectorsPerLog) < fragmentation) {
				rf_numParityRegions++;
				raidPtr->regionLogCapacity = totalLogCapacity / rf_numParityRegions;
				fragmentation = raidPtr->regionLogCapacity % raidPtr->numSectorsPerLog;
			}
			if (((totalLogCapacity / (rf_numParityRegions - i)) % raidPtr->numSectorsPerLog) < fragmentation) {
				rf_numParityRegions--;
				raidPtr->regionLogCapacity = totalLogCapacity / rf_numParityRegions;
				fragmentation = raidPtr->regionLogCapacity % raidPtr->numSectorsPerLog;
			}
		}
	/* ensure an integral number of logs fits in each region */
	raidPtr->regionLogCapacity = (raidPtr->regionLogCapacity / raidPtr->numSectorsPerLog) * raidPtr->numSectorsPerLog;

	raidPtr->numParityLogs = rf_totalInCoreLogCapacity / (raidPtr->bytesPerSector * raidPtr->numSectorsPerLog);
	/* to avoid deadlock, must ensure that enough logs exist for each
	 * region to have one simultaneously */
	if (raidPtr->numParityLogs < rf_numParityRegions)
		raidPtr->numParityLogs = rf_numParityRegions;

	/* create region information structs */
	RF_Malloc(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)), (RF_RegionInfo_t *));
	if (raidPtr->regionInfo == NULL)
		return (ENOMEM);

	/* last region may not be full capacity */
	lastRegionCapacity = raidPtr->regionLogCapacity;
	while ((rf_numParityRegions - 1) * raidPtr->regionLogCapacity + lastRegionCapacity > totalLogCapacity)
		lastRegionCapacity = lastRegionCapacity - raidPtr->numSectorsPerLog;

	raidPtr->regionParityRange = raidPtr->sectorsPerDisk / rf_numParityRegions;
	maxRegionParityRange = raidPtr->regionParityRange;

	/* i can't remember why this line is in the code -wvcii 6/30/95 */
	/* if (raidPtr->sectorsPerDisk % rf_numParityRegions > 0)
		regionParityRange++; */
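
	/*
	 * regionParityRange is the number of parity sectors on each disk
	 * covered by a single region; the last region also absorbs any
	 * remainder (see the numSectorsParity computation in the region
	 * setup loop below).
	 */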

	/* build pool of unused parity logs */
	RF_Malloc(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector, (caddr_t));
	if (raidPtr->parityLogBufferHeap == NULL)
		return (ENOMEM);
	lHeapPtr = raidPtr->parityLogBufferHeap;
	rc = rf_mutex_init(&raidPtr->parityLogPool.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
		return (ENOMEM);
	}
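	/*
	 * Carve the single heap allocation into one buffer per in-core log;
	 * each log also gets its own array of parity log records.
	 */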
	for (i = 0; i < raidPtr->numParityLogs; i++) {
		if (i == 0) {
			RF_Calloc(raidPtr->parityLogPool.parityLogs, 1, sizeof(RF_ParityLog_t), (RF_ParityLog_t *));
			if (raidPtr->parityLogPool.parityLogs == NULL) {
				RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
				return (ENOMEM);
			}
			l = raidPtr->parityLogPool.parityLogs;
		} else {
			RF_Calloc(l->next, 1, sizeof(RF_ParityLog_t), (RF_ParityLog_t *));
			if (l->next == NULL) {
				RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
				for (l = raidPtr->parityLogPool.parityLogs; l; l = next) {
					next = l->next;
					if (l->records)
						RF_Free(l->records, (raidPtr->numSectorsPerLog * sizeof(RF_ParityLogRecord_t)));
					RF_Free(l, sizeof(RF_ParityLog_t));
				}
				return (ENOMEM);
			}
			l = l->next;
		}
		l->bufPtr = lHeapPtr;
		lHeapPtr += raidPtr->numSectorsPerLog * raidPtr->bytesPerSector;
		RF_Malloc(l->records, (raidPtr->numSectorsPerLog * sizeof(RF_ParityLogRecord_t)), (RF_ParityLogRecord_t *));
		if (l->records == NULL) {
			RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
			for (l = raidPtr->parityLogPool.parityLogs; l; l = next) {
				next = l->next;
				if (l->records)
					RF_Free(l->records, (raidPtr->numSectorsPerLog * sizeof(RF_ParityLogRecord_t)));
				RF_Free(l, sizeof(RF_ParityLog_t));
			}
			return (ENOMEM);
		}
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingPool, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownParityLoggingPool(raidPtr);
		return (rc);
	}
	/* build pool of region buffers */
	rc = rf_mutex_init(&raidPtr->regionBufferPool.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (ENOMEM);
	}
	rc = rf_cond_init(&raidPtr->regionBufferPool.cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_mutex_destroy(&raidPtr->regionBufferPool.mutex);
		return (ENOMEM);
	}
	raidPtr->regionBufferPool.bufferSize = raidPtr->regionLogCapacity * raidPtr->bytesPerSector;
	printf("regionBufferPool.bufferSize %d\n", raidPtr->regionBufferPool.bufferSize);
	raidPtr->regionBufferPool.totalBuffers = 1;	/* for now, only one region
							 * at a time may be
							 * reintegrated */
	raidPtr->regionBufferPool.availableBuffers = raidPtr->regionBufferPool.totalBuffers;
	raidPtr->regionBufferPool.availBuffersIndex = 0;
	raidPtr->regionBufferPool.emptyBuffersIndex = 0;
	RF_Malloc(raidPtr->regionBufferPool.buffers, raidPtr->regionBufferPool.totalBuffers * sizeof(caddr_t), (caddr_t *));
	if (raidPtr->regionBufferPool.buffers == NULL) {
		rf_mutex_destroy(&raidPtr->regionBufferPool.mutex);
		rf_cond_destroy(&raidPtr->regionBufferPool.cond);
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->regionBufferPool.totalBuffers; i++) {
		RF_Malloc(raidPtr->regionBufferPool.buffers[i], raidPtr->regionBufferPool.bufferSize * sizeof(char), (caddr_t));
		if (raidPtr->regionBufferPool.buffers[i] == NULL) {
			rf_mutex_destroy(&raidPtr->regionBufferPool.mutex);
			rf_cond_destroy(&raidPtr->regionBufferPool.cond);
			for (j = 0; j < i; j++) {
				RF_Free(raidPtr->regionBufferPool.buffers[j], raidPtr->regionBufferPool.bufferSize * sizeof(char));
			}
			RF_Free(raidPtr->regionBufferPool.buffers, raidPtr->regionBufferPool.totalBuffers * sizeof(caddr_t));
			return (ENOMEM);
		}
		printf("raidPtr->regionBufferPool.buffers[%d] = %lx\n", i,
		    (long) raidPtr->regionBufferPool.buffers[i]);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingRegionBufferPool, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownParityLoggingRegionBufferPool(raidPtr);
		return (rc);
	}
	/* build pool of parity buffers */
	parityBufferCapacity = maxRegionParityRange;
	rc = rf_mutex_init(&raidPtr->parityBufferPool.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	rc = rf_cond_init(&raidPtr->parityBufferPool.cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_mutex_destroy(&raidPtr->parityBufferPool.mutex);
		return (ENOMEM);
	}
	raidPtr->parityBufferPool.bufferSize = parityBufferCapacity * raidPtr->bytesPerSector;
	printf("parityBufferPool.bufferSize %d\n", raidPtr->parityBufferPool.bufferSize);
	raidPtr->parityBufferPool.totalBuffers = 1;	/* for now, only one region
							 * at a time may be
							 * reintegrated */
	raidPtr->parityBufferPool.availableBuffers = raidPtr->parityBufferPool.totalBuffers;
	raidPtr->parityBufferPool.availBuffersIndex = 0;
	raidPtr->parityBufferPool.emptyBuffersIndex = 0;
	RF_Malloc(raidPtr->parityBufferPool.buffers, raidPtr->parityBufferPool.totalBuffers * sizeof(caddr_t), (caddr_t *));
	if (raidPtr->parityBufferPool.buffers == NULL) {
		rf_mutex_destroy(&raidPtr->parityBufferPool.mutex);
		rf_cond_destroy(&raidPtr->parityBufferPool.cond);
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->parityBufferPool.totalBuffers; i++) {
		RF_Malloc(raidPtr->parityBufferPool.buffers[i], raidPtr->parityBufferPool.bufferSize * sizeof(char), (caddr_t));
		if (raidPtr->parityBufferPool.buffers[i] == NULL) {
			rf_mutex_destroy(&raidPtr->parityBufferPool.mutex);
			rf_cond_destroy(&raidPtr->parityBufferPool.cond);
			for (j = 0; j < i; j++) {
				RF_Free(raidPtr->parityBufferPool.buffers[j], raidPtr->parityBufferPool.bufferSize * sizeof(char));
			}
			RF_Free(raidPtr->parityBufferPool.buffers, raidPtr->parityBufferPool.totalBuffers * sizeof(caddr_t));
			return (ENOMEM);
		}
		printf("parityBufferPool.buffers[%d] = %lx\n", i,
		    (long) raidPtr->parityBufferPool.buffers[i]);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingParityBufferPool, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownParityLoggingParityBufferPool(raidPtr);
		return (rc);
	}
	/* initialize parityLogDiskQueue */
	rc = rf_create_managed_mutex(listp, &raidPtr->parityLogDiskQueue.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	rc = rf_create_managed_cond(listp, &raidPtr->parityLogDiskQueue.cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	raidPtr->parityLogDiskQueue.flushQueue = NULL;
	raidPtr->parityLogDiskQueue.reintQueue = NULL;
	raidPtr->parityLogDiskQueue.bufHead = NULL;
	raidPtr->parityLogDiskQueue.bufTail = NULL;
	raidPtr->parityLogDiskQueue.reintHead = NULL;
	raidPtr->parityLogDiskQueue.reintTail = NULL;
	raidPtr->parityLogDiskQueue.logBlockHead = NULL;
	raidPtr->parityLogDiskQueue.logBlockTail = NULL;
	raidPtr->parityLogDiskQueue.reintBlockHead = NULL;
	raidPtr->parityLogDiskQueue.reintBlockTail = NULL;
	raidPtr->parityLogDiskQueue.freeDataList = NULL;
	raidPtr->parityLogDiskQueue.freeCommonList = NULL;

	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingDiskQueue, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	for (i = 0; i < rf_numParityRegions; i++) {
		rc = rf_mutex_init(&raidPtr->regionInfo[i].mutex);
		if (rc) {
			RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
			    __LINE__, rc);
			for (j = 0; j < i; j++)
				FreeRegionInfo(raidPtr, j);
			RF_Free(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)));
			return (ENOMEM);
		}
		rc = rf_mutex_init(&raidPtr->regionInfo[i].reintMutex);
		if (rc) {
			RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
			    __LINE__, rc);
			rf_mutex_destroy(&raidPtr->regionInfo[i].mutex);
			for (j = 0; j < i; j++)
				FreeRegionInfo(raidPtr, j);
			RF_Free(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)));
			return (ENOMEM);
		}
		raidPtr->regionInfo[i].reintInProgress = RF_FALSE;
		raidPtr->regionInfo[i].regionStartAddr = raidPtr->regionLogCapacity * i;
		raidPtr->regionInfo[i].parityStartAddr = raidPtr->regionParityRange * i;
		if (i < rf_numParityRegions - 1) {
			raidPtr->regionInfo[i].capacity = raidPtr->regionLogCapacity;
			raidPtr->regionInfo[i].numSectorsParity = raidPtr->regionParityRange;
		} else {
			raidPtr->regionInfo[i].capacity = lastRegionCapacity;
			raidPtr->regionInfo[i].numSectorsParity = raidPtr->sectorsPerDisk - raidPtr->regionParityRange * i;
			if (raidPtr->regionInfo[i].numSectorsParity > maxRegionParityRange)
				maxRegionParityRange = raidPtr->regionInfo[i].numSectorsParity;
		}
		raidPtr->regionInfo[i].diskCount = 0;
		RF_ASSERT(raidPtr->regionInfo[i].capacity + raidPtr->regionInfo[i].regionStartAddr <= totalLogCapacity);
		RF_ASSERT(raidPtr->regionInfo[i].parityStartAddr + raidPtr->regionInfo[i].numSectorsParity <= raidPtr->sectorsPerDisk);
		RF_Malloc(raidPtr->regionInfo[i].diskMap, (raidPtr->regionInfo[i].capacity * sizeof(RF_DiskMap_t)), (RF_DiskMap_t *));
		if (raidPtr->regionInfo[i].diskMap == NULL) {
			rf_mutex_destroy(&raidPtr->regionInfo[i].mutex);
			rf_mutex_destroy(&raidPtr->regionInfo[i].reintMutex);
			for (j = 0; j < i; j++)
				FreeRegionInfo(raidPtr, j);
			RF_Free(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)));
			return (ENOMEM);
		}
		raidPtr->regionInfo[i].loggingEnabled = RF_FALSE;
		raidPtr->regionInfo[i].coreLog = NULL;
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingRegionInfo, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownParityLoggingRegionInfo(raidPtr);
		return (rc);
	}
	RF_ASSERT(raidPtr->parityLogDiskQueue.threadState == 0);
	raidPtr->parityLogDiskQueue.threadState = RF_PLOG_CREATED;
	rc = RF_CREATE_THREAD(raidPtr->pLogDiskThreadHandle, rf_ParityLoggingDiskManager, raidPtr, "rf_log");
	if (rc) {
		raidPtr->parityLogDiskQueue.threadState = 0;
		RF_ERRORMSG3("Unable to create parity logging disk thread file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (ENOMEM);
	}
	/* wait for thread to start */
	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	while (!(raidPtr->parityLogDiskQueue.threadState & RF_PLOG_RUNNING)) {
		RF_WAIT_COND(raidPtr->parityLogDiskQueue.cond, raidPtr->parityLogDiskQueue.mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);

	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLogging, raidPtr);
	if (rc) {
		RF_ERRORMSG1("Got rc=%d adding parity logging shutdown event\n", rc);
		rf_ShutdownParityLogging(raidPtr);
		return (rc);
	}
	if (rf_parityLogDebug) {
		printf(" size of disk log in sectors: %d\n",
		    (int) totalLogCapacity);
		printf(" total number of parity regions is %d\n", (int) rf_numParityRegions);
		printf(" nominal sectors of log per parity region is %d\n", (int) raidPtr->regionLogCapacity);
		printf(" nominal region fragmentation is %d sectors\n", (int) fragmentation);
		printf(" total number of parity logs is %d\n", raidPtr->numParityLogs);
		printf(" parity log size is %d sectors\n", raidPtr->numSectorsPerLog);
		printf(" total in-core log space is %d bytes\n", (int) rf_totalInCoreLogCapacity);
	}
	rf_EnableParityLogging(raidPtr);

	return (0);
}

static void
FreeRegionInfo(
    RF_Raid_t * raidPtr,
    RF_RegionId_t regionID)
{
	RF_LOCK_MUTEX(raidPtr->regionInfo[regionID].mutex);
	RF_Free(raidPtr->regionInfo[regionID].diskMap, (raidPtr->regionInfo[regionID].capacity * sizeof(RF_DiskMap_t)));
	if (!rf_forceParityLogReint && raidPtr->regionInfo[regionID].coreLog) {
		rf_ReleaseParityLogs(raidPtr, raidPtr->regionInfo[regionID].coreLog);
		raidPtr->regionInfo[regionID].coreLog = NULL;
	} else {
		RF_ASSERT(raidPtr->regionInfo[regionID].coreLog == NULL);
		RF_ASSERT(raidPtr->regionInfo[regionID].diskCount == 0);
	}
	RF_UNLOCK_MUTEX(raidPtr->regionInfo[regionID].mutex);
	rf_mutex_destroy(&raidPtr->regionInfo[regionID].mutex);
	rf_mutex_destroy(&raidPtr->regionInfo[regionID].reintMutex);
}


static void
FreeParityLogQueue(
    RF_Raid_t * raidPtr,
    RF_ParityLogQueue_t * queue)
{
	RF_ParityLog_t *l1, *l2;

	RF_LOCK_MUTEX(queue->mutex);
	l1 = queue->parityLogs;
	while (l1) {
		l2 = l1;
		l1 = l2->next;
		RF_Free(l2->records, (raidPtr->numSectorsPerLog * sizeof(RF_ParityLogRecord_t)));
		RF_Free(l2, sizeof(RF_ParityLog_t));
	}
	RF_UNLOCK_MUTEX(queue->mutex);
	rf_mutex_destroy(&queue->mutex);
}


static void
FreeRegionBufferQueue(RF_RegionBufferQueue_t * queue)
{
	int     i;

	RF_LOCK_MUTEX(queue->mutex);
	if (queue->availableBuffers != queue->totalBuffers) {
		printf("Attempt to free region queue which is still in use!\n");
		RF_ASSERT(0);
	}
	for (i = 0; i < queue->totalBuffers; i++)
		RF_Free(queue->buffers[i], queue->bufferSize);
	RF_Free(queue->buffers, queue->totalBuffers * sizeof(caddr_t));
	RF_UNLOCK_MUTEX(queue->mutex);
	rf_mutex_destroy(&queue->mutex);
}

static void
rf_ShutdownParityLoggingRegionInfo(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;
	RF_RegionId_t i;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int     tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingRegionInfo\n", tid);
	}
	/* free region information structs */
	for (i = 0; i < rf_numParityRegions; i++)
		FreeRegionInfo(raidPtr, i);
	RF_Free(raidPtr->regionInfo, (rf_numParityRegions * sizeof(raidPtr->regionInfo)));
	raidPtr->regionInfo = NULL;
}

static void
rf_ShutdownParityLoggingPool(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int     tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingPool\n", tid);
	}
	/* free contents of parityLogPool */
	FreeParityLogQueue(raidPtr, &raidPtr->parityLogPool);
	RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
}

static void
rf_ShutdownParityLoggingRegionBufferPool(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int     tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingRegionBufferPool\n", tid);
	}
	FreeRegionBufferQueue(&raidPtr->regionBufferPool);
}

static void
rf_ShutdownParityLoggingParityBufferPool(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int     tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingParityBufferPool\n", tid);
	}
	FreeRegionBufferQueue(&raidPtr->parityBufferPool);
}

static void
rf_ShutdownParityLoggingDiskQueue(RF_ThreadArg_t arg)
{
	RF_ParityLogData_t *d;
	RF_CommonLogData_t *c;
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int     tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingDiskQueue\n", tid);
	}
	/* free disk manager stuff */
	RF_ASSERT(raidPtr->parityLogDiskQueue.bufHead == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.bufTail == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.reintHead == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.reintTail == NULL);
	while (raidPtr->parityLogDiskQueue.freeDataList) {
		d = raidPtr->parityLogDiskQueue.freeDataList;
		raidPtr->parityLogDiskQueue.freeDataList = raidPtr->parityLogDiskQueue.freeDataList->next;
		RF_Free(d, sizeof(RF_ParityLogData_t));
	}
	while (raidPtr->parityLogDiskQueue.freeCommonList) {
		c = raidPtr->parityLogDiskQueue.freeCommonList;
		rf_mutex_destroy(&c->mutex);
		raidPtr->parityLogDiskQueue.freeCommonList = raidPtr->parityLogDiskQueue.freeCommonList->next;
		RF_Free(c, sizeof(RF_CommonLogData_t));
	}
}

static void
rf_ShutdownParityLogging(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int     tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLogging\n", tid);
	}
	/* shutdown disk thread */
	/* This has the desirable side-effect of forcing all regions to be
	 * reintegrated.  This is necessary since all parity log maps are
	 * currently held in volatile memory. */

	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	raidPtr->parityLogDiskQueue.threadState |= RF_PLOG_TERMINATE;
	RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	RF_SIGNAL_COND(raidPtr->parityLogDiskQueue.cond);
	/*
	 * pLogDiskThread will now terminate when queues are cleared;
	 * now wait for it to be done
	 */
	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	while (!(raidPtr->parityLogDiskQueue.threadState & RF_PLOG_SHUTDOWN)) {
		RF_WAIT_COND(raidPtr->parityLogDiskQueue.cond, raidPtr->parityLogDiskQueue.mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	if (rf_parityLogDebug) {
		int     tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLogging done (thread completed)\n", tid);
	}
}

int
rf_GetDefaultNumFloatingReconBuffersParityLogging(RF_Raid_t * raidPtr)
{
	return (20);
}

RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitParityLogging(RF_Raid_t * raidPtr)
{
	return (10);
}

/* return the region ID for a given RAID address */
RF_RegionId_t
rf_MapRegionIDParityLogging(
    RF_Raid_t * raidPtr,
    RF_SectorNum_t address)
{
	RF_RegionId_t regionID;

	/* regionID = address / (raidPtr->regionParityRange * raidPtr->Layout.numDataCol); */
	regionID = address / raidPtr->regionParityRange;
	if (regionID == rf_numParityRegions) {
		/* last region may be larger than other regions */
		regionID--;
	}
	RF_ASSERT(address >= raidPtr->regionInfo[regionID].parityStartAddr);
	RF_ASSERT(address < raidPtr->regionInfo[regionID].parityStartAddr + raidPtr->regionInfo[regionID].numSectorsParity);
	RF_ASSERT(regionID < rf_numParityRegions);
	return (regionID);
}

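/*
 * Column layout used by the mapping routines below: data occupies columns
 * 0 .. numDataCol-1, the dedicated parity column is column numDataCol
 * (== numCol - 2), and the parity log lives in the last column, numCol - 1.
 */
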
/* given a logical RAID sector, determine physical disk address of data */
void
rf_MapSectorParityLogging(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;
	*row = 0;
	/* *col = (SUID % (raidPtr->numCol -
	 * raidPtr->Layout.numParityLogCol)); */
	*col = SUID % raidPtr->Layout.numDataCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}


/* given a logical RAID sector, determine physical disk address of parity */
void
rf_MapParityParityLogging(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;

	*row = 0;
	/* *col = raidPtr->Layout.numDataCol -
	 * (SUID / raidPtr->Layout.numDataCol) %
	 * (raidPtr->numCol - raidPtr->Layout.numParityLogCol); */
	*col = raidPtr->Layout.numDataCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}


/* given a regionID and sector offset, determine the physical disk address of
 * the parity log */
void
rf_MapLogParityLogging(
    RF_Raid_t * raidPtr,
    RF_RegionId_t regionID,
    RF_SectorNum_t regionOffset,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * startSector)
{
	*row = 0;
	*col = raidPtr->numCol - 1;
	*startSector = raidPtr->regionInfo[regionID].regionStartAddr + regionOffset;
}


/* given a regionID, determine the physical disk address of the logged parity
 * for that region */
void
rf_MapRegionParity(
    RF_Raid_t * raidPtr,
    RF_RegionId_t regionID,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * startSector,
    RF_SectorCount_t * numSector)
{
	*row = 0;
	*col = raidPtr->numCol - 2;
	*startSector = raidPtr->regionInfo[regionID].parityStartAddr;
	*numSector = raidPtr->regionInfo[regionID].numSectorsParity;
}


/* given a logical RAID address, determine the participating disks in the
 * stripe */
void
rf_IdentifyStripeParityLogging(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t addr,
    RF_RowCol_t ** diskids,
    RF_RowCol_t * outRow)
{
	RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr);
	RF_ParityLoggingConfigInfo_t *info = (RF_ParityLoggingConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	*outRow = 0;
	*diskids = info->stripeIdentifier[stripeID % raidPtr->numCol];
}


void
rf_MapSIDToPSIDParityLogging(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t stripeID,
    RF_StripeNum_t * psID,
    RF_ReconUnitNum_t * which_ru)
{
	*which_ru = 0;
	*psID = stripeID;
}


/* select an algorithm for performing an access.  Returns two pointers,
 * one to a function that will return information about the DAG, and
 * another to a function that will create the dag.
 */
void
rf_ParityLoggingDagSelect(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    RF_AccessStripeMap_t * asmp,
    RF_VoidFuncPtr * createFunc)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_PhysDiskAddr_t *failedPDA = NULL;
	RF_RowCol_t frow, fcol;
	RF_RowStatus_t rstat;
	int     prior_recon;
	int     tid;

	RF_ASSERT(RF_IO_IS_R_OR_W(type));

	if (asmp->numDataFailed + asmp->numParityFailed > 1) {
		RF_ERRORMSG("Multiple disks failed in a single group!  Aborting I/O operation.\n");
		/* *infoFunc = */ *createFunc = NULL;
		return;
	} else
		if (asmp->numDataFailed + asmp->numParityFailed == 1) {

			/* if under recon & already reconstructed, redirect
			 * the access to the spare drive and eliminate the
			 * failure indication */
			failedPDA = asmp->failedPDAs[0];
			frow = failedPDA->row;
			fcol = failedPDA->col;
			rstat = raidPtr->status[failedPDA->row];
			prior_recon = (rstat == rf_rs_reconfigured) || (
			    (rstat == rf_rs_reconstructing) ?
			    rf_CheckRUReconstructed(raidPtr->reconControl[frow]->reconMap, failedPDA->startSector) : 0
			    );
			if (prior_recon) {
				RF_RowCol_t or = failedPDA->row, oc = failedPDA->col;
				RF_SectorNum_t oo = failedPDA->startSector;
				if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
					/* redirect to dist spare space */

					if (failedPDA == asmp->parityInfo) {

						/* parity has failed */
						(layoutPtr->map->MapParity) (raidPtr, failedPDA->raidAddress, &failedPDA->row,
						    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						if (asmp->parityInfo->next) {
							/* redir 2nd component, if any */
							RF_PhysDiskAddr_t *p = asmp->parityInfo->next;
							RF_SectorNum_t SUoffs = p->startSector % layoutPtr->sectorsPerStripeUnit;
							p->row = failedPDA->row;
							p->col = failedPDA->col;
							p->startSector = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, failedPDA->startSector) +
							    SUoffs;	/* cheating: startSector is not
									 * really a RAID address */
						}
					} else
						if (asmp->parityInfo->next && failedPDA == asmp->parityInfo->next) {
							RF_ASSERT(0);	/* should not ever happen */
						} else {

							/* data has failed */
							(layoutPtr->map->MapSector) (raidPtr, failedPDA->raidAddress, &failedPDA->row,
							    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						}

				} else {
					/* redirect to dedicated spare space */

					failedPDA->row = raidPtr->Disks[frow][fcol].spareRow;
					failedPDA->col = raidPtr->Disks[frow][fcol].spareCol;

					/* the parity may have two distinct
					 * components, both of which may need
					 * to be redirected */
					if (asmp->parityInfo->next) {
						if (failedPDA == asmp->parityInfo) {
							failedPDA->next->row = failedPDA->row;
							failedPDA->next->col = failedPDA->col;
						} else
							if (failedPDA == asmp->parityInfo->next) {
								/* paranoid: should never occur */
								asmp->parityInfo->row = failedPDA->row;
								asmp->parityInfo->col = failedPDA->col;
							}
					}
				}

				RF_ASSERT(failedPDA->col != -1);

				if (rf_dagDebug || rf_mapDebug) {
					rf_get_threadid(tid);
					printf("[%d] Redirected type '%c' r %d c %d o %ld -> r %d c %d o %ld\n",
					    tid, type, or, oc, (long) oo, failedPDA->row, failedPDA->col, (long) failedPDA->startSector);
				}
				asmp->numDataFailed = asmp->numParityFailed = 0;
			}
		}
	if (type == RF_IO_TYPE_READ) {

		if (asmp->numDataFailed == 0)
			*createFunc = (RF_VoidFuncPtr) rf_CreateFaultFreeReadDAG;
		else
			*createFunc = (RF_VoidFuncPtr) rf_CreateRaidFiveDegradedReadDAG;

	} else {

		/* if mirroring, always use large writes.  If the access
		 * requires two distinct parity updates, always do a small
		 * write.  If the stripe contains a failure but the access
		 * does not, do a small write.  The first conditional
		 * (numStripeUnitsAccessed <= numDataCol/2) uses a
		 * less-than-or-equal rather than just a less-than because
		 * when G is 3 or 4, numDataCol/2 is 1, and I want
		 * single-stripe-unit updates to use just one disk. */
		if ((asmp->numDataFailed + asmp->numParityFailed) == 0) {
			if (((asmp->numStripeUnitsAccessed <= (layoutPtr->numDataCol / 2)) && (layoutPtr->numDataCol != 1)) ||
			    (asmp->parityInfo->next != NULL) || rf_CheckStripeForFailures(raidPtr, asmp)) {
				*createFunc = (RF_VoidFuncPtr) rf_CreateParityLoggingSmallWriteDAG;
			} else
				*createFunc = (RF_VoidFuncPtr) rf_CreateParityLoggingLargeWriteDAG;
		} else
			if (asmp->numParityFailed == 1)
				*createFunc = (RF_VoidFuncPtr) rf_CreateNonRedundantWriteDAG;
			else
				if (asmp->numStripeUnitsAccessed != 1 && failedPDA->numSector != layoutPtr->sectorsPerStripeUnit)
					*createFunc = NULL;
				else
					*createFunc = (RF_VoidFuncPtr) rf_CreateDegradedWriteDAG;
	}
}
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */