/*	$NetBSD: rf_paritylogging.c,v 1.3 1999/02/05 00:06:14 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */


/*
 * Parity logging configuration, DAG selection, and mapping are
 * implemented here.
 */

#include "rf_archs.h"

#if RF_INCLUDE_PARITYLOGGING > 0

#include "rf_types.h"
#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagutils.h"
#include "rf_dagfuncs.h"
#include "rf_dagffrd.h"
#include "rf_dagffwr.h"
#include "rf_dagdegrd.h"
#include "rf_dagdegwr.h"
#include "rf_threadid.h"
#include "rf_paritylog.h"
#include "rf_paritylogDiskMgr.h"
#include "rf_paritylogging.h"
#include "rf_parityloggingdags.h"
#include "rf_general.h"
#include "rf_map.h"
#include "rf_utils.h"
#include "rf_shutdown.h"

typedef struct RF_ParityLoggingConfigInfo_s {
	RF_RowCol_t **stripeIdentifier;	/* filled in at config time & used by
					 * IdentifyStripe */
} RF_ParityLoggingConfigInfo_t;

static void FreeRegionInfo(RF_Raid_t * raidPtr, RF_RegionId_t regionID);
static void rf_ShutdownParityLogging(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingRegionInfo(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingPool(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingRegionBufferPool(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingParityBufferPool(RF_ThreadArg_t arg);
static void rf_ShutdownParityLoggingDiskQueue(RF_ThreadArg_t arg);

int
rf_ConfigureParityLogging(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int i, j, startdisk, rc;
	RF_SectorCount_t totalLogCapacity, fragmentation, lastRegionCapacity;
	RF_SectorCount_t parityBufferCapacity, maxRegionParityRange;
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ParityLoggingConfigInfo_t *info;
	RF_ParityLog_t *l = NULL, *next;
	caddr_t lHeapPtr;

	/*
	 * We create multiple entries on the shutdown list here, since
	 * this configuration routine is fairly complicated in and of
	 * itself, and this makes backing out of a failed configuration
	 * much simpler.
	 */

	raidPtr->numSectorsPerLog = RF_DEFAULT_NUM_SECTORS_PER_LOG;

	/* create a parity logging configuration structure */
	RF_MallocAndAdd(info, sizeof(RF_ParityLoggingConfigInfo_t),
	    (RF_ParityLoggingConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;

	RF_ASSERT(raidPtr->numRow == 1);
100
101 /* the stripe identifier must identify the disks in each stripe, IN
102 * THE ORDER THAT THEY APPEAR IN THE STRIPE. */
103 info->stripeIdentifier = rf_make_2d_array((raidPtr->numCol), (raidPtr->numCol), raidPtr->cleanupList);
104 if (info->stripeIdentifier == NULL)
105 return (ENOMEM);
106
107 startdisk = 0;
108 for (i = 0; i < (raidPtr->numCol); i++) {
109 for (j = 0; j < (raidPtr->numCol); j++) {
110 info->stripeIdentifier[i][j] = (startdisk + j) % (raidPtr->numCol - 1);
111 }
112 if ((--startdisk) < 0)
113 startdisk = raidPtr->numCol - 1 - 1;
114 }
115
116 /* fill in the remaining layout parameters */
117 layoutPtr->numStripe = layoutPtr->stripeUnitsPerDisk;
118 layoutPtr->bytesPerStripeUnit = layoutPtr->sectorsPerStripeUnit << raidPtr->logBytesPerSector;
119 layoutPtr->numParityCol = 1;
120 layoutPtr->numParityLogCol = 1;
121 layoutPtr->numDataCol = raidPtr->numCol - layoutPtr->numParityCol - layoutPtr->numParityLogCol;
122 layoutPtr->dataSectorsPerStripe = layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
123 layoutPtr->dataStripeUnitsPerDisk = layoutPtr->stripeUnitsPerDisk;
124 raidPtr->sectorsPerDisk = layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit;
125
126 raidPtr->totalSectors = layoutPtr->stripeUnitsPerDisk * layoutPtr->numDataCol * layoutPtr->sectorsPerStripeUnit;
127
	/* configure parity log parameters
	 *
	 * parameter               comment/constraints
	 * -----------------------------------------------------------------
	 * numParityRegions*       all regions (except possibly last) of
	 *                         equal size
	 * totalInCoreLogCapacity* amount of memory in bytes available for
	 *                         in-core logs (default 1 MB)
	 * numSectorsPerLog#       capacity of an in-core log in sectors
	 *                         (1 disk track)
	 * numParityLogs           total number of in-core logs, should be
	 *                         at least numParityRegions
	 * regionLogCapacity       size of a region log (except possibly
	 *                         last one) in sectors
	 * totalLogCapacity        total amount of log space in sectors
	 *
	 * '*' denotes a user settable parameter.
	 * '#' logs are fixed to be the size of a disk track, value #defined
	 *     in rf_paritylog.h
	 */
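	/*
	 * Illustrative sizing sketch (not from the original source; the
	 * numbers below are hypothetical).  Assuming 512-byte sectors,
	 * rf_totalInCoreLogCapacity of 1 MB, and 64-sector in-core logs:
	 *
	 *     numParityLogs = 1048576 / (512 * 64) = 32
	 *
	 * so 32 in-core logs would be carved out of the buffer heap; if
	 * rf_numParityRegions exceeded 32, numParityLogs would be raised
	 * to match it (see the deadlock-avoidance clamp below).
	 */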

	totalLogCapacity = layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit * layoutPtr->numParityLogCol;
	raidPtr->regionLogCapacity = totalLogCapacity / rf_numParityRegions;
	if (rf_parityLogDebug)
		printf("bytes per sector %d\n", raidPtr->bytesPerSector);

	/* reduce fragmentation within a disk region by adjusting the number
	 * of regions in an attempt to allow an integral number of logs to fit
	 * into a disk region */
	fragmentation = raidPtr->regionLogCapacity % raidPtr->numSectorsPerLog;
	if (fragmentation > 0)
		for (i = 1; i < (raidPtr->numSectorsPerLog / 2); i++) {
			if (((totalLogCapacity / (rf_numParityRegions + i)) % raidPtr->numSectorsPerLog) < fragmentation) {
				rf_numParityRegions++;
				raidPtr->regionLogCapacity = totalLogCapacity / rf_numParityRegions;
				fragmentation = raidPtr->regionLogCapacity % raidPtr->numSectorsPerLog;
			}
			if (((totalLogCapacity / (rf_numParityRegions - i)) % raidPtr->numSectorsPerLog) < fragmentation) {
				rf_numParityRegions--;
				raidPtr->regionLogCapacity = totalLogCapacity / rf_numParityRegions;
				fragmentation = raidPtr->regionLogCapacity % raidPtr->numSectorsPerLog;
			}
		}
	/* ensure an integral number of logs per region */
	raidPtr->regionLogCapacity = (raidPtr->regionLogCapacity / raidPtr->numSectorsPerLog) * raidPtr->numSectorsPerLog;
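	/*
	 * Worked example of the adjustment above (hypothetical numbers, not
	 * from the original source).  With totalLogCapacity = 1000 sectors,
	 * numSectorsPerLog = 32 and rf_numParityRegions = 7:
	 * regionLogCapacity = 1000 / 7 = 142, fragmentation = 142 % 32 = 14.
	 * Trying 6 regions gives 1000 / 6 = 166 and 166 % 32 = 6, so the
	 * region count drops to 6; the truncation above then rounds 166
	 * down to 160 sectors, i.e. an integral five logs per region.
	 */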

	raidPtr->numParityLogs = rf_totalInCoreLogCapacity / (raidPtr->bytesPerSector * raidPtr->numSectorsPerLog);
	/* to avoid deadlock, must ensure that enough logs exist for each
	 * region to have one simultaneously */
	if (raidPtr->numParityLogs < rf_numParityRegions)
		raidPtr->numParityLogs = rf_numParityRegions;

	/* create region information structs */
	RF_Malloc(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)), (RF_RegionInfo_t *));
	if (raidPtr->regionInfo == NULL)
		return (ENOMEM);

	/* last region may not be full capacity */
	lastRegionCapacity = raidPtr->regionLogCapacity;
	while ((rf_numParityRegions - 1) * raidPtr->regionLogCapacity + lastRegionCapacity > totalLogCapacity)
		lastRegionCapacity = lastRegionCapacity - raidPtr->numSectorsPerLog;

	raidPtr->regionParityRange = raidPtr->sectorsPerDisk / rf_numParityRegions;
	maxRegionParityRange = raidPtr->regionParityRange;

	/* i can't remember why this line is in the code -wvcii 6/30/95 */
	/* if (raidPtr->sectorsPerDisk % rf_numParityRegions > 0)
		regionParityRange++; */

	/* build pool of unused parity logs */
	RF_Malloc(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector, (caddr_t));
	if (raidPtr->parityLogBufferHeap == NULL)
		return (ENOMEM);
	lHeapPtr = raidPtr->parityLogBufferHeap;
	rc = rf_mutex_init(&raidPtr->parityLogPool.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->numParityLogs; i++) {
		if (i == 0) {
			RF_Calloc(raidPtr->parityLogPool.parityLogs, 1, sizeof(RF_ParityLog_t), (RF_ParityLog_t *));
			if (raidPtr->parityLogPool.parityLogs == NULL) {
				RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
				return (ENOMEM);
			}
			l = raidPtr->parityLogPool.parityLogs;
		} else {
			RF_Calloc(l->next, 1, sizeof(RF_ParityLog_t), (RF_ParityLog_t *));
			if (l->next == NULL) {
				RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
				for (l = raidPtr->parityLogPool.parityLogs; l; l = next) {
					next = l->next;
					if (l->records)
						RF_Free(l->records, (raidPtr->numSectorsPerLog * sizeof(RF_ParityLogRecord_t)));
					RF_Free(l, sizeof(RF_ParityLog_t));
				}
				return (ENOMEM);
			}
			l = l->next;
		}
		l->bufPtr = lHeapPtr;
		lHeapPtr += raidPtr->numSectorsPerLog * raidPtr->bytesPerSector;
		RF_Malloc(l->records, (raidPtr->numSectorsPerLog * sizeof(RF_ParityLogRecord_t)), (RF_ParityLogRecord_t *));
		if (l->records == NULL) {
			RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
			for (l = raidPtr->parityLogPool.parityLogs; l; l = next) {
				next = l->next;
				if (l->records)
					RF_Free(l->records, (raidPtr->numSectorsPerLog * sizeof(RF_ParityLogRecord_t)));
				RF_Free(l, sizeof(RF_ParityLog_t));
			}
			return (ENOMEM);
		}
	}
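	/*
	 * Sketch of the resulting pool (descriptive note, not original
	 * commentary): parityLogPool now heads a singly linked list of
	 * numParityLogs RF_ParityLog_t structures; log k's bufPtr points at
	 * byte offset k * numSectorsPerLog * bytesPerSector within the
	 * single parityLogBufferHeap allocation, and each log has its own
	 * records array of numSectorsPerLog entries.
	 */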
	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingPool, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownParityLoggingPool(raidPtr);
		return (rc);
	}
	/* build pool of region buffers */
	rc = rf_mutex_init(&raidPtr->regionBufferPool.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (ENOMEM);
	}
	rc = rf_cond_init(&raidPtr->regionBufferPool.cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_mutex_destroy(&raidPtr->regionBufferPool.mutex);
		return (ENOMEM);
	}
	raidPtr->regionBufferPool.bufferSize = raidPtr->regionLogCapacity * raidPtr->bytesPerSector;
	printf("regionBufferPool.bufferSize %d\n", raidPtr->regionBufferPool.bufferSize);
	raidPtr->regionBufferPool.totalBuffers = 1;	/* for now, only one
							 * region at a time may
							 * be reintegrated */
	raidPtr->regionBufferPool.availableBuffers = raidPtr->regionBufferPool.totalBuffers;
	raidPtr->regionBufferPool.availBuffersIndex = 0;
	raidPtr->regionBufferPool.emptyBuffersIndex = 0;
	RF_Malloc(raidPtr->regionBufferPool.buffers, raidPtr->regionBufferPool.totalBuffers * sizeof(caddr_t), (caddr_t *));
	if (raidPtr->regionBufferPool.buffers == NULL) {
		rf_mutex_destroy(&raidPtr->regionBufferPool.mutex);
		rf_cond_destroy(&raidPtr->regionBufferPool.cond);
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->regionBufferPool.totalBuffers; i++) {
		RF_Malloc(raidPtr->regionBufferPool.buffers[i], raidPtr->regionBufferPool.bufferSize * sizeof(char), (caddr_t));
		if (raidPtr->regionBufferPool.buffers[i] == NULL) {
			rf_mutex_destroy(&raidPtr->regionBufferPool.mutex);
			rf_cond_destroy(&raidPtr->regionBufferPool.cond);
			for (j = 0; j < i; j++) {
				RF_Free(raidPtr->regionBufferPool.buffers[j], raidPtr->regionBufferPool.bufferSize * sizeof(char));
			}
			RF_Free(raidPtr->regionBufferPool.buffers, raidPtr->regionBufferPool.totalBuffers * sizeof(caddr_t));
			return (ENOMEM);
		}
		printf("raidPtr->regionBufferPool.buffers[%d] = %lx\n", i,
		    (long) raidPtr->regionBufferPool.buffers[i]);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingRegionBufferPool, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownParityLoggingRegionBufferPool(raidPtr);
		return (rc);
	}
	/* build pool of parity buffers */
	parityBufferCapacity = maxRegionParityRange;
	rc = rf_mutex_init(&raidPtr->parityBufferPool.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	rc = rf_cond_init(&raidPtr->parityBufferPool.cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_mutex_destroy(&raidPtr->parityBufferPool.mutex);
		return (ENOMEM);
	}
	raidPtr->parityBufferPool.bufferSize = parityBufferCapacity * raidPtr->bytesPerSector;
	printf("parityBufferPool.bufferSize %d\n", raidPtr->parityBufferPool.bufferSize);
	raidPtr->parityBufferPool.totalBuffers = 1;	/* for now, only one
							 * region at a time may
							 * be reintegrated */
	raidPtr->parityBufferPool.availableBuffers = raidPtr->parityBufferPool.totalBuffers;
	raidPtr->parityBufferPool.availBuffersIndex = 0;
	raidPtr->parityBufferPool.emptyBuffersIndex = 0;
	RF_Malloc(raidPtr->parityBufferPool.buffers, raidPtr->parityBufferPool.totalBuffers * sizeof(caddr_t), (caddr_t *));
	if (raidPtr->parityBufferPool.buffers == NULL) {
		rf_mutex_destroy(&raidPtr->parityBufferPool.mutex);
		rf_cond_destroy(&raidPtr->parityBufferPool.cond);
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->parityBufferPool.totalBuffers; i++) {
		RF_Malloc(raidPtr->parityBufferPool.buffers[i], raidPtr->parityBufferPool.bufferSize * sizeof(char), (caddr_t));
		if (raidPtr->parityBufferPool.buffers[i] == NULL) {
			rf_mutex_destroy(&raidPtr->parityBufferPool.mutex);
			rf_cond_destroy(&raidPtr->parityBufferPool.cond);
			for (j = 0; j < i; j++) {
				RF_Free(raidPtr->parityBufferPool.buffers[j], raidPtr->parityBufferPool.bufferSize * sizeof(char));
			}
			RF_Free(raidPtr->parityBufferPool.buffers, raidPtr->parityBufferPool.totalBuffers * sizeof(caddr_t));
			return (ENOMEM);
		}
		printf("parityBufferPool.buffers[%d] = %lx\n", i,
		    (long) raidPtr->parityBufferPool.buffers[i]);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingParityBufferPool, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownParityLoggingParityBufferPool(raidPtr);
		return (rc);
	}
	/* initialize parityLogDiskQueue */
	rc = rf_create_managed_mutex(listp, &raidPtr->parityLogDiskQueue.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	rc = rf_create_managed_cond(listp, &raidPtr->parityLogDiskQueue.cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	raidPtr->parityLogDiskQueue.flushQueue = NULL;
	raidPtr->parityLogDiskQueue.reintQueue = NULL;
	raidPtr->parityLogDiskQueue.bufHead = NULL;
	raidPtr->parityLogDiskQueue.bufTail = NULL;
	raidPtr->parityLogDiskQueue.reintHead = NULL;
	raidPtr->parityLogDiskQueue.reintTail = NULL;
	raidPtr->parityLogDiskQueue.logBlockHead = NULL;
	raidPtr->parityLogDiskQueue.logBlockTail = NULL;
	raidPtr->parityLogDiskQueue.reintBlockHead = NULL;
	raidPtr->parityLogDiskQueue.reintBlockTail = NULL;
	raidPtr->parityLogDiskQueue.freeDataList = NULL;
	raidPtr->parityLogDiskQueue.freeCommonList = NULL;

	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingDiskQueue, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	for (i = 0; i < rf_numParityRegions; i++) {
		rc = rf_mutex_init(&raidPtr->regionInfo[i].mutex);
		if (rc) {
			RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
			    __LINE__, rc);
			for (j = 0; j < i; j++)
				FreeRegionInfo(raidPtr, j);
			RF_Free(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)));
			return (ENOMEM);
		}
		rc = rf_mutex_init(&raidPtr->regionInfo[i].reintMutex);
		if (rc) {
			RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
			    __LINE__, rc);
			rf_mutex_destroy(&raidPtr->regionInfo[i].mutex);
			for (j = 0; j < i; j++)
				FreeRegionInfo(raidPtr, j);
			RF_Free(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)));
			return (ENOMEM);
		}
		raidPtr->regionInfo[i].reintInProgress = RF_FALSE;
		raidPtr->regionInfo[i].regionStartAddr = raidPtr->regionLogCapacity * i;
		raidPtr->regionInfo[i].parityStartAddr = raidPtr->regionParityRange * i;
		if (i < rf_numParityRegions - 1) {
			raidPtr->regionInfo[i].capacity = raidPtr->regionLogCapacity;
			raidPtr->regionInfo[i].numSectorsParity = raidPtr->regionParityRange;
		} else {
			raidPtr->regionInfo[i].capacity = lastRegionCapacity;
			raidPtr->regionInfo[i].numSectorsParity = raidPtr->sectorsPerDisk - raidPtr->regionParityRange * i;
			if (raidPtr->regionInfo[i].numSectorsParity > maxRegionParityRange)
				maxRegionParityRange = raidPtr->regionInfo[i].numSectorsParity;
		}
		raidPtr->regionInfo[i].diskCount = 0;
		RF_ASSERT(raidPtr->regionInfo[i].capacity + raidPtr->regionInfo[i].regionStartAddr <= totalLogCapacity);
		RF_ASSERT(raidPtr->regionInfo[i].parityStartAddr + raidPtr->regionInfo[i].numSectorsParity <= raidPtr->sectorsPerDisk);
		RF_Malloc(raidPtr->regionInfo[i].diskMap, (raidPtr->regionInfo[i].capacity * sizeof(RF_DiskMap_t)), (RF_DiskMap_t *));
		if (raidPtr->regionInfo[i].diskMap == NULL) {
			rf_mutex_destroy(&raidPtr->regionInfo[i].mutex);
			rf_mutex_destroy(&raidPtr->regionInfo[i].reintMutex);
			for (j = 0; j < i; j++)
				FreeRegionInfo(raidPtr, j);
			RF_Free(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)));
			return (ENOMEM);
		}
		raidPtr->regionInfo[i].loggingEnabled = RF_FALSE;
		raidPtr->regionInfo[i].coreLog = NULL;
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLoggingRegionInfo, raidPtr);
	if (rc) {
		RF_ERRORMSG3("Unable to create shutdown entry file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownParityLoggingRegionInfo(raidPtr);
		return (rc);
	}
	RF_ASSERT(raidPtr->parityLogDiskQueue.threadState == 0);
	raidPtr->parityLogDiskQueue.threadState = RF_PLOG_CREATED;
	rc = RF_CREATE_THREAD(raidPtr->pLogDiskThreadHandle, rf_ParityLoggingDiskManager, raidPtr);
	if (rc) {
		raidPtr->parityLogDiskQueue.threadState = 0;
		RF_ERRORMSG3("Unable to create parity logging disk thread file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (ENOMEM);
	}
	/* wait for thread to start */
	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	while (!(raidPtr->parityLogDiskQueue.threadState & RF_PLOG_RUNNING)) {
		RF_WAIT_COND(raidPtr->parityLogDiskQueue.cond, raidPtr->parityLogDiskQueue.mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);

	rc = rf_ShutdownCreate(listp, rf_ShutdownParityLogging, raidPtr);
	if (rc) {
		RF_ERRORMSG1("Got rc=%d adding parity logging shutdown event\n", rc);
		rf_ShutdownParityLogging(raidPtr);
		return (rc);
	}
	if (rf_parityLogDebug) {
		printf(" size of disk log in sectors: %d\n",
		    (int) totalLogCapacity);
		printf(" total number of parity regions is %d\n", (int) rf_numParityRegions);
		printf(" nominal sectors of log per parity region is %d\n", (int) raidPtr->regionLogCapacity);
		printf(" nominal region fragmentation is %d sectors\n", (int) fragmentation);
		printf(" total number of parity logs is %d\n", raidPtr->numParityLogs);
		printf(" parity log size is %d sectors\n", raidPtr->numSectorsPerLog);
		printf(" total in-core log space is %d bytes\n", (int) rf_totalInCoreLogCapacity);
	}
	rf_EnableParityLogging(raidPtr);

	return (0);
}

static void
FreeRegionInfo(
    RF_Raid_t * raidPtr,
    RF_RegionId_t regionID)
{
	RF_LOCK_MUTEX(raidPtr->regionInfo[regionID].mutex);
	RF_Free(raidPtr->regionInfo[regionID].diskMap, (raidPtr->regionInfo[regionID].capacity * sizeof(RF_DiskMap_t)));
	if (!rf_forceParityLogReint && raidPtr->regionInfo[regionID].coreLog) {
		rf_ReleaseParityLogs(raidPtr, raidPtr->regionInfo[regionID].coreLog);
		raidPtr->regionInfo[regionID].coreLog = NULL;
	} else {
		RF_ASSERT(raidPtr->regionInfo[regionID].coreLog == NULL);
		RF_ASSERT(raidPtr->regionInfo[regionID].diskCount == 0);
	}
	RF_UNLOCK_MUTEX(raidPtr->regionInfo[regionID].mutex);
	rf_mutex_destroy(&raidPtr->regionInfo[regionID].mutex);
	rf_mutex_destroy(&raidPtr->regionInfo[regionID].reintMutex);
}


static void
FreeParityLogQueue(
    RF_Raid_t * raidPtr,
    RF_ParityLogQueue_t * queue)
{
	RF_ParityLog_t *l1, *l2;

	RF_LOCK_MUTEX(queue->mutex);
	l1 = queue->parityLogs;
	while (l1) {
		l2 = l1;
		l1 = l2->next;
		RF_Free(l2->records, (raidPtr->numSectorsPerLog * sizeof(RF_ParityLogRecord_t)));
		RF_Free(l2, sizeof(RF_ParityLog_t));
	}
	RF_UNLOCK_MUTEX(queue->mutex);
	rf_mutex_destroy(&queue->mutex);
}


static void
FreeRegionBufferQueue(RF_RegionBufferQueue_t * queue)
{
	int i;

	RF_LOCK_MUTEX(queue->mutex);
	if (queue->availableBuffers != queue->totalBuffers) {
		printf("Attempt to free region queue which is still in use!\n");
		RF_ASSERT(0);
	}
	for (i = 0; i < queue->totalBuffers; i++)
		RF_Free(queue->buffers[i], queue->bufferSize);
	RF_Free(queue->buffers, queue->totalBuffers * sizeof(caddr_t));
	RF_UNLOCK_MUTEX(queue->mutex);
	rf_mutex_destroy(&queue->mutex);
}

static void
rf_ShutdownParityLoggingRegionInfo(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;
	RF_RegionId_t i;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingRegionInfo\n", tid);
	}
	/* free region information structs */
	for (i = 0; i < rf_numParityRegions; i++)
		FreeRegionInfo(raidPtr, i);
	RF_Free(raidPtr->regionInfo, (rf_numParityRegions * sizeof(RF_RegionInfo_t)));
	raidPtr->regionInfo = NULL;
}

static void
rf_ShutdownParityLoggingPool(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingPool\n", tid);
	}
	/* free contents of parityLogPool */
	FreeParityLogQueue(raidPtr, &raidPtr->parityLogPool);
	RF_Free(raidPtr->parityLogBufferHeap, raidPtr->numParityLogs * raidPtr->numSectorsPerLog * raidPtr->bytesPerSector);
}

static void
rf_ShutdownParityLoggingRegionBufferPool(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingRegionBufferPool\n", tid);
	}
	FreeRegionBufferQueue(&raidPtr->regionBufferPool);
}

static void
rf_ShutdownParityLoggingParityBufferPool(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingParityBufferPool\n", tid);
	}
	FreeRegionBufferQueue(&raidPtr->parityBufferPool);
}

static void
rf_ShutdownParityLoggingDiskQueue(RF_ThreadArg_t arg)
{
	RF_ParityLogData_t *d;
	RF_CommonLogData_t *c;
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLoggingDiskQueue\n", tid);
	}
	/* free disk manager stuff */
	RF_ASSERT(raidPtr->parityLogDiskQueue.bufHead == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.bufTail == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.reintHead == NULL);
	RF_ASSERT(raidPtr->parityLogDiskQueue.reintTail == NULL);
	while (raidPtr->parityLogDiskQueue.freeDataList) {
		d = raidPtr->parityLogDiskQueue.freeDataList;
		raidPtr->parityLogDiskQueue.freeDataList = raidPtr->parityLogDiskQueue.freeDataList->next;
		RF_Free(d, sizeof(RF_ParityLogData_t));
	}
	while (raidPtr->parityLogDiskQueue.freeCommonList) {
		c = raidPtr->parityLogDiskQueue.freeCommonList;
		rf_mutex_destroy(&c->mutex);
		raidPtr->parityLogDiskQueue.freeCommonList = raidPtr->parityLogDiskQueue.freeCommonList->next;
		RF_Free(c, sizeof(RF_CommonLogData_t));
	}
}

static void
rf_ShutdownParityLogging(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) arg;
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLogging\n", tid);
	}
	/* shutdown disk thread */
	/* This has the desirable side-effect of forcing all regions to be
	 * reintegrated. This is necessary since all parity log maps are
	 * currently held in volatile memory. */

	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	raidPtr->parityLogDiskQueue.threadState |= RF_PLOG_TERMINATE;
	RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	RF_SIGNAL_COND(raidPtr->parityLogDiskQueue.cond);
	/*
	 * pLogDiskThread will now terminate when queues are cleared;
	 * now wait for it to be done
	 */
	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	while (!(raidPtr->parityLogDiskQueue.threadState & RF_PLOG_SHUTDOWN)) {
		RF_WAIT_COND(raidPtr->parityLogDiskQueue.cond, raidPtr->parityLogDiskQueue.mutex);
	}
	RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	if (rf_parityLogDebug) {
		int tid;
		rf_get_threadid(tid);
		printf("[%d] ShutdownParityLogging done (thread completed)\n", tid);
	}
}

int
rf_GetDefaultNumFloatingReconBuffersParityLogging(RF_Raid_t * raidPtr)
{
	return (20);
}

RF_HeadSepLimit_t
rf_GetDefaultHeadSepLimitParityLogging(RF_Raid_t * raidPtr)
{
	return (10);
}
/* return the region ID for a given RAID address */
RF_RegionId_t
rf_MapRegionIDParityLogging(
    RF_Raid_t * raidPtr,
    RF_SectorNum_t address)
{
	RF_RegionId_t regionID;

	/* regionID = address / (raidPtr->regionParityRange * raidPtr->Layout.numDataCol); */
	regionID = address / raidPtr->regionParityRange;
	if (regionID == rf_numParityRegions) {
		/* last region may be larger than other regions */
		regionID--;
	}
	RF_ASSERT(address >= raidPtr->regionInfo[regionID].parityStartAddr);
	RF_ASSERT(address < raidPtr->regionInfo[regionID].parityStartAddr + raidPtr->regionInfo[regionID].numSectorsParity);
	RF_ASSERT(regionID < rf_numParityRegions);
	return (regionID);
}
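/*
 * Worked example of the mapping above (hypothetical figures, not from the
 * original source): with sectorsPerDisk = 1000000 and rf_numParityRegions
 * = 30, regionParityRange = 1000000 / 30 = 33333, so parity sector 70000
 * maps to region 70000 / 33333 = 2.  The last region absorbs the leftover
 * sectors (its numSectorsParity is 1000000 - 29 * 33333 = 33343), which is
 * why an address whose quotient comes out equal to rf_numParityRegions
 * (e.g. sector 999999) is folded back into the final region.
 */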


/* given a logical RAID sector, determine physical disk address of data */
void
rf_MapSectorParityLogging(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;
	*row = 0;
	/* *col = (SUID % (raidPtr->numCol -
	 * raidPtr->Layout.numParityLogCol)); */
	*col = SUID % raidPtr->Layout.numDataCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}
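/*
 * Illustrative trace of the mapping above (hypothetical parameters, not
 * from the original source): with sectorsPerStripeUnit = 32 and
 * numDataCol = 4, raidSector 1000 gives SUID = 1000 / 32 = 31, so
 * *col = 31 % 4 = 3 and *diskSector = (31 / 4) * 32 + (1000 % 32)
 * = 224 + 8 = 232.  rf_MapParityParityLogging below computes the same
 * disk offset but always targets the dedicated parity column
 * (numDataCol).
 */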


/* given a logical RAID sector, determine physical disk address of parity */
void
rf_MapParityParityLogging(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t raidSector,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * diskSector,
    int remap)
{
	RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit;

	*row = 0;
	/* *col = raidPtr->Layout.numDataCol -
	 * (SUID / raidPtr->Layout.numDataCol) %
	 * (raidPtr->numCol - raidPtr->Layout.numParityLogCol); */
	*col = raidPtr->Layout.numDataCol;
	*diskSector = (SUID / (raidPtr->Layout.numDataCol)) * raidPtr->Layout.sectorsPerStripeUnit +
	    (raidSector % raidPtr->Layout.sectorsPerStripeUnit);
}


/* given a regionID and sector offset, determine the physical disk address of the parity log */
void
rf_MapLogParityLogging(
    RF_Raid_t * raidPtr,
    RF_RegionId_t regionID,
    RF_SectorNum_t regionOffset,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * startSector)
{
	*row = 0;
	*col = raidPtr->numCol - 1;
	*startSector = raidPtr->regionInfo[regionID].regionStartAddr + regionOffset;
}


/* given a regionID, determine the physical disk address of the logged parity for that region */
void
rf_MapRegionParity(
    RF_Raid_t * raidPtr,
    RF_RegionId_t regionID,
    RF_RowCol_t * row,
    RF_RowCol_t * col,
    RF_SectorNum_t * startSector,
    RF_SectorCount_t * numSector)
{
	*row = 0;
	*col = raidPtr->numCol - 2;
	*startSector = raidPtr->regionInfo[regionID].parityStartAddr;
	*numSector = raidPtr->regionInfo[regionID].numSectorsParity;
}


/* given a logical RAID address, determine the participating disks in the stripe */
void
rf_IdentifyStripeParityLogging(
    RF_Raid_t * raidPtr,
    RF_RaidAddr_t addr,
    RF_RowCol_t ** diskids,
    RF_RowCol_t * outRow)
{
	RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr);
	RF_ParityLoggingConfigInfo_t *info = (RF_ParityLoggingConfigInfo_t *) raidPtr->Layout.layoutSpecificInfo;
	*outRow = 0;
	*diskids = info->stripeIdentifier[stripeID % raidPtr->numCol];
}


void
rf_MapSIDToPSIDParityLogging(
    RF_RaidLayout_t * layoutPtr,
    RF_StripeNum_t stripeID,
    RF_StripeNum_t * psID,
    RF_ReconUnitNum_t * which_ru)
{
	*which_ru = 0;
	*psID = stripeID;
}


/* select an algorithm for performing an access.  Returns a pointer to a
 * function that will create the DAG for that access.
 */
void
rf_ParityLoggingDagSelect(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    RF_AccessStripeMap_t * asmp,
    RF_VoidFuncPtr * createFunc)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_PhysDiskAddr_t *failedPDA = NULL;
	RF_RowCol_t frow, fcol;
	RF_RowStatus_t rstat;
	int prior_recon;
	int tid;

	RF_ASSERT(RF_IO_IS_R_OR_W(type));

	if (asmp->numDataFailed + asmp->numParityFailed > 1) {
		RF_ERRORMSG("Multiple disks failed in a single group! Aborting I/O operation.\n");
		/* *infoFunc = */ *createFunc = NULL;
		return;
	} else
		if (asmp->numDataFailed + asmp->numParityFailed == 1) {

			/* if under recon & already reconstructed, redirect
			 * the access to the spare drive and eliminate the
			 * failure indication */
			failedPDA = asmp->failedPDAs[0];
			frow = failedPDA->row;
			fcol = failedPDA->col;
			rstat = raidPtr->status[failedPDA->row];
			prior_recon = (rstat == rf_rs_reconfigured) || (
			    (rstat == rf_rs_reconstructing) ?
			    rf_CheckRUReconstructed(raidPtr->reconControl[frow]->reconMap, failedPDA->startSector) : 0
			    );
			if (prior_recon) {
				RF_RowCol_t or = failedPDA->row, oc = failedPDA->col;
				RF_SectorNum_t oo = failedPDA->startSector;
				if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
					/* redirect to dist spare space */

					if (failedPDA == asmp->parityInfo) {

						/* parity has failed */
						(layoutPtr->map->MapParity) (raidPtr, failedPDA->raidAddress, &failedPDA->row,
						    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						/* redir 2nd component, if any */
						if (asmp->parityInfo->next) {
							RF_PhysDiskAddr_t *p = asmp->parityInfo->next;
							RF_SectorNum_t SUoffs = p->startSector % layoutPtr->sectorsPerStripeUnit;
							p->row = failedPDA->row;
							p->col = failedPDA->col;
							/* cheating: startSector is not really a RAID address */
							p->startSector = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, failedPDA->startSector) +
							    SUoffs;
						}
					} else
						if (asmp->parityInfo->next && failedPDA == asmp->parityInfo->next) {
							RF_ASSERT(0);	/* should not ever happen */
						} else {

							/* data has failed */
							(layoutPtr->map->MapSector) (raidPtr, failedPDA->raidAddress, &failedPDA->row,
							    &failedPDA->col, &failedPDA->startSector, RF_REMAP);

						}

				} else {
					/* redirect to dedicated spare space */

					failedPDA->row = raidPtr->Disks[frow][fcol].spareRow;
					failedPDA->col = raidPtr->Disks[frow][fcol].spareCol;

					/* the parity may have two distinct
					 * components, both of which may need
					 * to be redirected */
					if (asmp->parityInfo->next) {
						if (failedPDA == asmp->parityInfo) {
							failedPDA->next->row = failedPDA->row;
							failedPDA->next->col = failedPDA->col;
						} else
							if (failedPDA == asmp->parityInfo->next) {
								/* paranoid: should never occur */
								asmp->parityInfo->row = failedPDA->row;
								asmp->parityInfo->col = failedPDA->col;
							}
					}
				}

				RF_ASSERT(failedPDA->col != -1);

				if (rf_dagDebug || rf_mapDebug) {
					rf_get_threadid(tid);
					printf("[%d] Redirected type '%c' r %d c %d o %ld -> r %d c %d o %ld\n",
					    tid, type, or, oc, (long) oo, failedPDA->row, failedPDA->col, (long) failedPDA->startSector);
				}
				asmp->numDataFailed = asmp->numParityFailed = 0;
			}
		}
	if (type == RF_IO_TYPE_READ) {

		if (asmp->numDataFailed == 0)
			*createFunc = (RF_VoidFuncPtr) rf_CreateFaultFreeReadDAG;
		else
			*createFunc = (RF_VoidFuncPtr) rf_CreateRaidFiveDegradedReadDAG;

	} else {


		/* if mirroring, always use large writes.  If the access
		 * requires two distinct parity updates, always do a small
		 * write.  If the stripe contains a failure but the access
		 * does not, do a small write.  The first conditional
		 * (numStripeUnitsAccessed <= numDataCol/2) uses a
		 * less-than-or-equal rather than just a less-than because
		 * when G is 3 or 4, numDataCol/2 is 1, and I want
		 * single-stripe-unit updates to use just one disk. */
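		/*
		 * Concrete illustration of the threshold (hypothetical
		 * numbers, not from the original source): with
		 * numDataCol = 8 the cutoff is 8 / 2 = 4, so a fault-free
		 * write touching 1-4 stripe units takes the parity-logging
		 * small-write DAG, while one touching 5 or more stripe
		 * units takes the large-write DAG; with numDataCol = 3 the
		 * cutoff is 1, so only single-stripe-unit writes go the
		 * small-write route.
		 */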
		if ((asmp->numDataFailed + asmp->numParityFailed) == 0) {
			if (((asmp->numStripeUnitsAccessed <= (layoutPtr->numDataCol / 2)) && (layoutPtr->numDataCol != 1)) ||
			    (asmp->parityInfo->next != NULL) || rf_CheckStripeForFailures(raidPtr, asmp)) {
				*createFunc = (RF_VoidFuncPtr) rf_CreateParityLoggingSmallWriteDAG;
			} else
				*createFunc = (RF_VoidFuncPtr) rf_CreateParityLoggingLargeWriteDAG;
		} else
			if (asmp->numParityFailed == 1)
				*createFunc = (RF_VoidFuncPtr) rf_CreateNonRedundantWriteDAG;
			else
				if (asmp->numStripeUnitsAccessed != 1 && failedPDA->numSector != layoutPtr->sectorsPerStripeUnit)
					*createFunc = NULL;
				else
					*createFunc = (RF_VoidFuncPtr) rf_CreateDegradedWriteDAG;
	}
}
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */