rf_dagfuncs.c revision 1.1 1 /* $NetBSD: rf_dagfuncs.c,v 1.1 1998/11/13 04:20:28 oster Exp $ */
2 /*
3 * Copyright (c) 1995 Carnegie-Mellon University.
4 * All rights reserved.
5 *
6 * Author: Mark Holland, William V. Courtright II
7 *
8 * Permission to use, copy, modify and distribute this software and
9 * its documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 *
18 * Carnegie Mellon requests users of this software to return to
19 *
20 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
21 * School of Computer Science
22 * Carnegie Mellon University
23 * Pittsburgh PA 15213-3890
24 *
25 * any improvements or extensions that they make and grant Carnegie the
26 * rights to redistribute these changes.
27 */
28
29 /*
30 * dagfuncs.c -- DAG node execution routines
31 *
32 * Rules:
33 * 1. Every DAG execution function must eventually cause node->status to
34 * get set to "good" or "bad", and "FinishNode" to be called. In the
35 * case of nodes that complete immediately (xor, NullNodeFunc, etc),
36 * the node execution function can do these two things directly. In
37 * the case of nodes that have to wait for some event (a disk read to
38 * complete, a lock to be released, etc) to occur before they can
39 * complete, this is typically achieved by having whatever module
40 * is doing the operation call GenericWakeupFunc upon completion.
41 * 2. DAG execution functions should check the status in the DAG header
42 * and NOP out their operations if the status is not "enable". However,
43 * execution functions that release resources must be sure to release
44 * them even when they NOP out the function that would use them.
45 * Functions that acquire resources should go ahead and acquire them
46 * even when they NOP, so that a downstream release node will not have
47 * to check to find out whether or not the acquire was suppressed.
48 */
49
50 /* :
51 * Log: rf_dagfuncs.c,v
52 * Revision 1.64 1996/07/31 16:29:26 jimz
53 * LONGSHIFT -> RF_LONGSHIFT, defined in rf_types.h
54 *
55 * Revision 1.63 1996/07/30 04:00:20 jimz
56 * define LONGSHIFT for mips
57 *
58 * Revision 1.62 1996/07/28 20:31:39 jimz
59 * i386netbsd port
60 * true/false fixup
61 *
62 * Revision 1.61 1996/07/27 23:36:08 jimz
63 * Solaris port of simulator
64 *
65 * Revision 1.60 1996/07/22 19:52:16 jimz
66 * switched node params to RF_DagParam_t, a union of
67 * a 64-bit int and a void *, for better portability
68 * attempted hpux port, but failed partway through for
69 * lack of a single C compiler capable of compiling all
70 * source files
71 *
72 * Revision 1.59 1996/07/18 22:57:14 jimz
73 * port simulator to AIX
74 *
75 * Revision 1.58 1996/07/17 21:00:58 jimz
76 * clean up timer interface, tracing
77 *
78 * Revision 1.57 1996/07/15 17:22:18 jimz
79 * nit-pick code cleanup
80 * resolve stdlib problems on DEC OSF
81 *
82 * Revision 1.56 1996/06/11 01:27:50 jimz
83 * Fixed bug where diskthread shutdown would crash or hang. This
84 * turned out to be two distinct bugs:
85 * (1) [crash] The thread shutdown code wasn't properly waiting for
86 * all the diskthreads to complete. This caused diskthreads that were
87 * exiting+cleaning up to unlock a destroyed mutex.
88 * (2) [hang] TerminateDiskQueues wasn't locking, and DiskIODequeue
89 * only checked for termination _after_ a wakeup if the queues were
90 * empty. This was a race where the termination wakeup could be lost
91 * by the dequeueing thread, and the system would hang waiting for the
92 * thread to exit, while the thread waited for an I/O or a signal to
93 * check the termination flag.
94 *
95 * Revision 1.55 1996/06/10 22:23:18 wvcii
96 * disk and xor funcs now optionally support undo logging
97 * for backward error recovery experiments
98 *
99 * Revision 1.54 1996/06/10 11:55:47 jimz
100 * Straightened out some per-array/not-per-array distinctions, fixed
101 * a couple bugs related to confusion. Added shutdown lists. Removed
102 * layout shutdown function (now subsumed by shutdown lists).
103 *
104 * Revision 1.53 1996/06/07 21:33:04 jimz
105 * begin using consistent types for sector numbers,
106 * stripe numbers, row+col numbers, recon unit numbers
107 *
108 * Revision 1.52 1996/06/06 17:28:44 jimz
109 * add new read mirror partition func, rename old read mirror
110 * to rf_DiskReadMirrorIdleFunc
111 *
112 * Revision 1.51 1996/06/03 23:28:26 jimz
113 * more bugfixes
114 * check in tree to sync for IPDS runs with current bugfixes
115 * there still may be a problem with threads in the script test
116 * getting I/Os stuck- not trivially reproducible (runs ~50 times
117 * in a row without getting stuck)
118 *
119 * Revision 1.50 1996/06/02 17:31:48 jimz
120 * Moved a lot of global stuff into array structure, where it belongs.
121 * Fixed up paritylogging, pss modules in this manner. Some general
122 * code cleanup. Removed lots of dead code, some dead files.
123 *
124 * Revision 1.49 1996/05/31 22:26:54 jimz
125 * fix a lot of mapping problems, memory allocation problems
126 * found some weird lock issues, fixed 'em
127 * more code cleanup
128 *
129 * Revision 1.48 1996/05/30 12:59:18 jimz
130 * make etimer happier, more portable
131 *
132 * Revision 1.47 1996/05/30 11:29:41 jimz
133 * Numerous bug fixes. Stripe lock release code disagreed with the taking code
134 * about when stripes should be locked (I made it consistent: no parity, no lock)
135 * There was a lot of extra serialization of I/Os which I've removed- a lot of
136 * it was to calculate values for the cache code, which is no longer with us.
137 * More types, function, macro cleanup. Added code to properly quiesce the array
138 * on shutdown. Made a lot of stuff array-specific which was (bogusly) general
139 * before. Fixed memory allocation, freeing bugs.
140 *
141 * Revision 1.46 1996/05/24 22:17:04 jimz
142 * continue code + namespace cleanup
143 * typed a bunch of flags
144 *
145 * Revision 1.45 1996/05/24 04:28:55 jimz
146 * release cleanup ckpt
147 *
148 * Revision 1.44 1996/05/23 21:46:35 jimz
149 * checkpoint in code cleanup (release prep)
150 * lots of types, function names have been fixed
151 *
152 * Revision 1.43 1996/05/23 00:33:23 jimz
153 * code cleanup: move all debug decls to rf_options.c, all extern
154 * debug decls to rf_options.h, all debug vars preceded by rf_
155 *
156 * Revision 1.42 1996/05/18 19:51:34 jimz
157 * major code cleanup- fix syntax, make some types consistent,
158 * add prototypes, clean out dead code, et cetera
159 *
160 * Revision 1.41 1996/05/08 21:01:24 jimz
161 * fixed up enum type names that were conflicting with other
162 * enums and function names (ie, "panic")
163 * future naming trends will be towards RF_ and rf_ for
164 * everything raidframe-related
165 *
166 * Revision 1.40 1996/05/08 15:24:14 wvcii
167 * modified GenericWakeupFunc to use recover, undone, and panic node states
168 *
169 * Revision 1.39 1996/05/02 17:18:01 jimz
170 * fix up headers for user-land, following ccmn cleanup
171 *
172 * Revision 1.38 1996/05/01 16:26:51 jimz
173 * don't include rf_ccmn.h (get ready to phase out)
174 *
175 * Revision 1.37 1995/12/12 18:10:06 jimz
176 * MIN -> RF_MIN, MAX -> RF_MAX, ASSERT -> RF_ASSERT
177 * fix 80-column brain damage in comments
178 *
179 * Revision 1.36 1995/12/04 19:19:09 wvcii
180 * modified DiskReadMirrorFunc
181 * - added fifth parameter, physical disk address of mirror copy
182 * - SelectIdleDisk conditionally swaps parameters 0 & 4
183 *
184 * Revision 1.35 1995/12/01 15:58:33 root
185 * added copyright info
186 *
187 * Revision 1.34 1995/11/17 18:12:17 amiri
188 * Changed DiskReadMirrorFunc to use the generic mapping routines
189 * to find the mirror of the data, function was assuming RAID level 1.
190 *
191 * Revision 1.33 1995/11/17 15:15:59 wvcii
192 * changes in DiskReadMirrorFunc
193 * - added ASSERTs
194 * - added call to MapParityRAID1
195 *
196 * Revision 1.32 1995/11/07 16:25:50 wvcii
197 * added DiskUnlockFuncForThreads
198 * general debugging of undo functions (first time they were used)
199 *
200 * Revision 1.31 1995/09/06 19:23:36 wvcii
201 * fixed tracing for parity logging nodes
202 *
203 * Revision 1.30 95/07/07 00:13:01 wvcii
204 * added 4th parameter to ParityLogAppend
205 *
206 */
207
208 #ifdef _KERNEL
209 #define KERNEL
210 #endif
211
212 #ifndef KERNEL
213 #include <errno.h>
214 #endif /* !KERNEL */
215
216 #include <sys/ioctl.h>
217 #include <sys/param.h>
218
219 #include "rf_archs.h"
220 #include "rf_raid.h"
221 #include "rf_dag.h"
222 #include "rf_layout.h"
223 #include "rf_etimer.h"
224 #include "rf_acctrace.h"
225 #include "rf_diskqueue.h"
226 #include "rf_dagfuncs.h"
227 #include "rf_general.h"
228 #include "rf_engine.h"
229 #include "rf_dagutils.h"
230
231 #ifdef KERNEL
232 #include "rf_kintf.h"
233 #endif /* KERNEL */
234
235 #if RF_INCLUDE_PARITYLOGGING > 0
236 #include "rf_paritylog.h"
237 #endif /* RF_INCLUDE_PARITYLOGGING > 0 */
238
239 int (*rf_DiskReadFunc)(RF_DagNode_t *);
240 int (*rf_DiskWriteFunc)(RF_DagNode_t *);
241 int (*rf_DiskReadUndoFunc)(RF_DagNode_t *);
242 int (*rf_DiskWriteUndoFunc)(RF_DagNode_t *);
243 int (*rf_DiskUnlockFunc)(RF_DagNode_t *);
244 int (*rf_DiskUnlockUndoFunc)(RF_DagNode_t *);
245 int (*rf_RegularXorUndoFunc)(RF_DagNode_t *);
246 int (*rf_SimpleXorUndoFunc)(RF_DagNode_t *);
247 int (*rf_RecoveryXorUndoFunc)(RF_DagNode_t *);
248
249 /*****************************************************************************************
250 * main (only) configuration routine for this module
251 ****************************************************************************************/
252 int rf_ConfigureDAGFuncs(listp)
253 RF_ShutdownList_t **listp;
254 {
255 RF_ASSERT( ((sizeof(long)==8) && RF_LONGSHIFT==3) || ((sizeof(long)==4) && RF_LONGSHIFT==2) );
256 rf_DiskReadFunc = rf_DiskReadFuncForThreads;
257 rf_DiskReadUndoFunc = rf_DiskUndoFunc;
258 rf_DiskWriteFunc = rf_DiskWriteFuncForThreads;
259 rf_DiskWriteUndoFunc = rf_DiskUndoFunc;
260 rf_DiskUnlockFunc = rf_DiskUnlockFuncForThreads;
261 rf_DiskUnlockUndoFunc = rf_NullNodeUndoFunc;
262 rf_RegularXorUndoFunc = rf_NullNodeUndoFunc;
263 rf_SimpleXorUndoFunc = rf_NullNodeUndoFunc;
264 rf_RecoveryXorUndoFunc = rf_NullNodeUndoFunc;
265 return(0);
266 }
267
268
269
270 /*****************************************************************************************
271 * the execution function associated with a terminate node
272 ****************************************************************************************/
273 int rf_TerminateFunc(node)
274 RF_DagNode_t *node;
275 {
276 RF_ASSERT(node->dagHdr->numCommits == node->dagHdr->numCommitNodes);
277 node->status = rf_good;
278 return(rf_FinishNode(node, RF_THREAD_CONTEXT));
279 }
280
281 int rf_TerminateUndoFunc(node)
282 RF_DagNode_t *node;
283 {
284 return(0);
285 }
286
287
288 /*****************************************************************************************
289 * execution functions associated with a mirror node
290 *
291 * parameters:
292 *
293 * 0 - physical disk addres of data
294 * 1 - buffer for holding read data
295 * 2 - parity stripe ID
296 * 3 - flags
297 * 4 - physical disk address of mirror (parity)
298 *
299 ****************************************************************************************/
300
301 int rf_DiskReadMirrorIdleFunc(node)
302 RF_DagNode_t *node;
303 {
304 /* select the mirror copy with the shortest queue and fill in node parameters
305 with physical disk address */
306
307 rf_SelectMirrorDiskIdle(node);
308 return(rf_DiskReadFunc(node));
309 }
310
311 int rf_DiskReadMirrorPartitionFunc(node)
312 RF_DagNode_t *node;
313 {
314 /* select the mirror copy with the shortest queue and fill in node parameters
315 with physical disk address */
316
317 rf_SelectMirrorDiskPartition(node);
318 return(rf_DiskReadFunc(node));
319 }
320
321 int rf_DiskReadMirrorUndoFunc(node)
322 RF_DagNode_t *node;
323 {
324 return(0);
325 }
326
327
328
329 #if RF_INCLUDE_PARITYLOGGING > 0
/*****************************************************************************************
 * the execution function associated with a parity log update node
 *
 * params[0] is the physical disk address, params[1] the data buffer.
 * On success the log-append path is responsible for invoking node->wakeFunc
 * when the log I/O completes; on allocation failure the node is failed
 * immediately with ENOMEM.
 ****************************************************************************************/
int rf_ParityLogUpdateFunc(node)
  RF_DagNode_t *node;
{
  RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
  caddr_t buf = (caddr_t) node->params[1].p;
  RF_ParityLogData_t *logData;
  RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
  RF_Etimer_t timer;

  /* NOP when the DAG is not enabled (rule 2 in the file header) */
  if (node->dagHdr->status == rf_enable)
    {
      RF_ETIMER_START(timer);
      logData = rf_CreateParityLogData(RF_UPDATE, pda, buf,
                                       (RF_Raid_t *) (node->dagHdr->raidPtr),
                                       node->wakeFunc, (void *) node,
                                       node->dagHdr->tracerec, timer);
      if (logData)
        rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
      else
        {
          /* allocation failed: charge the elapsed time to parity logging and
           * fail the node with ENOMEM */
          RF_ETIMER_STOP(timer); RF_ETIMER_EVAL(timer); tracerec->plog_us += RF_ETIMER_VAL_US(timer);
          (node->wakeFunc)(node, ENOMEM);
        }
    }
  return(0);
}
359
360
/*****************************************************************************************
 * the execution function associated with a parity log overwrite node
 *
 * Identical in structure to rf_ParityLogUpdateFunc, but logs an RF_OVERWRITE
 * record instead of an RF_UPDATE record.
 ****************************************************************************************/
int rf_ParityLogOverwriteFunc(node)
  RF_DagNode_t *node;
{
  RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
  caddr_t buf = (caddr_t) node->params[1].p;
  RF_ParityLogData_t *logData;
  RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
  RF_Etimer_t timer;

  /* NOP when the DAG is not enabled (rule 2 in the file header) */
  if (node->dagHdr->status == rf_enable)
    {
      RF_ETIMER_START(timer);
      logData = rf_CreateParityLogData(RF_OVERWRITE, pda, buf, (RF_Raid_t *) (node->dagHdr->raidPtr),
                                       node->wakeFunc, (void *) node, node->dagHdr->tracerec, timer);
      if (logData)
        rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
      else
        {
          /* allocation failed: charge the elapsed time to parity logging and
           * fail the node with ENOMEM */
          RF_ETIMER_STOP(timer); RF_ETIMER_EVAL(timer); tracerec->plog_us += RF_ETIMER_VAL_US(timer);
          (node->wakeFunc)(node, ENOMEM);
        }
    }
  return(0);
}
388
389 #else /* RF_INCLUDE_PARITYLOGGING > 0 */
390
391 int rf_ParityLogUpdateFunc(node)
392 RF_DagNode_t *node;
393 {
394 return(0);
395 }
396 int rf_ParityLogOverwriteFunc(node)
397 RF_DagNode_t *node;
398 {
399 return(0);
400 }
401
402 #endif /* RF_INCLUDE_PARITYLOGGING > 0 */
403
404 int rf_ParityLogUpdateUndoFunc(node)
405 RF_DagNode_t *node;
406 {
407 return(0);
408 }
409
410 int rf_ParityLogOverwriteUndoFunc(node)
411 RF_DagNode_t *node;
412 {
413 return(0);
414 }
415
416 /*****************************************************************************************
417 * the execution function associated with a NOP node
418 ****************************************************************************************/
419 int rf_NullNodeFunc(node)
420 RF_DagNode_t *node;
421 {
422 node->status = rf_good;
423 return(rf_FinishNode(node, RF_THREAD_CONTEXT));
424 }
425
426 int rf_NullNodeUndoFunc(node)
427 RF_DagNode_t *node;
428 {
429 node->status = rf_undone;
430 return(rf_FinishNode(node, RF_THREAD_CONTEXT));
431 }
432
433
/*****************************************************************************************
 * the execution function associated with a disk-read node
 *
 * params: 0 = physical disk address, 1 = target buffer, 2 = parity stripe ID,
 * 3 = packed priority/lock/unlock/RU flags.  Completion is reported
 * asynchronously via node->wakeFunc when the queued I/O finishes.
 ****************************************************************************************/
int rf_DiskReadFuncForThreads(node)
  RF_DagNode_t *node;
{
  RF_DiskQueueData_t *req;
  RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *)node->params[0].p;
  caddr_t buf = (caddr_t)node->params[1].p;
  RF_StripeNum_t parityStripeID = (RF_StripeNum_t)node->params[2].v;
  unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
  unsigned lock = RF_EXTRACT_LOCK_FLAG(node->params[3].v);
  unsigned unlock = RF_EXTRACT_UNLOCK_FLAG(node->params[3].v);
  unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
  RF_DiskQueueDataFlags_t flags = 0;
  /* if the DAG is disabled we still queue a NOP request, so that any queue
   * lock/unlock side effects of this node are performed anyway (rule 2 in
   * the file header) */
  RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_READ : RF_IO_TYPE_NOP;
  RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
  void *b_proc = NULL;
#if RF_BACKWARD > 0
  caddr_t undoBuf;
#endif

#ifdef KERNEL
  if (node->dagHdr->bp) b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;
#endif /* KERNEL */

  /* a request may lock or unlock its disk queue, but never both */
  RF_ASSERT( !(lock && unlock) );
  flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
  flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
#if RF_BACKWARD > 0
  /* allocate and zero the undo buffer.
   * this is equivalent to copying the original buffer's contents to the undo buffer
   * prior to performing the disk read.
   * XXX hardcoded 512 bytes per sector!
   */
  if (node->dagHdr->allocList == NULL)
    rf_MakeAllocList(node->dagHdr->allocList);
  RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
#endif /* RF_BACKWARD > 0 */
  req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
                               buf, parityStripeID, which_ru,
                               (int (*)(void *,int)) node->wakeFunc,
                               node, NULL, node->dagHdr->tracerec,
                               (void *)(node->dagHdr->raidPtr), flags, b_proc);
  if (!req) {
    /* could not allocate the request: fail the node with ENOMEM */
    (node->wakeFunc)(node, ENOMEM);
  } else {
    node->dagFuncData = (void *) req;
    rf_DiskIOEnqueue( &(dqs[pda->row][pda->col]), req, priority );
  }
  return(0);
}
486
487
/*****************************************************************************************
 * the execution function associated with a disk-write node
 *
 * Same parameter layout as the disk-read node.  When RF_BACKWARD > 0 the node
 * fires twice: the first firing (status rf_fired) schedules a pre-read of the
 * old data into an undo buffer and sets status to rf_bwd1; the wakeup routine
 * then advances status to rf_bwd2 and re-enters this function, whose second
 * firing performs the real write.  Note that when RF_BACKWARD > 0 the function
 * returns inside the #if block, so the "normal processing" tail below the
 * #endif is compiled but unreachable in that configuration.
 ****************************************************************************************/
int rf_DiskWriteFuncForThreads(node)
  RF_DagNode_t *node;
{
  RF_DiskQueueData_t *req;
  RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *)node->params[0].p;
  caddr_t buf = (caddr_t)node->params[1].p;
  RF_StripeNum_t parityStripeID = (RF_StripeNum_t)node->params[2].v;
  unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
  unsigned lock = RF_EXTRACT_LOCK_FLAG(node->params[3].v);
  unsigned unlock = RF_EXTRACT_UNLOCK_FLAG(node->params[3].v);
  unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
  RF_DiskQueueDataFlags_t flags = 0;
  /* disabled DAGs still queue a NOP so queue lock/unlock side effects occur */
  RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_WRITE : RF_IO_TYPE_NOP;
  RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
  void *b_proc = NULL;
#if RF_BACKWARD > 0
  caddr_t undoBuf;
#endif

#ifdef KERNEL
  if (node->dagHdr->bp) b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;
#endif /* KERNEL */

#if RF_BACKWARD > 0
  /* This area is used only for backward error recovery experiments
   * First, schedule allocate a buffer and schedule a pre-read of the disk
   * After the pre-read, proceed with the normal disk write
   */
  if (node->status == rf_bwd2) {
    /* just finished undo logging, now perform real function */
    node->status = rf_fired;
    RF_ASSERT( !(lock && unlock) );
    flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
    flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
    req = rf_CreateDiskQueueData(iotype,
                                 pda->startSector, pda->numSector, buf, parityStripeID, which_ru,
                                 node->wakeFunc, (void *) node, NULL, node->dagHdr->tracerec,
                                 (void *) (node->dagHdr->raidPtr), flags, b_proc);

    if (!req) {
      (node->wakeFunc)(node, ENOMEM);
    } else {
      node->dagFuncData = (void *) req;
      rf_DiskIOEnqueue( &(dqs[pda->row][pda->col]), req, priority );
    }
  }

  else {
    /* node status should be rf_fired */
    /* schedule a disk pre-read of the old data into a freshly allocated,
     * zeroed undo buffer (XXX 512 bytes/sector hardcoded) */
    node->status = rf_bwd1;
    RF_ASSERT( !(lock && unlock) );
    flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
    flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
    if (node->dagHdr->allocList == NULL)
      rf_MakeAllocList(node->dagHdr->allocList);
    RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
    req = rf_CreateDiskQueueData(RF_IO_TYPE_READ,
                                 pda->startSector, pda->numSector, undoBuf, parityStripeID, which_ru,
                                 node->wakeFunc, (void *) node, NULL, node->dagHdr->tracerec,
                                 (void *) (node->dagHdr->raidPtr), flags, b_proc);

    if (!req) {
      (node->wakeFunc)(node, ENOMEM);
    } else {
      node->dagFuncData = (void *) req;
      rf_DiskIOEnqueue( &(dqs[pda->row][pda->col]), req, priority );
    }
  }
  return(0);
#endif /* RF_BACKWARD > 0 */

  /* normal processing (rollaway or forward recovery) begins here */
  RF_ASSERT( !(lock && unlock) );
  flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
  flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
  req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
                               buf, parityStripeID, which_ru,
                               (int (*)(void *,int)) node->wakeFunc,
                               (void *) node, NULL,
                               node->dagHdr->tracerec,
                               (void *) (node->dagHdr->raidPtr),
                               flags, b_proc);

  if (!req) {
    /* could not allocate the request: fail the node with ENOMEM */
    (node->wakeFunc)(node, ENOMEM);
  } else {
    node->dagFuncData = (void *) req;
    rf_DiskIOEnqueue( &(dqs[pda->row][pda->col]), req, priority );
  }

  return(0);
}
584
585 /*****************************************************************************************
586 * the undo function for disk nodes
587 * Note: this is not a proper undo of a write node, only locks are released.
588 * old data is not restored to disk!
589 ****************************************************************************************/
590 int rf_DiskUndoFunc(node)
591 RF_DagNode_t *node;
592 {
593 RF_DiskQueueData_t *req;
594 RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *)node->params[0].p;
595 RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
596
597 req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
598 0L, 0, NULL, 0L, 0,
599 (int (*)(void *,int)) node->wakeFunc,
600 (void *) node,
601 NULL, node->dagHdr->tracerec,
602 (void *) (node->dagHdr->raidPtr),
603 RF_UNLOCK_DISK_QUEUE, NULL);
604 if (!req)
605 (node->wakeFunc)(node, ENOMEM);
606 else {
607 node->dagFuncData = (void *) req;
608 rf_DiskIOEnqueue( &(dqs[pda->row][pda->col]), req, RF_IO_NORMAL_PRIORITY );
609 }
610
611 return(0);
612 }
613
614 /*****************************************************************************************
615 * the execution function associated with an "unlock disk queue" node
616 ****************************************************************************************/
617 int rf_DiskUnlockFuncForThreads(node)
618 RF_DagNode_t *node;
619 {
620 RF_DiskQueueData_t *req;
621 RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *)node->params[0].p;
622 RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
623
624 req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
625 0L, 0, NULL, 0L, 0,
626 (int (*)(void *,int)) node->wakeFunc,
627 (void *) node,
628 NULL, node->dagHdr->tracerec,
629 (void *) (node->dagHdr->raidPtr),
630 RF_UNLOCK_DISK_QUEUE, NULL);
631 if (!req)
632 (node->wakeFunc)(node, ENOMEM);
633 else {
634 node->dagFuncData = (void *) req;
635 rf_DiskIOEnqueue( &(dqs[pda->row][pda->col]), req, RF_IO_NORMAL_PRIORITY );
636 }
637
638 return(0);
639 }
640
/*****************************************************************************************
 * Callback routine for DiskRead and DiskWrite nodes. When the disk op completes,
 * the routine is called to set the node status and inform the execution engine that
 * the node has fired.
 *
 * status is the completion code of the I/O: zero on success, non-zero on error.
 ****************************************************************************************/
int rf_GenericWakeupFunc(node, status)
  RF_DagNode_t *node;
  int status;
{
  switch (node->status) {
    case rf_bwd1 :
      /* the undo-logging pre-read (backward error recovery) just finished:
       * free its request and re-enter the write function, which will now
       * perform the real write (node advances to rf_bwd2) */
      node->status = rf_bwd2;
      if (node->dagFuncData)
        rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
      return(rf_DiskWriteFuncForThreads(node));
      break;  /* not reached: the return above always takes this case out */
    case rf_fired :
      /* normal do-path completion: translate the I/O status */
      if (status) node->status = rf_bad;
      else node->status = rf_good;
      break;
    case rf_recover :
      /* undo-path completion; probably should never reach this case */
      if (status) node->status = rf_panic;
      else node->status = rf_undone;
      break;
    default :
      /* a wakeup for a node in any other state is a logic error */
      RF_PANIC();
      break;
  }
  /* release the disk queue request, if any, then report to the engine */
  if (node->dagFuncData)
    rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
  return(rf_FinishNode(node, RF_INTR_CONTEXT));
}
674
675
676 /*****************************************************************************************
677 * there are three distinct types of xor nodes
678 * A "regular xor" is used in the fault-free case where the access spans a complete
679 * stripe unit. It assumes that the result buffer is one full stripe unit in size,
680 * and uses the stripe-unit-offset values that it computes from the PDAs to determine
681 * where within the stripe unit to XOR each argument buffer.
682 *
683 * A "simple xor" is used in the fault-free case where the access touches only a portion
684 * of one (or two, in some cases) stripe unit(s). It assumes that all the argument
685 * buffers are of the same size and have the same stripe unit offset.
686 *
687 * A "recovery xor" is used in the degraded-mode case. It's similar to the regular
688 * xor function except that it takes the failed PDA as an additional parameter, and
689 * uses it to determine what portions of the argument buffers need to be xor'd into
690 * the result buffer, and where in the result buffer they should go.
691 ****************************************************************************************/
692
/* xor the params together and store the result in the result field.
 * assume the result field points to a buffer that is the size of one SU,
 * and use the pda params to determine where within the buffer to XOR
 * the input buffers.
 *
 * params come in (pda, buffer) pairs; the final param is the RF_Raid_t.
 */
int rf_RegularXorFunc(node)
  RF_DagNode_t *node;
{
  RF_Raid_t *raidPtr = (RF_Raid_t *)node->params[node->numParams-1].p;
  RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
  RF_Etimer_t timer;
  int i, retcode;
#if RF_BACKWARD > 0
  RF_PhysDiskAddr_t *pda;
  caddr_t undoBuf;
#endif

  retcode = 0;
  if (node->dagHdr->status == rf_enable) {
    /* don't do the XOR if the input is the same as the output */
    RF_ETIMER_START(timer);
    for (i=0; i<node->numParams-1; i+=2) if (node->params[i+1].p != node->results[0]) {
#if RF_BACKWARD > 0
      /* This section mimics undo logging for backward error recovery
       * experiments by allocating and initializing a buffer
       * XXX 512 byte sector size is hard coded!
       */
      pda = node->params[i].p;
      if (node->dagHdr->allocList == NULL)
        rf_MakeAllocList(node->dagHdr->allocList);
      RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
#endif /* RF_BACKWARD > 0 */
      /* only the last pair's retcode survives the loop */
      retcode = rf_XorIntoBuffer(raidPtr, (RF_PhysDiskAddr_t *) node->params[i].p,
                                 (char *)node->params[i+1].p, (char *) node->results[0], node->dagHdr->bp);
    }
    RF_ETIMER_STOP(timer); RF_ETIMER_EVAL(timer); tracerec->xor_us += RF_ETIMER_VAL_US(timer);
  }
  return(rf_GenericWakeupFunc(node, retcode));  /* call wake func explicitly since no I/O in this node */
}
732
/* xor the inputs into the result buffer, ignoring placement issues:
 * all argument buffers are assumed to share the same size and SU offset,
 * so each one is xor'd from offset zero of the result buffer.
 */
int rf_SimpleXorFunc(node)
  RF_DagNode_t *node;
{
  RF_Raid_t *raidPtr = (RF_Raid_t *)node->params[node->numParams-1].p;
  int i, retcode = 0;
  RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
  RF_Etimer_t timer;
#if RF_BACKWARD > 0
  RF_PhysDiskAddr_t *pda;
  caddr_t undoBuf;
#endif

  if (node->dagHdr->status == rf_enable) {
    RF_ETIMER_START(timer);
    /* don't do the XOR if the input is the same as the output */
    for (i=0; i<node->numParams-1; i+=2) if (node->params[i+1].p != node->results[0]) {
#if RF_BACKWARD > 0
      /* This section mimics undo logging for backward error recovery
       * experiments by allocating and initializing a buffer
       * XXX 512 byte sector size is hard coded!
       */
      pda = node->params[i].p;
      if (node->dagHdr->allocList == NULL)
        rf_MakeAllocList(node->dagHdr->allocList);
      RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
#endif /* RF_BACKWARD > 0 */
      /* only the last pair's retcode survives the loop */
      retcode = rf_bxor((char *)node->params[i+1].p, (char *) node->results[0],
                        rf_RaidAddressToByte(raidPtr, ((RF_PhysDiskAddr_t *)node->params[i].p)->numSector),
                        (struct buf *) node->dagHdr->bp);
    }
    RF_ETIMER_STOP(timer); RF_ETIMER_EVAL(timer); tracerec->xor_us += RF_ETIMER_VAL_US(timer);
  }

  return(rf_GenericWakeupFunc(node, retcode));  /* call wake func explicitly since no I/O in this node */
}
769
/* this xor is used by the degraded-mode dag functions to recover lost data.
 * the second-to-last parameter is the PDA for the failed portion of the access.
 * the code here looks at this PDA and assumes that the xor target buffer is
 * equal in size to the number of sectors in the failed PDA. It then uses
 * the other PDAs in the parameter list to determine where within the target
 * buffer the corresponding data should be xored.
 */
int rf_RecoveryXorFunc(node)
  RF_DagNode_t *node;
{
  RF_Raid_t *raidPtr = (RF_Raid_t *)node->params[node->numParams-1].p;
  RF_RaidLayout_t *layoutPtr = (RF_RaidLayout_t *) &raidPtr->Layout;
  RF_PhysDiskAddr_t *failedPDA = (RF_PhysDiskAddr_t *)node->params[node->numParams-2].p;
  int i, retcode = 0;
  RF_PhysDiskAddr_t *pda;
  int suoffset, failedSUOffset = rf_StripeUnitOffset(layoutPtr,failedPDA->startSector);
  char *srcbuf, *destbuf;
  RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
  RF_Etimer_t timer;
#if RF_BACKWARD > 0
  caddr_t undoBuf;
#endif

  if (node->dagHdr->status == rf_enable) {
    RF_ETIMER_START(timer);
    /* iterate over the (pda, buffer) pairs, skipping the failed PDA (last
     * two params) and any buffer that aliases the result buffer */
    for (i=0; i<node->numParams-2; i+=2) if (node->params[i+1].p != node->results[0]) {
      pda = (RF_PhysDiskAddr_t *)node->params[i].p;
#if RF_BACKWARD > 0
      /* This section mimics undo logging for backward error recovery
       * experiments by allocating and initializing a buffer
       * XXX 512 byte sector size is hard coded!
       */
      if (node->dagHdr->allocList == NULL)
        rf_MakeAllocList(node->dagHdr->allocList);
      RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
#endif /* RF_BACKWARD > 0 */
      /* place this buffer's contribution at its offset relative to the
       * failed region's stripe-unit offset */
      srcbuf = (char *)node->params[i+1].p;
      suoffset = rf_StripeUnitOffset(layoutPtr, pda->startSector);
      destbuf = ((char *) node->results[0]) + rf_RaidAddressToByte(raidPtr,suoffset-failedSUOffset);
      retcode = rf_bxor(srcbuf, destbuf, rf_RaidAddressToByte(raidPtr, pda->numSector), node->dagHdr->bp);
    }
    RF_ETIMER_STOP(timer); RF_ETIMER_EVAL(timer); tracerec->xor_us += RF_ETIMER_VAL_US(timer);
  }
  return (rf_GenericWakeupFunc(node, retcode));
}
815
816 /*****************************************************************************************
817 * The next three functions are utilities used by the above xor-execution functions.
818 ****************************************************************************************/
819
820
821 /*
822 * this is just a glorified buffer xor. targbuf points to a buffer that is one full stripe unit
823 * in size. srcbuf points to a buffer that may be less than 1 SU, but never more. When the
824 * access described by pda is one SU in size (which by implication means it's SU-aligned),
825 * all that happens is (targbuf) <- (srcbuf ^ targbuf). When the access is less than one
826 * SU in size the XOR occurs on only the portion of targbuf identified in the pda.
827 */
828
829 int rf_XorIntoBuffer(raidPtr, pda, srcbuf, targbuf, bp)
830 RF_Raid_t *raidPtr;
831 RF_PhysDiskAddr_t *pda;
832 char *srcbuf;
833 char *targbuf;
834 void *bp;
835 {
836 char *targptr;
837 int sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
838 int SUOffset = pda->startSector % sectPerSU;
839 int length, retcode = 0;
840
841 RF_ASSERT(pda->numSector <= sectPerSU);
842
843 targptr = targbuf + rf_RaidAddressToByte(raidPtr, SUOffset);
844 length = rf_RaidAddressToByte(raidPtr, pda->numSector);
845 retcode = rf_bxor(srcbuf, targptr, length, bp);
846 return(retcode);
847 }
848
849 /* it really should be the case that the buffer pointers (returned by malloc)
850 * are aligned to the natural word size of the machine, so this is the only
851 * case we optimize for. The length should always be a multiple of the sector
852 * size, so there should be no problem with leftover bytes at the end.
853 */
854 int rf_bxor(src, dest, len, bp)
855 char *src;
856 char *dest;
857 int len;
858 void *bp;
859 {
860 unsigned mask = sizeof(long) -1, retcode = 0;
861
862 if ( !(((unsigned long) src) & mask) && !(((unsigned long) dest) & mask) && !(len&mask) ) {
863 retcode = rf_longword_bxor((unsigned long *) src, (unsigned long *) dest, len>>RF_LONGSHIFT, bp);
864 } else {
865 RF_ASSERT(0);
866 }
867 return(retcode);
868 }
869
/* map a user buffer into kernel space, if necessary.
 * REMAP_VA(_bp, x, y): sets pointer y to a kernel-addressable alias of
 * buffer pointer x; _bp is cast to (struct buf *) when a real mapping is
 * performed.  Callers test y for NULL to detect mapping failure.
 */
#ifdef KERNEL
#ifdef __NetBSD__
/* XXX Not a clue if this is even close.. */
/* NetBSD: no remapping is done -- y simply aliases x */
#define REMAP_VA(_bp,x,y) (y) = (x)
#else
/* other kernels: use x directly if it is already a system VA, otherwise
 * map it into kernel space via rf_MapToKernelSpace() */
#define REMAP_VA(_bp,x,y) (y) = (unsigned long *) ((IS_SYS_VA(x)) ? (unsigned long *)(x) : (unsigned long *) rf_MapToKernelSpace((struct buf *) (_bp), (caddr_t)(x)))
#endif /* __NetBSD__ */
#else /* KERNEL */
/* user level: the address is already usable as-is */
#define REMAP_VA(_bp,x,y) (y) = (x)
#endif /* KERNEL */
881
/* When XORing in kernel mode, we need to map each user page to kernel space before we can access it.
 * We don't want to assume anything about which input buffers are in kernel/user
 * space, nor about their alignment, so in each loop we compute the maximum number
 * of bytes that we can xor without crossing any page boundaries, and do only this many
 * bytes before the next remap.
 *
 * dest[i] ^= src[i] for len longwords.  src/dest track logical progress;
 * pg_src/pg_dest are their kernel-addressable per-page aliases and do the
 * actual loads/stores.  Returns 0 on success, EFAULT if a page cannot be
 * mapped (on NetBSD REMAP_VA is the identity, so EFAULT cannot occur there).
 */
int rf_longword_bxor(src, dest, len, bp)
  register unsigned long *src;
  register unsigned long *dest;
  int len;                                               /* longwords */
  void *bp;
{
  register unsigned long *end = src+len;
  register unsigned long d0, d1, d2, d3, s0, s1, s2, s3; /* temps */
  register unsigned long *pg_src, *pg_dest;              /* per-page source/dest pointers */
  int longs_this_time;                                   /* # longwords to xor in the current iteration */

  /* establish kernel-addressable aliases for both buffers up front */
  REMAP_VA(bp, src, pg_src);
  REMAP_VA(bp, dest, pg_dest);
  if (!pg_src || !pg_dest) return(EFAULT);

  while (len >= 4 ) {
    /* xor only as far as the nearest page boundary of either buffer */
    longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(pg_src), RF_BLIP(pg_dest)) >> RF_LONGSHIFT); /* note len in longwords */
    /* logical pointers advance immediately; pg_* pointers do the work below */
    src += longs_this_time; dest+= longs_this_time; len -= longs_this_time;
    /* unrolled by 4: all loads issued before the stores */
    while (longs_this_time >= 4) {
      d0 = pg_dest[0];
      d1 = pg_dest[1];
      d2 = pg_dest[2];
      d3 = pg_dest[3];
      s0 = pg_src[0];
      s1 = pg_src[1];
      s2 = pg_src[2];
      s3 = pg_src[3];
      pg_dest[0] = d0 ^ s0;
      pg_dest[1] = d1 ^ s1;
      pg_dest[2] = d2 ^ s2;
      pg_dest[3] = d3 ^ s3;
      pg_src += 4;
      pg_dest += 4;
      longs_this_time -= 4;
    }
    while (longs_this_time > 0) { /* cannot cross any page boundaries here */
      *pg_dest++ ^= *pg_src++;
      longs_this_time--;
    }

    /* either we're done, or we've reached a page boundary on one (or possibly both) of the pointers */
    if (len) {
      if (RF_PAGE_ALIGNED(src)) REMAP_VA(bp, src, pg_src);
      if (RF_PAGE_ALIGNED(dest)) REMAP_VA(bp, dest, pg_dest);
      if (!pg_src || !pg_dest) return(EFAULT);
    }
  }
  /* trailing loop: fewer than 4 longwords remain */
  while (src < end) {
    *pg_dest++ ^= *pg_src++;
    src++; dest++; len--;
    if (RF_PAGE_ALIGNED(src)) REMAP_VA(bp, src, pg_src);
    if (RF_PAGE_ALIGNED(dest)) REMAP_VA(bp, dest, pg_dest);
  }
  RF_ASSERT(len == 0);
  return(0);
}
944
945
946 /*
947 dst = a ^ b ^ c;
948 a may equal dst
949 see comment above longword_bxor
950 */
951 int rf_longword_bxor3(dst,a,b,c,len, bp)
952 register unsigned long *dst;
953 register unsigned long *a;
954 register unsigned long *b;
955 register unsigned long *c;
956 int len; /* length in longwords */
957 void *bp;
958 {
959 unsigned long a0,a1,a2,a3, b0,b1,b2,b3;
960 register unsigned long *pg_a, *pg_b, *pg_c, *pg_dst; /* per-page source/dest pointers */
961 int longs_this_time; /* # longs to xor in the current iteration */
962 char dst_is_a = 0;
963
964 REMAP_VA(bp, a, pg_a);
965 REMAP_VA(bp, b, pg_b);
966 REMAP_VA(bp, c, pg_c);
967 if (a == dst) {pg_dst = pg_a; dst_is_a = 1;} else { REMAP_VA(bp, dst, pg_dst); }
968
969 /* align dest to cache line. Can't cross a pg boundary on dst here. */
970 while ((((unsigned long) pg_dst) & 0x1f)) {
971 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
972 dst++; a++; b++; c++;
973 if (RF_PAGE_ALIGNED(a)) {REMAP_VA(bp, a, pg_a); if (!pg_a) return(EFAULT);}
974 if (RF_PAGE_ALIGNED(b)) {REMAP_VA(bp, a, pg_b); if (!pg_b) return(EFAULT);}
975 if (RF_PAGE_ALIGNED(c)) {REMAP_VA(bp, a, pg_c); if (!pg_c) return(EFAULT);}
976 len--;
977 }
978
979 while (len > 4 ) {
980 longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(a), RF_MIN(RF_BLIP(b), RF_MIN(RF_BLIP(c), RF_BLIP(dst)))) >> RF_LONGSHIFT);
981 a+= longs_this_time; b+= longs_this_time; c+= longs_this_time; dst+=longs_this_time; len-=longs_this_time;
982 while (longs_this_time >= 4) {
983 a0 = pg_a[0]; longs_this_time -= 4;
984
985 a1 = pg_a[1];
986 a2 = pg_a[2];
987
988 a3 = pg_a[3]; pg_a += 4;
989
990 b0 = pg_b[0];
991 b1 = pg_b[1];
992
993 b2 = pg_b[2];
994 b3 = pg_b[3];
995 /* start dual issue */
996 a0 ^= b0; b0 = pg_c[0];
997
998 pg_b += 4; a1 ^= b1;
999
1000 a2 ^= b2; a3 ^= b3;
1001
1002 b1 = pg_c[1]; a0 ^= b0;
1003
1004 b2 = pg_c[2]; a1 ^= b1;
1005
1006 b3 = pg_c[3]; a2 ^= b2;
1007
1008 pg_dst[0] = a0; a3 ^= b3;
1009 pg_dst[1] = a1; pg_c += 4;
1010 pg_dst[2] = a2;
1011 pg_dst[3] = a3; pg_dst += 4;
1012 }
1013 while (longs_this_time > 0) { /* cannot cross any page boundaries here */
1014 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
1015 longs_this_time--;
1016 }
1017
1018 if (len) {
1019 if (RF_PAGE_ALIGNED(a)) {REMAP_VA(bp, a, pg_a); if (!pg_a) return(EFAULT); if (dst_is_a) pg_dst = pg_a;}
1020 if (RF_PAGE_ALIGNED(b)) {REMAP_VA(bp, b, pg_b); if (!pg_b) return(EFAULT);}
1021 if (RF_PAGE_ALIGNED(c)) {REMAP_VA(bp, c, pg_c); if (!pg_c) return(EFAULT);}
1022 if (!dst_is_a) if (RF_PAGE_ALIGNED(dst)) {REMAP_VA(bp, dst, pg_dst); if (!pg_dst) return(EFAULT);}
1023 }
1024 }
1025 while (len) {
1026 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
1027 dst++; a++; b++; c++;
1028 if (RF_PAGE_ALIGNED(a)) {REMAP_VA(bp, a, pg_a); if (!pg_a) return(EFAULT); if (dst_is_a) pg_dst = pg_a;}
1029 if (RF_PAGE_ALIGNED(b)) {REMAP_VA(bp, b, pg_b); if (!pg_b) return(EFAULT);}
1030 if (RF_PAGE_ALIGNED(c)) {REMAP_VA(bp, c, pg_c); if (!pg_c) return(EFAULT);}
1031 if (!dst_is_a) if (RF_PAGE_ALIGNED(dst)) {REMAP_VA(bp, dst, pg_dst); if (!pg_dst) return(EFAULT);}
1032 len--;
1033 }
1034 return(0);
1035 }
1036
1037 int rf_bxor3(dst,a,b,c,len, bp)
1038 register unsigned char *dst;
1039 register unsigned char *a;
1040 register unsigned char *b;
1041 register unsigned char *c;
1042 unsigned long len;
1043 void *bp;
1044 {
1045 RF_ASSERT(((RF_UL(dst)|RF_UL(a)|RF_UL(b)|RF_UL(c)|len) & 0x7) == 0);
1046
1047 return(rf_longword_bxor3((unsigned long *)dst, (unsigned long *)a,
1048 (unsigned long *)b, (unsigned long *)c, len>>RF_LONGSHIFT, bp));
1049 }
1050