/*	$NetBSD: rf_engine.c,v 1.22 2002/10/04 20:05:15 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: William V. Courtright II, Mark Holland, Rachad Youssef
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/****************************************************************************
 *
 * engine.c -- code for DAG execution engine
 *
 * Modified to work as follows (holland):
 *   A user thread calls into DispatchDAG, which fires off the nodes that
 *   are direct successors of the header node.  DispatchDAG then returns,
 *   and the rest of the I/O continues asynchronously.  As each node
 *   completes, the node execution function calls FinishNode().  FinishNode
 *   scans the list of successors to the node and increments the antecedent
 *   counts.  Each node that becomes enabled is placed on a central node
 *   queue.  A dedicated dag-execution thread grabs nodes off of this
 *   queue and fires them.
 *
 *   NULL nodes are never fired.
 *
 *   Terminator nodes are never fired, but rather cause the callback
 *   associated with the DAG to be invoked.
 *
 *   If a node fails, the DAG either rolls forward to completion or rolls
 *   back, undoing previously-completed nodes, so that it fails atomically.
 *   The direction of recovery is determined by the location of the failed
 *   node in the graph: if the failure occurred before the commit node,
 *   backward recovery is used; otherwise, forward recovery is used.
 *
 ****************************************************************************/
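
/*
 * Illustrative sketch only (an assumption on our part, not code from this
 * file): a node execution function is expected to do its work, set
 * node->status, and then hand the node back to the engine via
 * rf_FinishNode().  The function name below is hypothetical; the real node
 * functions live elsewhere in RAIDframe (e.g. rf_dagfuncs.c), and we assume
 * the usual RF_THREAD_CONTEXT constant from rf_engine.h:
 *
 *	int
 *	rf_ExampleNodeFunc(RF_DagNode_t *node)
 *	{
 *		... do the node's work ...
 *		node->status = rf_good;		-- or rf_bad on failure
 *		return (rf_FinishNode(node, RF_THREAD_CONTEXT));
 *	}
 */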

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_engine.c,v 1.22 2002/10/04 20:05:15 oster Exp $");

#include "rf_threadstuff.h"

#include <sys/errno.h>

#include "rf_dag.h"
#include "rf_engine.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_dagutils.h"
#include "rf_shutdown.h"
#include "rf_raid.h"

static void DAGExecutionThread(RF_ThreadArg_t arg);
static void rf_RaidIOThread(RF_ThreadArg_t arg);

#define DO_INIT(_l_,_r_) { \
	int _rc; \
	_rc = rf_create_managed_mutex(_l_, &(_r_)->node_queue_mutex); \
	if (_rc) { \
		return (_rc); \
	} \
	_rc = rf_create_managed_cond(_l_, &(_r_)->node_queue_cond); \
	if (_rc) { \
		return (_rc); \
	} \
}

/* synchronization primitives for this file.  DO_WAIT should be enclosed
 * in a while loop. */

#define DO_LOCK(_r_) \
do { \
	ks = splbio(); \
	RF_LOCK_MUTEX((_r_)->node_queue_mutex); \
} while (0)

#define DO_UNLOCK(_r_) \
do { \
	RF_UNLOCK_MUTEX((_r_)->node_queue_mutex); \
	splx(ks); \
} while (0)

#define DO_WAIT(_r_) \
	RF_WAIT_COND((_r_)->node_queue, (_r_)->node_queue_mutex)

#define DO_SIGNAL(_r_) \
	RF_BROADCAST_COND((_r_)->node_queue)	/* XXX RF_SIGNAL_COND? */
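
/*
 * Illustrative usage sketch (ours, not code from this file): a consumer of
 * the node queue takes the lock with DO_LOCK, re-tests its condition in a
 * loop around DO_WAIT to cope with broadcast or spurious wakeups (see the
 * comment above), and only calls DO_UNLOCK once it has claimed the work it
 * intends to process.  Note that DO_LOCK/DO_UNLOCK expect a local "int ks"
 * in scope for the spl bookkeeping.
 *
 *	int ks;
 *
 *	DO_LOCK(raidPtr);
 *	while (raidPtr->node_queue == NULL && !raidPtr->shutdown_engine)
 *		DO_WAIT(raidPtr);
 *	... detach raidPtr->node_queue into a local list ...
 *	DO_UNLOCK(raidPtr);
 */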

static void rf_ShutdownEngine(void *);

static void
rf_ShutdownEngine(arg)
	void *arg;
{
	RF_Raid_t *raidPtr;
	int ks;

	raidPtr = (RF_Raid_t *) arg;

	/* Tell the rf_RaidIOThread to shut down */
	simple_lock(&(raidPtr->iodone_lock));

	raidPtr->shutdown_raidio = 1;
	wakeup(&(raidPtr->iodone));

	/* ...and wait for it to tell us it has finished */
	while (raidPtr->shutdown_raidio)
		ltsleep(&(raidPtr->shutdown_raidio), PRIBIO, "raidshutdown", 0,
		    &(raidPtr->iodone_lock));

	simple_unlock(&(raidPtr->iodone_lock));

	/* Now shut down the DAG execution engine. */
	DO_LOCK(raidPtr);
	raidPtr->shutdown_engine = 1;
	DO_SIGNAL(raidPtr);
	DO_UNLOCK(raidPtr);
}

int
rf_ConfigureEngine(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int rc;

	DO_INIT(listp, raidPtr);

	raidPtr->node_queue = NULL;
	raidPtr->dags_in_flight = 0;

	rc = rf_init_managed_threadgroup(listp, &raidPtr->engine_tg);
	if (rc)
		return (rc);

	/* we create the execution thread only once per system boot.  no need
	 * to check return code b/c the kernel panics if it can't create the
	 * thread. */
	if (rf_engineDebug) {
		printf("raid%d: Creating engine thread\n", raidPtr->raidid);
	}
	if (RF_CREATE_ENGINE_THREAD(raidPtr->engine_thread, DAGExecutionThread,
	    raidPtr, "raid%d", raidPtr->raidid)) {
		RF_ERRORMSG("RAIDFRAME: Unable to create engine thread\n");
		return (ENOMEM);
	}
	if (RF_CREATE_ENGINE_THREAD(raidPtr->engine_helper_thread,
	    rf_RaidIOThread, raidPtr,
	    "raidio%d", raidPtr->raidid)) {
		printf("raid%d: Unable to create raidio thread\n",
		    raidPtr->raidid);
		return (ENOMEM);
	}
	if (rf_engineDebug) {
		printf("raid%d: Created engine thread\n", raidPtr->raidid);
	}
	RF_THREADGROUP_STARTED(&raidPtr->engine_tg);
	/* XXX something is missing here... */
#ifdef debug
	printf("Skipping the WAIT_START!!\n");
#endif
#if 0
	RF_THREADGROUP_WAIT_START(&raidPtr->engine_tg);
#endif
	/* engine thread is now running and waiting for work */
	if (rf_engineDebug) {
		printf("raid%d: Engine thread running and waiting for events\n",
		    raidPtr->raidid);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownEngine, raidPtr);
	if (rc) {
		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
		rf_ShutdownEngine(NULL);
	}
	return (rc);
}

static int
BranchDone(RF_DagNode_t * node)
{
	int i;

	/* return true if forward execution is completed for a node and its
	 * succedents */
	switch (node->status) {
	case rf_wait:
		/* should never be called in this state */
		RF_PANIC();
		break;
	case rf_fired:
		/* node is currently executing, so we're not done */
		return (RF_FALSE);
	case rf_good:
		/* for each succedent, recursively check the branch */
		for (i = 0; i < node->numSuccedents; i++)
			if (!BranchDone(node->succedents[i]))
				return RF_FALSE;
		return RF_TRUE;	/* node and all succedent branches aren't in
				 * fired state */
	case rf_bad:
		/* succedents can't fire */
		return (RF_TRUE);
	case rf_recover:
		/* should never be called in this state */
		RF_PANIC();
		break;
	case rf_undone:
	case rf_panic:
		/* XXX need to fix this case */
		/* for now, assume that we're done */
		return (RF_TRUE);
	default:
		/* illegal node status */
		RF_PANIC();
		break;
	}
}

static int
NodeReady(RF_DagNode_t * node)
{
	int ready;

	switch (node->dagHdr->status) {
	case rf_enable:
	case rf_rollForward:
		if ((node->status == rf_wait) &&
		    (node->numAntecedents == node->numAntDone))
			ready = RF_TRUE;
		else
			ready = RF_FALSE;
		break;
	case rf_rollBackward:
		RF_ASSERT(node->numSuccDone <= node->numSuccedents);
		RF_ASSERT(node->numSuccFired <= node->numSuccedents);
		RF_ASSERT(node->numSuccFired <= node->numSuccDone);
		if ((node->status == rf_good) &&
		    (node->numSuccDone == node->numSuccedents))
			ready = RF_TRUE;
		else
			ready = RF_FALSE;
		break;
	default:
		printf("Execution engine found illegal DAG status in NodeReady\n");
		RF_PANIC();
		break;
	}

	return (ready);
}


/* user context and dag-exec-thread context:
 * Fire a node.  The node's status field determines which function, do or
 * undo, is fired.
 * This routine assumes that the node's status field has already been set
 * to "fired" or "recover" to indicate the direction of execution.
 */
static void
FireNode(RF_DagNode_t * node)
{
	switch (node->status) {
	case rf_fired:
		/* fire the do function of a node */
		if (rf_engineDebug) {
			printf("raid%d: Firing node 0x%lx (%s)\n",
			    node->dagHdr->raidPtr->raidid,
			    (unsigned long) node, node->name);
		}
		if (node->flags & RF_DAGNODE_FLAG_YIELD) {
#if defined(__NetBSD__) && defined(_KERNEL)
			/* thread_block(); */
			/* printf("Need to block the thread here...\n"); */
			/* XXX thread_block is actually mentioned in
			 * /usr/include/vm/vm_extern.h */
#else
			thread_block();
#endif
		}
		(*(node->doFunc)) (node);
		break;
	case rf_recover:
		/* fire the undo function of a node */
		if (rf_engineDebug) {
			printf("raid%d: Firing (undo) node 0x%lx (%s)\n",
			    node->dagHdr->raidPtr->raidid,
			    (unsigned long) node, node->name);
		}
		if (node->flags & RF_DAGNODE_FLAG_YIELD) {
#if defined(__NetBSD__) && defined(_KERNEL)
			/* thread_block(); */
			/* printf("Need to block the thread here...\n"); */
			/* XXX thread_block is actually mentioned in
			 * /usr/include/vm/vm_extern.h */
#else
			thread_block();
#endif
		}
		(*(node->undoFunc)) (node);
		break;
	default:
		RF_PANIC();
		break;
	}
}


/* user context:
 * Attempt to fire each node in a linear array.
 * The entire list is fired atomically.
 */
static void
FireNodeArray(
    int numNodes,
    RF_DagNode_t ** nodeList)
{
	RF_DagStatus_t dstat;
	RF_DagNode_t *node;
	int i, j;

	/* first, mark all nodes which are ready to be fired */
	for (i = 0; i < numNodes; i++) {
		node = nodeList[i];
		dstat = node->dagHdr->status;
		RF_ASSERT((node->status == rf_wait) ||
		    (node->status == rf_good));
		if (NodeReady(node)) {
			if ((dstat == rf_enable) || (dstat == rf_rollForward)) {
				RF_ASSERT(node->status == rf_wait);
				if (node->commitNode)
					node->dagHdr->numCommits++;
				node->status = rf_fired;
				for (j = 0; j < node->numAntecedents; j++)
					node->antecedents[j]->numSuccFired++;
			} else {
				RF_ASSERT(dstat == rf_rollBackward);
				RF_ASSERT(node->status == rf_good);
				/* only one commit node per graph */
				RF_ASSERT(node->commitNode == RF_FALSE);
				node->status = rf_recover;
			}
		}
	}
	/* now, fire the nodes */
	for (i = 0; i < numNodes; i++) {
		if ((nodeList[i]->status == rf_fired) ||
		    (nodeList[i]->status == rf_recover))
			FireNode(nodeList[i]);
	}
}


/* user context:
 * Attempt to fire each node in a linked list.
 * The entire list is fired atomically.
 */
static void
FireNodeList(RF_DagNode_t * nodeList)
{
	RF_DagNode_t *node, *next;
	RF_DagStatus_t dstat;
	int j;

	if (nodeList) {
		/* first, mark all nodes which are ready to be fired */
		for (node = nodeList; node; node = next) {
			next = node->next;
			dstat = node->dagHdr->status;
			RF_ASSERT((node->status == rf_wait) ||
			    (node->status == rf_good));
			if (NodeReady(node)) {
				if ((dstat == rf_enable) ||
				    (dstat == rf_rollForward)) {
					RF_ASSERT(node->status == rf_wait);
					if (node->commitNode)
						node->dagHdr->numCommits++;
					node->status = rf_fired;
					for (j = 0; j < node->numAntecedents; j++)
						node->antecedents[j]->numSuccFired++;
				} else {
					RF_ASSERT(dstat == rf_rollBackward);
					RF_ASSERT(node->status == rf_good);
					/* only one commit node per graph */
					RF_ASSERT(node->commitNode == RF_FALSE);
					node->status = rf_recover;
				}
			}
		}
		/* now, fire the nodes */
		for (node = nodeList; node; node = next) {
			next = node->next;
			if ((node->status == rf_fired) ||
			    (node->status == rf_recover))
				FireNode(node);
		}
	}
}
/* interrupt context:
 * for each succedent
 *    propagate required results from node to succedent
 *    increment succedent's numAntDone
 *    place newly-enabled nodes on the node queue for firing
 *
 * To save context switches, we don't place NIL nodes on the node queue,
 * but rather just process them as if they had fired.  Note that NIL nodes
 * that are the direct successors of the header will actually get fired by
 * DispatchDAG, which is fine because no context switches are involved.
 *
 * Important: when running at user level, this can be called by any
 * disk thread, and so the increment and check of the antecedent count
 * must be locked.  I used the node queue mutex and locked down the
 * entire function, but this is certainly overkill.
 */
static void
PropagateResults(
    RF_DagNode_t * node,
    int context)
{
	RF_DagNode_t *s, *a;
	RF_Raid_t *raidPtr;
	int i, ks;
	RF_DagNode_t *finishlist = NULL; /* a list of NIL nodes to be finished */
	RF_DagNode_t *skiplist = NULL;	/* list of nodes with failed truedata
					 * antecedents */
	RF_DagNode_t *firelist = NULL;	/* a list of nodes to be fired */
	RF_DagNode_t *q = NULL, *qh = NULL, *next;
	int j, skipNode;

	raidPtr = node->dagHdr->raidPtr;

	DO_LOCK(raidPtr);

	/* debug - validate fire counts */
	for (i = 0; i < node->numAntecedents; i++) {
		a = *(node->antecedents + i);
		RF_ASSERT(a->numSuccFired >= a->numSuccDone);
		RF_ASSERT(a->numSuccFired <= a->numSuccedents);
		a->numSuccDone++;
	}

	switch (node->dagHdr->status) {
	case rf_enable:
	case rf_rollForward:
		for (i = 0; i < node->numSuccedents; i++) {
			s = *(node->succedents + i);
			RF_ASSERT(s->status == rf_wait);
			(s->numAntDone)++;
			if (s->numAntDone == s->numAntecedents) {
				/* look for NIL nodes */
				if (s->doFunc == rf_NullNodeFunc) {
					/* don't fire NIL nodes, just
					 * process them */
					s->next = finishlist;
					finishlist = s;
				} else {
					/* look to see if the node is to be
					 * skipped */
					skipNode = RF_FALSE;
					for (j = 0; j < s->numAntecedents; j++)
						if ((s->antType[j] == rf_trueData) &&
						    (s->antecedents[j]->status == rf_bad))
							skipNode = RF_TRUE;
					if (skipNode) {
						/* this node has one or more
						 * failed true-data
						 * dependencies, so skip it */
						s->next = skiplist;
						skiplist = s;
					} else
						/* add s to a list of nodes
						 * to execute */
						if (context != RF_INTR_CONTEXT) {
							/* not at intr context,
							 * so we can fire the
							 * node ourselves once
							 * we unlock */
							s->next = firelist;
							firelist = s;
						} else {
							/* at intr context:
							 * enqueue the node (q)
							 * for the dag exec
							 * thread to fire */
							RF_ASSERT(NodeReady(s));
							if (q) {
								q->next = s;
								q = s;
							} else {
								qh = q = s;
								qh->next = NULL;
							}
						}
				}
			}
		}

		if (q) {
			/* xfer our local list of nodes to the node queue */
			q->next = raidPtr->node_queue;
			raidPtr->node_queue = qh;
			DO_SIGNAL(raidPtr);
		}
		DO_UNLOCK(raidPtr);

		for (; skiplist; skiplist = next) {
			next = skiplist->next;
			skiplist->status = rf_skipped;
			for (i = 0; i < skiplist->numAntecedents; i++) {
				skiplist->antecedents[i]->numSuccFired++;
			}
			if (skiplist->commitNode) {
				skiplist->dagHdr->numCommits++;
			}
			rf_FinishNode(skiplist, context);
		}
		for (; finishlist; finishlist = next) {
			/* NIL nodes: no need to fire them */
			next = finishlist->next;
			finishlist->status = rf_good;
			for (i = 0; i < finishlist->numAntecedents; i++) {
				finishlist->antecedents[i]->numSuccFired++;
			}
			if (finishlist->commitNode)
				finishlist->dagHdr->numCommits++;
			/*
			 * Okay, here we're calling rf_FinishNode() on nodes
			 * that have the null function as their work proc.
			 * Such a node could be the terminal node in a DAG.
			 * If so, it will cause the DAG to complete, which
			 * will in turn free memory used by the DAG, which
			 * includes the node in question.  Thus, we must
			 * avoid referencing the node at all after calling
			 * rf_FinishNode() on it.
			 */
			rf_FinishNode(finishlist, context); /* recursive call */
		}
		/* fire all nodes in firelist */
		FireNodeList(firelist);
		break;

	case rf_rollBackward:
		for (i = 0; i < node->numAntecedents; i++) {
			a = *(node->antecedents + i);
			RF_ASSERT(a->status == rf_good);
			RF_ASSERT(a->numSuccDone <= a->numSuccedents);
			RF_ASSERT(a->numSuccDone <= a->numSuccFired);

			if (a->numSuccDone == a->numSuccFired) {
				if (a->undoFunc == rf_NullNodeFunc) {
					/* don't fire NIL nodes, just
					 * process them */
					a->next = finishlist;
					finishlist = a;
				} else {
					if (context != RF_INTR_CONTEXT) {
						/* not at intr context, so we
						 * can fire the node ourselves
						 * once we unlock */
						a->next = firelist;
						firelist = a;
					} else {
						/* at intr context: enqueue
						 * the node for the dag exec
						 * thread to fire */
						RF_ASSERT(NodeReady(a));
						if (q) {
							q->next = a;
							q = a;
						} else {
							qh = q = a;
							qh->next = NULL;
						}
					}
				}
			}
		}
		if (q) {
			/* xfer our local list of nodes to the node queue */
			q->next = raidPtr->node_queue;
			raidPtr->node_queue = qh;
			DO_SIGNAL(raidPtr);
		}
		DO_UNLOCK(raidPtr);
		for (; finishlist; finishlist = next) {
			/* NIL nodes: no need to fire them */
			next = finishlist->next;
			finishlist->status = rf_good;
			/*
			 * Okay, here we're calling rf_FinishNode() on nodes
			 * that have the null function as their work proc.
			 * Such a node could be the first node in a DAG.
			 * If so, it will cause the DAG to complete, which
			 * will in turn free memory used by the DAG, which
			 * includes the node in question.  Thus, we must
			 * avoid referencing the node at all after calling
			 * rf_FinishNode() on it.
			 */
			rf_FinishNode(finishlist, context); /* recursive call */
		}
		/* fire all nodes in firelist */
		FireNodeList(firelist);

		break;
	default:
		printf("Engine found illegal DAG status in PropagateResults()\n");
		RF_PANIC();
		break;
	}
}


/*
 * Process a fired node which has completed
 */
static void
ProcessNode(
    RF_DagNode_t * node,
    int context)
{
	RF_Raid_t *raidPtr;

	raidPtr = node->dagHdr->raidPtr;

	switch (node->status) {
	case rf_good:
		/* normal case, don't need to do anything */
		break;
	case rf_bad:
		if ((node->dagHdr->numCommits > 0) ||
		    (node->dagHdr->numCommitNodes == 0)) {
			/* crossed the commit barrier */
			node->dagHdr->status = rf_rollForward;
			if (rf_engineDebug || 1) {
				printf("raid%d: node (%s) returned fail, rolling forward\n",
				    raidPtr->raidid, node->name);
			}
		} else {
			/* never reached the commit barrier */
			node->dagHdr->status = rf_rollBackward;
			if (rf_engineDebug || 1) {
				printf("raid%d: node (%s) returned fail, rolling backward\n",
				    raidPtr->raidid, node->name);
			}
		}
		break;
	case rf_undone:
		/* normal rollBackward case, don't need to do anything */
		break;
	case rf_panic:
		/* an undo node failed!!! */
		printf("UNDO of a node failed!!!\n");
		break;
	default:
		printf("node finished execution with an illegal status!!!\n");
		RF_PANIC();
		break;
	}

	/* enqueue node's succedents (antecedents if rollBackward) for
	 * execution */
	PropagateResults(node, context);
}


/* user context or dag-exec-thread context:
 * This is the first step in post-processing a newly-completed node.
 * This routine is called by each node execution function to mark the node
 * as complete and fire off any successors that have been enabled.
 */
int
rf_FinishNode(
    RF_DagNode_t * node,
    int context)
{
	int retcode = RF_FALSE;

	node->dagHdr->numNodesCompleted++;
	ProcessNode(node, context);

	return (retcode);
}


/* user context:
 * submit dag for execution, return non-zero if we have to wait for
 * completion.  if and only if we return non-zero, we'll cause cbFunc to
 * get invoked with cbArg when the DAG has completed.
 *
 * for now we always return 1.  If the DAG does not cause any I/O, then
 * the callback may get invoked before DispatchDAG returns.  There's code
 * in state 5 of ContinueRaidAccess to handle this.
 *
 * All we do here is fire the direct successors of the header node.  The
 * DAG execution thread does the rest of the dag processing.
 */
int
rf_DispatchDAG(
    RF_DagHeader_t * dag,
    void (*cbFunc) (void *),
    void *cbArg)
{
	RF_Raid_t *raidPtr;

	raidPtr = dag->raidPtr;
	if (dag->tracerec) {
		RF_ETIMER_START(dag->tracerec->timer);
	}
#if DEBUG
#if RF_DEBUG_VALIDATE_DAG
	if (rf_engineDebug || rf_validateDAGDebug) {
		if (rf_ValidateDAG(dag))
			RF_PANIC();
	}
#endif
#endif
	if (rf_engineDebug) {
		printf("raid%d: Entering DispatchDAG\n", raidPtr->raidid);
	}
	raidPtr->dags_in_flight++;	/* debug only: blow off proper
					 * locking */
	dag->cbFunc = cbFunc;
	dag->cbArg = cbArg;
	dag->numNodesCompleted = 0;
	dag->status = rf_enable;
	FireNodeArray(dag->numSuccedents, dag->succedents);
	return (1);
}
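
#if 0
/*
 * Illustrative caller sketch (hypothetical names, not part of this driver):
 * the callback handed to rf_DispatchDAG() must tolerate being invoked
 * before rf_DispatchDAG() itself returns, since a DAG that issues no I/O
 * can run to completion immediately.
 */
static void
rf_ExampleDagDoneCB(void *arg)
{
	/* e.g. wake whoever is waiting on this access descriptor */
	wakeup(arg);
}

static void
rf_ExampleDispatch(RF_DagHeader_t *dag_h, void *desc)
{
	(void) rf_DispatchDAG(dag_h, rf_ExampleDagDoneCB, desc);
	/* completion arrives asynchronously via rf_ExampleDagDoneCB() */
}
#endif
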
/* dedicated kernel thread:
 * the thread that handles all DAG node firing.
 * To minimize locking and unlocking, we grab a copy of the entire node
 * queue and then set the node queue to NULL before doing any firing of
 * nodes.  This way we only have to release the lock once.  Of course,
 * it's probably rare that there's more than one node in the queue at
 * any one time, but it sometimes happens.
 *
 * In the kernel, this thread runs at spl0 and is not swappable.  I
 * copied these characteristics from the aio_completion_thread.
 */

static void
DAGExecutionThread(RF_ThreadArg_t arg)
{
	RF_DagNode_t *nd, *local_nq, *term_nq, *fire_nq;
	RF_Raid_t *raidPtr;
	int ks;
	int s;

	raidPtr = (RF_Raid_t *) arg;

	if (rf_engineDebug) {
		printf("raid%d: Engine thread is running\n", raidPtr->raidid);
	}

	s = splbio();

	RF_THREADGROUP_RUNNING(&raidPtr->engine_tg);

	DO_LOCK(raidPtr);
	while (!raidPtr->shutdown_engine) {

		while (raidPtr->node_queue != NULL) {
			local_nq = raidPtr->node_queue;
			fire_nq = NULL;
			term_nq = NULL;
			raidPtr->node_queue = NULL;
			DO_UNLOCK(raidPtr);

			/* first, strip out the terminal nodes */
			while (local_nq) {
				nd = local_nq;
				local_nq = local_nq->next;
				switch (nd->dagHdr->status) {
				case rf_enable:
				case rf_rollForward:
					if (nd->numSuccedents == 0) {
						/* end of the dag, add to
						 * callback list */
						nd->next = term_nq;
						term_nq = nd;
					} else {
						/* not the end, add to the
						 * fire queue */
						nd->next = fire_nq;
						fire_nq = nd;
					}
					break;
				case rf_rollBackward:
					if (nd->numAntecedents == 0) {
						/* end of the dag, add to the
						 * callback list */
						nd->next = term_nq;
						term_nq = nd;
					} else {
						/* not the end, add to the
						 * fire queue */
						nd->next = fire_nq;
						fire_nq = nd;
					}
					break;
				default:
					RF_PANIC();
					break;
				}
			}

			/* execute callback of dags which have reached the
			 * terminal node */
			while (term_nq) {
				nd = term_nq;
				term_nq = term_nq->next;
				nd->next = NULL;
				(nd->dagHdr->cbFunc) (nd->dagHdr->cbArg);
				raidPtr->dags_in_flight--; /* debug only */
			}

			/* fire remaining nodes */
			FireNodeList(fire_nq);

			DO_LOCK(raidPtr);
		}
		while (!raidPtr->shutdown_engine &&
		    raidPtr->node_queue == NULL) {
			DO_UNLOCK(raidPtr);
			tsleep(&(raidPtr->node_queue), PRIBIO, "rfwcond", 0);
			DO_LOCK(raidPtr);
		}
	}
	DO_UNLOCK(raidPtr);

	RF_THREADGROUP_DONE(&raidPtr->engine_tg);

	splx(s);
	kthread_exit(0);
}

/*
 * rf_RaidIOThread() -- When I/O to a component completes,
 * KernelWakeupFunc() puts the completed request onto the raidPtr->iodone
 * TAILQ.  This function looks after requests on that queue by calling
 * rf_DiskIOComplete() for the request, and by calling any required
 * CompleteFunc for the request.
 */

static void
rf_RaidIOThread(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;
	RF_DiskQueueData_t *req;
	int s;

	raidPtr = (RF_Raid_t *) arg;

	s = splbio();
	simple_lock(&(raidPtr->iodone_lock));

	while (!raidPtr->shutdown_raidio) {
		/* if there is nothing to do, then snooze. */
		if (TAILQ_EMPTY(&(raidPtr->iodone))) {
			ltsleep(&(raidPtr->iodone), PRIBIO, "raidiow", 0,
			    &(raidPtr->iodone_lock));
		}

		/* See what I/Os, if any, have arrived */
		while ((req = TAILQ_FIRST(&(raidPtr->iodone))) != NULL) {
			TAILQ_REMOVE(&(raidPtr->iodone), req, iodone_entries);
			simple_unlock(&(raidPtr->iodone_lock));
			rf_DiskIOComplete(req->queue, req, req->error);
			(req->CompleteFunc) (req->argument, req->error);
			simple_lock(&(raidPtr->iodone_lock));
		}
	}

	/* Let rf_ShutdownEngine know that we're done... */
	raidPtr->shutdown_raidio = 0;
	wakeup(&(raidPtr->shutdown_raidio));

	simple_unlock(&(raidPtr->iodone_lock));
	splx(s);

	kthread_exit(0);
}
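
/*
 * For reference, an illustrative sketch (assumed here, since the real code
 * lives in rf_netbsdkintf.c rather than in this file) of the producer side
 * that rf_RaidIOThread() pairs with: when a component I/O completes,
 * KernelWakeupFunc() queues the request and pokes this thread roughly as
 * follows:
 *
 *	simple_lock(&(raidPtr->iodone_lock));
 *	TAILQ_INSERT_TAIL(&(raidPtr->iodone), req, iodone_entries);
 *	wakeup(&(raidPtr->iodone));
 *	simple_unlock(&(raidPtr->iodone_lock));
 */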