/*	$NetBSD: rf_revent.c,v 1.9.2.2 2002/09/17 21:20:59 nathanw Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * revent.c -- reconstruction event handling code
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.9.2.2 2002/09/17 21:20:59 nathanw Exp $");

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_freelist.h"
#include "rf_desc.h"
#include "rf_shutdown.h"

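/*
 * Free list of pre-allocated reconstruction event descriptors.  Judging by
 * how the RF_FREELIST_* macros are used below (an inference, not something
 * documented here): RF_MAX_FREE_REVENT caps how many descriptors the free
 * list keeps around, RF_REVENT_INC is the step by which it grows when it
 * runs dry, and RF_REVENT_INITIAL is the number primed at configure time.
 */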
static RF_FreeList_t *rf_revent_freelist;
#define RF_MAX_FREE_REVENT 128
#define RF_REVENT_INC 8
#define RF_REVENT_INITIAL 8

#include <sys/proc.h>
#include <sys/kernel.h>

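/*
 * DO_WAIT/DO_SIGNAL form a simple sleep/wakeup pair on the event queue,
 * using the queue head as the wait channel.  ltsleep() is passed eq_mutex
 * as its interlock, so (assuming the usual NetBSD ltsleep() semantics) the
 * mutex is dropped while the thread sleeps and reacquired before DO_WAIT
 * returns.
 */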
#define DO_WAIT(_rc) \
        ltsleep(&(_rc)->eventQueue, PRIBIO, "raidframe eventq", \
            0, &((_rc)->eq_mutex))

#define DO_SIGNAL(_rc) wakeup(&(_rc)->eventQueue)

static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t row, RF_RowCol_t col,
    void *arg, RF_Revent_t type);

static void rf_ShutdownReconEvent(ignored)
        void *ignored;
{
        RF_FREELIST_DESTROY(rf_revent_freelist, next, (RF_ReconEvent_t *));
}

int
rf_ConfigureReconEvent(listp)
        RF_ShutdownList_t **listp;
{
        int rc;

        RF_FREELIST_CREATE(rf_revent_freelist, RF_MAX_FREE_REVENT,
            RF_REVENT_INC, sizeof(RF_ReconEvent_t));
        if (rf_revent_freelist == NULL)
                return (ENOMEM);
        rc = rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);
        if (rc) {
                rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
                rf_ShutdownReconEvent(NULL);
                return (rc);
        }
        RF_FREELIST_PRIME(rf_revent_freelist, RF_REVENT_INITIAL, next,
            (RF_ReconEvent_t *));
        return (0);
}
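
/*
 * Descriptor life cycle, as used in this file: the free list is created and
 * primed in rf_ConfigureReconEvent() above; descriptors are taken from it in
 * GetReconEventDesc() (on behalf of rf_CauseReconEvent()), returned through
 * rf_FreeReconEventDesc(), and the list itself is destroyed by
 * rf_ShutdownReconEvent() at shutdown time.
 */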

/*
 * Return the next reconstruction event for the given row, blocking the
 * calling thread until one becomes available.  The continueFunc and
 * continueArg are recorded in the recon control structure before any wait.
 */

RF_ReconEvent_t *
rf_GetNextReconEvent(reconDesc, row, continueFunc, continueArg)
        RF_RaidReconDesc_t *reconDesc;
        RF_RowCol_t row;
        void (*continueFunc) (void *);
        void *continueArg;
{
        RF_Raid_t *raidPtr = reconDesc->raidPtr;
        RF_ReconCtrl_t *rctrl = raidPtr->reconControl[row];
        RF_ReconEvent_t *event;

        RF_ASSERT(row >= 0 && row < raidPtr->numRow);
        RF_LOCK_MUTEX(rctrl->eq_mutex);
        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

        rctrl->continueFunc = continueFunc;
        rctrl->continueArg = continueArg;

        /*
         * ltsleep() timeout value: seconds = timo / hz.  Note that the
         * "ticks" accumulated in reconExecTicks below are microseconds
         * from the event timer (RF_ETIMER_VAL_US), not softclock ticks.
         */

#define MAX_RECON_EXEC_USECS (100 * 1000)	/* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO ((RECON_DELAY_MS * hz) / 1000)
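
        /*
         * Worked example (assuming hz = 100, a common softclock rate; the
         * actual value is not fixed by this file): RECON_TIMO =
         * (25 * 100) / 1000 = 2 ticks, so the delay sleep below times out
         * after roughly 20-25 ms, i.e. about RECON_DELAY_MS.
         */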

        /*
         * We are not preemptible in the kernel, but we don't want to run
         * forever.  If we run without blocking for more than
         * MAX_RECON_EXEC_USECS microseconds, delay for RECON_DELAY_MS
         * before continuing.  This may murder us with context switches, so
         * we may need to increase both MAX_RECON_EXEC_USECS and
         * RECON_DELAY_MS.
         */
        if (reconDesc->reconExecTimerRunning) {
                int status;

                RF_ETIMER_STOP(reconDesc->recon_exec_timer);
                RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
                reconDesc->reconExecTicks +=
                    RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
                if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
                        reconDesc->maxReconExecTicks =
                            reconDesc->reconExecTicks;
                if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
                        /* we've been running too long; delay for
                         * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
                        reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

                        status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
                            "recon delay", RECON_TIMO,
                            &rctrl->eq_mutex);
                        RF_ASSERT(status == EWOULDBLOCK);
                        reconDesc->reconExecTicks = 0;
                }
        }
        while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
                reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */
                DO_WAIT(rctrl);
                reconDesc->reconExecTicks = 0;	/* we've just waited */
        }

        reconDesc->reconExecTimerRunning = 1;
        if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer) != 0) {
                /* it moved!! reset the timer. */
                RF_ETIMER_START(reconDesc->recon_exec_timer);
        }
        event = rctrl->eventQueue;
        rctrl->eventQueue = event->next;
        event->next = NULL;
        rctrl->eq_count--;

        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
        RF_UNLOCK_MUTEX(rctrl->eq_mutex);
        return (event);
}

/* enqueues a reconstruction event on the indicated queue */
void
rf_CauseReconEvent(raidPtr, row, col, arg, type)
        RF_Raid_t *raidPtr;
        RF_RowCol_t row;
        RF_RowCol_t col;
        void *arg;
        RF_Revent_t type;
{
        RF_ReconCtrl_t *rctrl = raidPtr->reconControl[row];
        RF_ReconEvent_t *event = GetReconEventDesc(row, col, arg, type);

        if (type == RF_REVENT_BUFCLEAR) {
                RF_ASSERT(col != rctrl->fcol);
        }
        RF_ASSERT(row >= 0 && row < raidPtr->numRow &&
            col >= 0 && col <= raidPtr->numCol);
        RF_LOCK_MUTEX(rctrl->eq_mutex);
        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
        event->next = rctrl->eventQueue;
        rctrl->eventQueue = event;
        rctrl->eq_count++;
        RF_UNLOCK_MUTEX(rctrl->eq_mutex);

        DO_SIGNAL(rctrl);
}
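
/*
 * Note on ordering: rf_CauseReconEvent() pushes new events onto the head of
 * eventQueue and rf_GetNextReconEvent() also dequeues from the head, so the
 * "queue" here is effectively a LIFO stack protected by eq_mutex.
 */
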
/* allocates and initializes a recon event descriptor */
static RF_ReconEvent_t *
GetReconEventDesc(row, col, arg, type)
        RF_RowCol_t row;
        RF_RowCol_t col;
        void *arg;
        RF_Revent_t type;
{
        RF_ReconEvent_t *t;

        RF_FREELIST_GET(rf_revent_freelist, t, next, (RF_ReconEvent_t *));
        if (t == NULL)
                return (NULL);
        t->col = col;
        t->arg = arg;
        t->type = type;
        return (t);
}

void
rf_FreeReconEventDesc(event)
        RF_ReconEvent_t *event;
{
        RF_FREELIST_FREE(rf_revent_freelist, event, next);
}