/*	$NetBSD: rf_revent.c,v 1.25 2008/05/19 19:49:55 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * revent.c -- reconstruction event handling code
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.25 2008/05/19 19:49:55 oster Exp $");

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_shutdown.h"

#define RF_MAX_FREE_REVENT 128
#define RF_MIN_FREE_REVENT 32
#define RF_EVENTQ_WAIT 5000
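/*
 * RF_MIN_FREE_REVENT and RF_MAX_FREE_REVENT bound the number of free event
 * descriptors kept in the rf_pools.revent pool set up in
 * rf_ConfigureReconEvent() below.  RF_EVENTQ_WAIT is the ltsleep() timeout,
 * in ticks, used while waiting for an event to arrive on the reconstruction
 * event queue.
 */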

#include <sys/proc.h>
#include <sys/kernel.h>

static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type);

static void rf_ShutdownReconEvent(void *ignored)
{
        pool_destroy(&rf_pools.revent);
}

int
rf_ConfigureReconEvent(RF_ShutdownList_t **listp)
{

        rf_pool_init(&rf_pools.revent, sizeof(RF_ReconEvent_t),
                     "rf_revent_pl", RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);
        rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);

        return (0);
}

/* returns the next reconstruction event, blocking the calling thread
 * until one becomes available */

RF_ReconEvent_t *
rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc)
{
        RF_Raid_t *raidPtr = reconDesc->raidPtr;
        RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
        RF_ReconEvent_t *event;
        int stall_count;

        RF_LOCK_MUTEX(rctrl->eq_mutex);
        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

        /* ltsleep timeout value: secs = timo_val/hz.  Note that the
           execution-time budget below (reconExecTicks) is kept in
           microseconds from the cycle-counter etimer, not in softclock
           ticks */

#define MAX_RECON_EXEC_USECS (100 * 1000) /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO ((RECON_DELAY_MS * hz) / 1000)
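        /* For example, assuming hz = 100 (hz is configuration-dependent),
         * RECON_TIMO = (25 * 100) / 1000 = 2 softclock ticks, i.e. roughly
         * the intended RECON_DELAY_MS of 25 ms. */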

        /* we are not pre-emptible in the kernel, but we don't want to run
         * forever.  If we run w/o blocking for more than MAX_RECON_EXEC_USECS
         * microseconds of cycle-counter time, delay for RECON_DELAY_MS before
         * continuing.  this may murder us with context switches, so we may
         * need to increase both MAX_RECON_EXEC_USECS and RECON_DELAY_MS. */
        if (reconDesc->reconExecTimerRunning) {
                int status;

                RF_ETIMER_STOP(reconDesc->recon_exec_timer);
                RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
                reconDesc->reconExecTicks +=
                        RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
                if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
                        reconDesc->maxReconExecTicks =
                                reconDesc->reconExecTicks;
                if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
                        /* we've been running too long.  delay for
                         * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
                        reconDesc->numReconExecDelays++;
#endif                          /* RF_RECON_STATS > 0 */

                        status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
                                         "recon delay", RECON_TIMO,
                                         &rctrl->eq_mutex);
                        RF_ASSERT(status == EWOULDBLOCK);
                        reconDesc->reconExecTicks = 0;
                }
        }

        stall_count = 0;
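        /* Wait for an event to appear on the queue.  If we stall here for
         * too long while head-separation callback work is pending, wake
         * those waiters so that reconstruction can make forward progress. */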
        while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
                reconDesc->numReconEventWaits++;
#endif                          /* RF_RECON_STATS > 0 */

                ltsleep(&(rctrl)->eventQueue, PRIBIO, "raidframe eventq",
                        RF_EVENTQ_WAIT, &((rctrl)->eq_mutex));

                stall_count++;

                if ((stall_count > 10) &&
                    rctrl->headSepCBList) {
                        /* There is work to do on the callback list, and
                           we've waited long enough... */
                        rf_WakeupHeadSepCBWaiters(raidPtr);
                        stall_count = 0;
                }
                reconDesc->reconExecTicks = 0;  /* we've just waited */
        }

        reconDesc->reconExecTimerRunning = 1;
        if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer)!=0) {
                /* it moved!!  reset the timer. */
                RF_ETIMER_START(reconDesc->recon_exec_timer);
        }
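        /* remove the event at the head of the (LIFO) event queue and
         * return it */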
        event = rctrl->eventQueue;
        rctrl->eventQueue = event->next;
        event->next = NULL;
        rctrl->eq_count--;

        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
        RF_UNLOCK_MUTEX(rctrl->eq_mutex);
        return (event);
}
/* enqueues a reconstruction event on the indicated queue */
void
rf_CauseReconEvent(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg,
                   RF_Revent_t type)
{
        RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
        RF_ReconEvent_t *event = GetReconEventDesc(col, arg, type);

        if (type == RF_REVENT_BUFCLEAR) {
                RF_ASSERT(col != rctrl->fcol);
        }
        RF_ASSERT(col >= 0 && col <= raidPtr->numCol);
        RF_LOCK_MUTEX(rctrl->eq_mutex);
        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
        event->next = rctrl->eventQueue;
        rctrl->eventQueue = event;
        rctrl->eq_count++;
        RF_UNLOCK_MUTEX(rctrl->eq_mutex);

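        /* wake any thread sleeping on the event queue in
         * rf_GetNextReconEvent() */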
        wakeup(&(rctrl)->eventQueue);
}
/* allocates and initializes a recon event descriptor */
static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type)
{
        RF_ReconEvent_t *t;

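        /* PR_WAITOK: pool_get() may sleep until a descriptor becomes
         * available, so this must not be called from interrupt context */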
        t = pool_get(&rf_pools.revent, PR_WAITOK);
        t->col = col;
        t->arg = arg;
        t->type = type;
        t->next = NULL;
        return (t);
}

/*
  rf_DrainReconEventQueue() -- used in the event of a reconstruction
  problem, this function simply drains all pending events from the
  reconstruction event queue.
*/

void
rf_DrainReconEventQueue(RF_RaidReconDesc_t *reconDesc)
{
        RF_ReconCtrl_t *rctrl = reconDesc->raidPtr->reconControl;
        RF_ReconEvent_t *event;

        RF_LOCK_MUTEX(rctrl->eq_mutex);
        while (rctrl->eventQueue!=NULL) {

                event = rctrl->eventQueue;
                rctrl->eventQueue = event->next;
                event->next = NULL;
                rctrl->eq_count--;
                /* dump it */
                rf_FreeReconEventDesc(event);
        }
        RF_UNLOCK_MUTEX(rctrl->eq_mutex);
}

void
rf_FreeReconEventDesc(RF_ReconEvent_t *event)
{
        pool_put(&rf_pools.revent, event);
}