/*	$NetBSD: rf_revent.c,v 1.29 2021/07/23 00:54:45 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * revent.c -- reconstruction event handling code
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.29 2021/07/23 00:54:45 oster Exp $");

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_shutdown.h"
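
/*
 * RF_MIN_FREE_REVENT and RF_MAX_FREE_REVENT are the sizing hints handed to
 * rf_pool_init() for the pool of reconstruction event descriptors;
 * RF_EVENTQ_WAIT is the timeout passed to rf_timedwait_cond2() when waiting
 * for the event queue to become non-empty.
 */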
#define RF_MAX_FREE_REVENT 128
#define RF_MIN_FREE_REVENT 32
#define RF_EVENTQ_WAIT 5000

#include <sys/proc.h>
#include <sys/kernel.h>

static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg, RF_Revent_t type);
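
/* shutdown hook: destroy this RAID set's pool of recon event descriptors */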
static void rf_ShutdownReconEvent(void *arg)
{
        RF_Raid_t *raidPtr;

        raidPtr = (RF_Raid_t *) arg;

        pool_destroy(&raidPtr->pools.revent);
}

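/*
 * sets up this RAID set's pool of reconstruction event descriptors and
 * registers the shutdown hook that will destroy it
 */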
int
rf_ConfigureReconEvent(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
                       RF_Config_t *cfgPtr)
{

        rf_pool_init(raidPtr, raidPtr->poolNames.revent, &raidPtr->pools.revent, sizeof(RF_ReconEvent_t),
                     "revent", RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);
        rf_ShutdownCreate(listp, rf_ShutdownReconEvent, raidPtr);

        return (0);
}

/* returns the next reconstruction event, blocking the calling thread
 * until one becomes available; the wait is bounded so that the thread
 * can periodically kick the head-separation callback list if the queue
 * stays empty for too long */

RF_ReconEvent_t *
rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc)
{
        RF_Raid_t *raidPtr = reconDesc->raidPtr;
        RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
        RF_ReconEvent_t *event;
        int stall_count;

        rf_lock_mutex2(rctrl->eq_mutex);
        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

        /* sleep timeout value: secs = timo_val/hz.  'ticks' here is
           defined as cycle-counter ticks, not softclock ticks */

#define MAX_RECON_EXEC_USECS (100 * 1000) /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO ((RECON_DELAY_MS * hz) / 1000)

        /* we are not pre-emptible in the kernel, but we don't want to run
         * forever.  If we run w/o blocking for more than MAX_RECON_EXEC_USECS
         * microseconds of cycle-counter time, delay for RECON_DELAY_MS before
         * continuing.  this may murder us with context switches, so we may
         * need to increase both MAX_RECON_EXEC_USECS and RECON_DELAY_MS. */
        if (reconDesc->reconExecTimerRunning) {
                int status;

                RF_ETIMER_STOP(reconDesc->recon_exec_timer);
                RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
                reconDesc->reconExecTicks +=
                        RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
                if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
                        reconDesc->maxReconExecTicks =
                                reconDesc->reconExecTicks;
                if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
                        /* we've been running too long.  delay for
                         * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
                        reconDesc->numReconExecDelays++;
#endif /* RF_RECON_STATS > 0 */

                        status = rf_sleep("rfrecond", RECON_TIMO,
                                          rctrl->eq_mutex);
                        RF_ASSERT(status == EWOULDBLOCK);
                        reconDesc->reconExecTicks = 0;
                }
        }

        stall_count = 0;
        while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
                reconDesc->numReconEventWaits++;
#endif /* RF_RECON_STATS > 0 */

                rf_timedwait_cond2(rctrl->eq_cv, rctrl->eq_mutex,
                                   RF_EVENTQ_WAIT);

                stall_count++;

                if ((stall_count > 10) &&
                    rctrl->headSepCBList) {
                        /* There is work to do on the callback list, and
                           we've waited long enough... */
                        rf_WakeupHeadSepCBWaiters(raidPtr);
                        stall_count = 0;
                }
                reconDesc->reconExecTicks = 0; /* we've just waited */
        }

        reconDesc->reconExecTimerRunning = 1;
        if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer) != 0) {
                /* it moved!!  reset the timer. */
                RF_ETIMER_START(reconDesc->recon_exec_timer);
        }
        event = rctrl->eventQueue;
        rctrl->eventQueue = event->next;
        event->next = NULL;
        rctrl->eq_count--;

        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
        rf_unlock_mutex2(rctrl->eq_mutex);
        return (event);
}
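
/*
 * Typical consumer (a sketch only -- the real dispatch loop lives in the
 * reconstruction code, not in this file): the recon thread pulls one event
 * at a time, acts on event->type, and returns the descriptor to the pool.
 *
 *      RF_ReconEvent_t *event;
 *
 *      event = rf_GetNextReconEvent(reconDesc);
 *      switch (event->type) {
 *      case RF_REVENT_BUFCLEAR:
 *              ... handle the event ...
 *              break;
 *      ...
 *      }
 *      rf_FreeReconEventDesc(reconDesc->raidPtr, event);
 */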
/* enqueues a reconstruction event on the indicated queue */
void
rf_CauseReconEvent(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg,
                   RF_Revent_t type)
{
        RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
        RF_ReconEvent_t *event = GetReconEventDesc(raidPtr, col, arg, type);

        if (type == RF_REVENT_BUFCLEAR) {
                RF_ASSERT(col != rctrl->fcol);
        }
        RF_ASSERT(col >= 0 && col <= raidPtr->numCol);
        rf_lock_mutex2(rctrl->eq_mutex);
        /* q null and count==0 must be equivalent conditions */
        RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
        event->next = rctrl->eventQueue;
        rctrl->eventQueue = event;
        rctrl->eq_count++;
        rf_broadcast_cond2(rctrl->eq_cv);
        rf_unlock_mutex2(rctrl->eq_mutex);
}
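
/*
 * Typical producer (a sketch only; the event type and argument pairing shown
 * here are illustrative, not prescriptive): an I/O completion handler or the
 * head-separation machinery posts work to the recon thread with
 *
 *      rf_CauseReconEvent(raidPtr, col, arg, RF_REVENT_BUFCLEAR);
 *
 * The descriptor is allocated here and later returned to the pool by the
 * consumer via rf_FreeReconEventDesc().
 */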
/* allocates and initializes a recon event descriptor */
static RF_ReconEvent_t *
GetReconEventDesc(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg, RF_Revent_t type)
{
        RF_ReconEvent_t *t;

        t = pool_get(&raidPtr->pools.revent, PR_WAITOK);
        t->col = col;
        t->arg = arg;
        t->type = type;
        t->next = NULL;
        return (t);
}

/*
  rf_DrainReconEventQueue() -- used in the event of a reconstruction
  problem, this function simply drains all pending events from the
  reconstruct event queue.
 */

void
rf_DrainReconEventQueue(RF_RaidReconDesc_t *reconDesc)
{
        RF_ReconCtrl_t *rctrl = reconDesc->raidPtr->reconControl;
        RF_ReconEvent_t *event;

        rf_lock_mutex2(rctrl->eq_mutex);
        while (rctrl->eventQueue != NULL) {

                event = rctrl->eventQueue;
                rctrl->eventQueue = event->next;
                event->next = NULL;
                rctrl->eq_count--;
                /* dump it */
                rf_FreeReconEventDesc(reconDesc->raidPtr, event);
        }
        rf_unlock_mutex2(rctrl->eq_mutex);
}

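/* returns an event descriptor to this RAID set's revent pool */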
void
rf_FreeReconEventDesc(RF_Raid_t *raidPtr, RF_ReconEvent_t *event)
{
        pool_put(&raidPtr->pools.revent, event);
}